prompt (large_string, lengths 70 to 991k) | completion (large_string, lengths 0 to 1.02k) |
---|---|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | __all__ = ["pval_task", "annotation_task"] |
<|file_name|>db.go<|end_file_name|><|fim▁begin|>// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"database/sql"
"database/sql/driver"
"fmt"
"hash/crc32"
"strings"
"time"
pbinlog "github.com/cwen0/cdb-syncer/protocol"
"github.com/go-sql-driver/mysql"
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/ast"
tddl "github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/infoschema"
tmysql "github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/terror"
)
type job struct {
tp pbinlog.BinlogType
sql string
args []interface{}
key string
retry bool
pos Position
}
func newJob(tp pbinlog.BinlogType, sql string, args []interface{}, key string, retry bool, pos Position) *job {
return &job{tp: tp, sql: sql, args: args, key: key, retry: retry, pos: pos}
}
func genHashKey(key string) uint32 {
return crc32.ChecksumIEEE([]byte(key))
}
func genPKey(rows []*pbinlog.Row) string {
var values []string
for _, row := range rows {
values = append(values, row.GetColumnValue())
}
return strings.Join(values, ",")
}
func genInsertSQL(binlog *pbinlog.Binlog) (string, string, []interface{}, error) {
var sql string
var values []string
sql += "replace into " + binlog.GetDbName() + "." + binlog.GetTableName() + "("
rows := binlog.GetRows()
for _, row := range rows {
sql += row.GetColumnName() + ","
values = append(values, row.GetColumnValue())
}
sql = sql[0:len(sql)-1] + ") values ("
for _, _ = range rows {
sql += "?,"
}
sql = sql[0:len(sql)-1] + ")"
return sql, binlog.GetTableName() + genPKey(binlog.GetPrimaryKey()), stringToInterface(values), nil
}
func genUpdateSQL(binlog *pbinlog.Binlog) (string, string, []interface{}, error) {
var sql string
var values []string
sql += "update " + binlog.GetDbName() + "." + binlog.GetTableName() + " set "
rows := binlog.GetRows()
for _, row := range rows {
sql += row.GetColumnName() + "=?,"
values = append(values, row.GetColumnValue())
}
sql = sql[0:len(sql)-1] + " where 1=1 "
for _, row := range binlog.GetPrimaryKey() {
sql += " and " + row.GetColumnName() + " = ? "
values = append(values, row.GetColumnValue())
}
return sql, binlog.GetTableName() + genPKey(binlog.GetPrimaryKey()), stringToInterface(values), nil
}
func genDeleteSQL(binlog *pbinlog.Binlog) (string, string, []interface{}, error) {
var sql string
var values []string
sql += "delete from " + binlog.GetDbName() + "." + binlog.GetTableName() + " where 1=1 "
for _, row := range binlog.GetPrimaryKey() {
sql += " and " + row.GetColumnName() + " = ? "
values = append(values, row.GetColumnValue())
}
return sql, binlog.GetTableName() + genPKey(binlog.GetPrimaryKey()), stringToInterface(values), nil
}
func genDdlSQL(binlog *pbinlog.Binlog) ([]string, string, []interface{}, error) {
var sqls []string
empty := make([]interface{}, 0)
rows := binlog.GetRows()
for _, row := range rows {
tmpSqls, ok, err := resolveDDLSQL(row.GetSql())
if err != nil {
return sqls, "", empty, errors.Errorf("parse ddk sql: %v failed: %v", row.GetSql(), err)
}
if !ok {
continue
}
for _, sql := range tmpSqls {
//var sql string
//if binlog.GetDbName() != "" {
//sql += "use " + binlog.GetDbName() + ";"
//}
//sql += s + ";"
sqls = append(sqls, sql)
}
}
return sqls, "", empty, nil
}
func ignoreDDLError(err error) bool {
mysqlErr, ok := errors.Cause(err).(*mysql.MySQLError)
if !ok {
return false
}
errCode := terror.ErrCode(mysqlErr.Number)
switch errCode {
case infoschema.ErrDatabaseExists.Code(), infoschema.ErrDatabaseNotExists.Code(), infoschema.ErrDatabaseDropExists.Code(),
infoschema.ErrTableExists.Code(), infoschema.ErrTableNotExists.Code(), infoschema.ErrTableDropExists.Code(),
infoschema.ErrColumnExists.Code(), infoschema.ErrColumnNotExists.Code(),
infoschema.ErrIndexExists.Code(), tddl.ErrCantDropFieldOrKey.Code():
return true
default:
return false
}
}
func isRetryableError(err error) bool {
if err == driver.ErrBadConn {
return true
}
var e error
for {
e = errors.Cause(err)
if err == e {
break
}
err = e
}
mysqlErr, ok := err.(*mysql.MySQLError)
if ok {
if mysqlErr.Number == tmysql.ErrUnknown {
return true
}
return false
}
return true
}
func querySQL(db *sql.DB, query string) (*sql.Rows, error) {
var (
err error
rows *sql.Rows
)
for i := 0; i < maxRetryCount; i++ {
if i > 0 {
log.Warnf("query sql retry %d - %s", i, query)
time.Sleep(retryTimeout)
}
log.Debugf("[query][sql]%s", query)
rows, err = db.Query(query)
if err != nil {
if !isRetryableError(err) {
return rows, errors.Trace(err)
}
log.Warnf("[query][sql]%s[error]%v", query, err)
continue
}
return rows, nil
}
if err != nil {
log.Errorf("query sql[%s] failed %v", query, errors.ErrorStack(err))
return nil, errors.Trace(err)
}
return nil, errors.Errorf("query sql[%s] failed", query)
}
func executeSQL(db *sql.DB, sqls []string, args [][]interface{}, retry bool) error {
if len(sqls) == 0 {
return nil
}
var (
err error
txn *sql.Tx
)
retryCount := 1
if retry {
retryCount = maxRetryCount
}
<|fim▁hole|> time.Sleep(retryTimeout)
}
txn, err = db.Begin()
if err != nil {
log.Errorf("exec sqls[%v] begin failed %v", sqls, errors.ErrorStack(err))
continue
}
for i := range sqls {
log.Debugf("[exec][sql]%s[args]%v", sqls[i], args[i])
_, err = txn.Exec(sqls[i], args[i]...)
if err != nil {
if !isRetryableError(err) {
rerr := txn.Rollback()
if rerr != nil {
log.Errorf("[exec][sql]%s[args]%v[error]%v", sqls[i], args[i], rerr)
}
break LOOP
}
log.Warnf("[exec][sql]%s[args]%v[error]%v", sqls[i], args[i], err)
rerr := txn.Rollback()
if rerr != nil {
log.Errorf("[exec][sql]%s[args]%v[error]%v", sqls[i], args[i], rerr)
}
continue LOOP
}
}
err = txn.Commit()
if err != nil {
log.Errorf("exec sqls[%v] commit failed %v", sqls, errors.ErrorStack(err))
continue
}
return nil
}
if err != nil {
log.Errorf("exec sqls[%v] failed %v", sqls, errors.ErrorStack(err))
return errors.Trace(err)
}
return errors.Errorf("exec sqls[%v] failed", sqls)
}
func createDB(cfg DBConfig) (*sql.DB, error) {
dbDSN := fmt.Sprintf("%s:%s@tcp(%s:%d)/?charset=utf8&interpolateParams=true", cfg.User, cfg.Password, cfg.Host, cfg.Port)
db, err := sql.Open("mysql", dbDSN)
if err != nil {
return nil, errors.Trace(err)
}
return db, nil
}
func closeDB(db *sql.DB) error {
if db == nil {
return nil
}
return errors.Trace(db.Close())
}
func createDBs(cfg DBConfig, count int) ([]*sql.DB, error) {
dbs := make([]*sql.DB, 0, count)
for i := 0; i < count; i++ {
db, err := createDB(cfg)
if err != nil {
return nil, errors.Trace(err)
}
dbs = append(dbs, db)
}
return dbs, nil
}
func closeDBs(dbs ...*sql.DB) {
for _, db := range dbs {
err := closeDB(db)
if err != nil {
log.Errorf("close db failed - %v", err)
}
}
}
func parserDDLTableName(sql string) (TableName, error) {
stmt, err := parser.New().ParseOneStmt(sql, "", "")
if err != nil {
return TableName{}, errors.Trace(err)
}
var res TableName
switch v := stmt.(type) {
case *ast.CreateDatabaseStmt:
res = genTableName(v.Name, "")
case *ast.DropDatabaseStmt:
res = genTableName(v.Name, "")
case *ast.CreateIndexStmt:
res = genTableName(v.Table.Schema.L, v.Table.Name.L)
case *ast.CreateTableStmt:
res = genTableName(v.Table.Schema.L, v.Table.Name.L)
case *ast.DropIndexStmt:
res = genTableName(v.Table.Schema.L, v.Table.Name.L)
case *ast.TruncateTableStmt:
res = genTableName(v.Table.Schema.L, v.Table.Name.L)
case *ast.DropTableStmt:
if len(v.Tables) != 1 {
return res, errors.Errorf("may resovle DDL sql failed")
}
res = genTableName(v.Tables[0].Schema.L, v.Tables[0].Name.L)
default:
return res, errors.Errorf("unkown DDL type")
}
return res, nil
}
func genTableName(schema string, table string) TableName {
return TableName{Schema: schema, Name: table}
}
// resolveDDLSQL resolve to one ddl sql
// example: drop table test.a,test2.b -> drop table test.a; drop table test2.b;
func resolveDDLSQL(sql string) (sqls []string, ok bool, err error) {
stmt, err := parser.New().ParseOneStmt(sql, "", "")
if err != nil {
log.Errorf("Parser SQL error: %s", sql)
return nil, false, errors.Trace(err)
}
_, isDDL := stmt.(ast.DDLNode)
if !isDDL {
sqls = append(sqls, sql)
return
}
switch v := stmt.(type) {
case *ast.DropTableStmt:
var ex string
if v.IfExists {
ex = "if exists"
}
for _, t := range v.Tables {
var db string
if t.Schema.O != "" {
db = fmt.Sprintf("`%s`.", t.Schema.O)
}
s := fmt.Sprintf("drop table %s %s`%s`", ex, db, t.Name.O)
sqls = append(sqls, s)
}
default:
sqls = append(sqls, sql)
}
return sqls, true, nil
}<|fim▁end|> | LOOP:
for i := 0; i < retryCount; i++ {
if i > 0 {
log.Warnf("exec sql retry %d - %v - %v", i, sqls, args) |
<|file_name|>BottomSheet.js<|end_file_name|><|fim▁begin|>/* @flow */
import React from "react";
import PropTypes from "prop-types";
import {
Text,
TouchableOpacity,
Platform,
StyleSheet,
ScrollView,
PixelRatio,
View
} from "react-native";
import Modal from "react-native-modalbox";
type Props = {
styleContainer?: Object,
coverScreen?: boolean,
backButtonEnabled?: boolean,
height?: number,
title?: string,
options: Array<Object>,
refs: Function,
fontFamily?: string,
titleFontFamily?: string,
isOpen?: boolean,
cancelButtonIndex?: number,
itemDivider?: number
};
type State = void;
class BottomSheet extends React.PureComponent<Props, State> {
open: Function;
static propTypes = {
styleContainer: PropTypes.object,
coverScreen: PropTypes.bool,
backButtonEnabled: PropTypes.bool,
height: PropTypes.number,
title: PropTypes.string,
options: PropTypes.arrayOf(PropTypes.object).isRequired,
refs: PropTypes.func.isRequired,
fontFamily: PropTypes.string,
titleFontFamily: PropTypes.string,
isOpen: PropTypes.bool,
cancelButtonIndex: PropTypes.number,
itemDivider: PropTypes.number
};
renderOption = (options: Array<Object>) => {
return options.map((item, index) => {
return (
<View style={{ flexDirection: "column" }} key={index}>
<TouchableOpacity onPress={item.onPress}>
<View style={styles.item}>
{item.icon}
<Text
style={[styles.text, { fontFamily: this.props.fontFamily }]}
>
{item.title}
</Text>
</View>
</TouchableOpacity>
{this.props.itemDivider === index + 1 ? (
<View style={styles.separator} />
) : null}
</View>
);
});
};
renderTitle = () => {
if (!this.props.title) {
return;
}
return (
<Text style={[styles.title, { fontFamily: this.props.titleFontFamily }]}>
{this.props.title}
</Text>
);
};
render() {
return (
<Modal
style={[this.props.styleContainer, { height: this.props.height }]}
backButtonClose={this.props.backButtonEnabled}
position="bottom"
isOpen={this.props.isOpen}
ref={this.props.refs}
coverScreen={this.props.coverScreen}
>
<ScrollView style={styles.modal}>
{this.renderTitle()}
{this.renderOption(this.props.options)}
</ScrollView><|fim▁hole|> </Modal>
);
}
}
const styles = StyleSheet.create({
text: {
paddingHorizontal: 32,
fontFamily: "Roboto",
textAlignVertical: "center",
color: "#000",
opacity: 0.87
},
item: {
flexDirection: "row",
height: 48,
alignItems: "center",
paddingLeft: 16,
paddingRight: 16
},
title: {
height: 42,
color: "#000",
opacity: 0.54,
marginLeft: 16
},
modal: {
marginTop: 16
},
separator: {
height: 1 / PixelRatio.get(),
backgroundColor: "#CCCCCC",
marginTop: 7,
marginBottom: 8,
width: "100%"
}
});
export default BottomSheet;<|fim▁end|> | |
<|file_name|>update_listed_companies.py<|end_file_name|><|fim▁begin|>import logging
from datetime import datetime
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
import requests
from companies.models import Company
logger = logging.getLogger('jobs.management.commands')
<|fim▁hole|>class Command(BaseCommand):
help = 'Update currently listed companies'
def handle(self, *args, **options):
logger.info('Started updating currently listed companies')
companies = Company.objects.filter(is_index=False)
r = requests.get(settings.COMPANY_LIST_URL)
records = r.json()['records']
for record in records:
symbol = record['securitySymbol']
name = record['securityName']
listing_date = record['listingDate'].split()[0]
status = record['securityStatus']
try:
company = companies.get(symbol=symbol)
companies = companies.exclude(id=company.id)
except Company.DoesNotExist:
company = Company(symbol=symbol)
company.name = name
company.is_currently_listed = True
company.is_suspended = True if status == 'S' else False
company.listing_date = datetime.strptime(listing_date, '%Y-%m-%d').date()
company.save()
companies.update(is_currently_listed=False, is_suspended=False)
logger.info('Finished updating currently listed companies')<|fim▁end|> | |
<|file_name|>rate_collection.py<|end_file_name|><|fim▁begin|>"""A collection of classes and methods to deal with collections of
rates that together make up a network."""
# Common Imports
import warnings
import functools
import math
import os
from operator import mul
from collections import OrderedDict
from ipywidgets import interact
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import MaxNLocator
import networkx as nx
# Import Rate
from pynucastro.rates import Rate, Nucleus, Library
mpl.rcParams['figure.dpi'] = 100
class Composition:
"""a composition holds the mass fractions of the nuclei in a network
-- useful for evaluating the rates
"""
def __init__(self, nuclei, small=1.e-16):
"""nuclei is an iterable of the nuclei (Nucleus objects) in the network"""
if not isinstance(nuclei[0], Nucleus):
raise ValueError("must supply an iterable of Nucleus objects")
else:
self.X = {k: small for k in nuclei}
def set_solar_like(self, Z=0.02):
""" approximate a solar abundance, setting p to 0.7, He4 to 0.3 - Z and
the remainder evenly distributed with Z """
num = len(self.X)
rem = Z/(num-2)
for k in self.X:
if k == Nucleus("p"):
self.X[k] = 0.7
elif k.raw == "he4":
self.X[k] = 0.3 - Z
else:
self.X[k] = rem
self.normalize()
def set_all(self, xval):
""" set all species to a particular value """
for k in self.X:
self.X[k] = xval
def set_nuc(self, name, xval):
""" set nuclei name to the mass fraction xval """
for k in self.X:
if k.raw == name:
self.X[k] = xval
break
def normalize(self):
""" normalize the mass fractions to sum to 1 """
X_sum = sum(self.X[k] for k in self.X)
for k in self.X:
self.X[k] /= X_sum
def get_molar(self):
""" return a dictionary of molar fractions"""
molar_frac = {k: v/k.A for k, v in self.X.items()}
return molar_frac
def eval_ye(self):
""" return the electron fraction """
zvec = []
avec = []
xvec = []
for n in self.X:
zvec.append(n.Z)
avec.append(n.A)
xvec.append(self.X[n])
zvec = np.array(zvec)
avec = np.array(avec)
xvec = np.array(xvec)
electron_frac = np.sum(zvec*xvec/avec)/np.sum(xvec)
return electron_frac
def __str__(self):
ostr = ""
for k in self.X:
ostr += f" X({k}) : {self.X[k]}\n"
return ostr
class RateCollection:
""" a collection of rates that together define a network """
pynucastro_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def __init__(self, rate_files=None, libraries=None, rates=None, precedence=()):
"""
rate_files are the files that together define the network. This
can be any iterable or single string.
This can include Reaclib library files storing multiple rates.
If libraries is supplied, initialize a RateCollection using the rates
in the Library object(s) in list 'libraries'.
If rates is supplied, initialize a RateCollection using the
Rate objects in the list 'rates'.
Precedence should be a sequence of rate labels (e.g. wc17) to be used to
resolve name conflicts. If a nonempty sequence is provided, the rate
collection will automatically be scanned for multiple rates with the
same name. If all of their labels were given a ranking, the rate with
the label that comes first in the sequence will be retained and the
rest discarded.
Any combination of these options may be supplied.
"""
self.files = []
self.rates = []
self.library = None
if rate_files:
if isinstance(rate_files, str):
rate_files = [rate_files]
self._read_rate_files(rate_files)
if rates:
if isinstance(rates, Rate):
rates = [rates]
try:
for r in rates:
assert isinstance(r, Rate)
except:
print('Expected Rate object or list of Rate objects passed as the rates argument.')
raise
else:
rlib = Library(rates=rates)
if not self.library:
self.library = rlib
else:
self.library = self.library + rlib
if libraries:
if isinstance(libraries, Library):
libraries = [libraries]
try:
for lib in libraries:
assert isinstance(lib, Library)
except:
print('Expected Library object or list of Library objects passed as the libraries argument.')
raise
else:
if not self.library:
self.library = libraries.pop(0)
for lib in libraries:
self.library = self.library + lib
if self.library:
self.rates = self.rates + self.library.get_rates()
if precedence:
self._make_distinguishable(precedence)
# get the unique nuclei
u = []
for r in self.rates:
t = set(r.reactants + r.products)
u = set(list(u) + list(t))
self.unique_nuclei = sorted(u)
# now make a list of each rate that touches each nucleus
# we'll store this in a dictionary keyed on the nucleus
self.nuclei_consumed = OrderedDict()
self.nuclei_produced = OrderedDict()
for n in self.unique_nuclei:
self.nuclei_consumed[n] = [r for r in self.rates if n in r.reactants]
self.nuclei_produced[n] = [r for r in self.rates if n in r.products]
# Re-order self.rates so Reaclib rates come first,
# followed by Tabular rates. This is needed if
# reaclib coefficients are targets of a pointer array
# in the Fortran network.
# It is desired to avoid wasting array size
# storing meaningless Tabular coefficient pointers.
self.rates = sorted(self.rates,
key=lambda r: r.chapter == 't')
self.tabular_rates = []
self.reaclib_rates = []
for n, r in enumerate(self.rates):
if r.chapter == 't':
self.tabular_rates.append(n)
elif isinstance(r.chapter, int):
self.reaclib_rates.append(n)
else:
print('ERROR: Chapter type unknown for rate chapter {}'.format(
str(r.chapter)))
exit()
def _read_rate_files(self, rate_files):
# get the rates
self.files = rate_files
for rf in self.files:
try:
rflib = Library(rf)
except:
print(f"Error reading library from file: {rf}")
raise
else:
if not self.library:
self.library = rflib
else:
self.library = self.library + rflib
def get_nuclei(self):
""" get all the nuclei that are part of the network """
return self.unique_nuclei
def evaluate_rates(self, rho, T, composition):
"""evaluate the rates for a specific density, temperature, and
composition"""
rvals = OrderedDict()
ys = composition.get_molar()
y_e = composition.eval_ye()
for r in self.rates:
val = r.prefactor * rho**r.dens_exp * r.eval(T, rho * y_e)
if (r.weak_type == 'electron_capture' and not r.tabular):
val = val * y_e
yfac = functools.reduce(mul, [ys[q] for q in r.reactants])
rvals[r] = yfac * val
return rvals
def evaluate_ydots(self, rho, T, composition):
"""evaluate net rate of change of molar abundance for each nucleus
for a specific density, temperature, and composition"""
rvals = self.evaluate_rates(rho, T, composition)
ydots = dict()
for nuc in self.unique_nuclei:
# Rates that consume / produce nuc
consuming_rates = self.nuclei_consumed[nuc]
producing_rates = self.nuclei_produced[nuc]
# Number of nuclei consumed / produced
nconsumed = (r.reactants.count(nuc) for r in consuming_rates)
nproduced = (r.products.count(nuc) for r in producing_rates)
# Multiply each rate by the count
consumed = (c * rvals[r] for c, r in zip(nconsumed, consuming_rates))
produced = (c * rvals[r] for c, r in zip(nproduced, producing_rates))
# Net change is difference between produced and consumed
ydots[nuc] = sum(produced) - sum(consumed)
return ydots
def evaluate_activity(self, rho, T, composition):
"""sum over all of the terms contributing to ydot,
neglecting sign"""
rvals = self.evaluate_rates(rho, T, composition)
act = dict()
for nuc in self.unique_nuclei:
# Rates that consume / produce nuc
consuming_rates = self.nuclei_consumed[nuc]
producing_rates = self.nuclei_produced[nuc]
# Number of nuclei consumed / produced
nconsumed = (r.reactants.count(nuc) for r in consuming_rates)
nproduced = (r.products.count(nuc) for r in producing_rates)
# Multiply each rate by the count
consumed = (c * rvals[r] for c, r in zip(nconsumed, consuming_rates))
produced = (c * rvals[r] for c, r in zip(nproduced, producing_rates))
# Net activity is sum of produced and consumed
act[nuc] = sum(produced) + sum(consumed)
return act
def network_overview(self):
""" return a verbose network overview """
ostr = ""
for n in self.unique_nuclei:
ostr += f"{n}\n"
ostr += " consumed by:\n"
for r in self.nuclei_consumed[n]:
ostr += f" {r.string}\n"
ostr += " produced by:\n"
for r in self.nuclei_produced[n]:
ostr += f" {r.string}\n"
ostr += "\n"
return ostr
def get_screening_map(self):
"""a screening map is just a list of tuples containing the information
about nuclei pairs for screening: (descriptive name of nuclei,
nucleus 1, nucleus 2, rate, 1-based index of rate)
"""
screening_map = []
for k, r in enumerate(self.rates):
if r.ion_screen:
nucs = "_".join([str(q) for q in r.ion_screen])
in_map = False
for h, _, _, mrates, krates in screening_map:
if h == nucs:
# if we already have the reactants, then we
# will already be doing the screening factors,
# so just append this new rate to the list we
# are keeping of the rates where this
# screening is needed
in_map = True
mrates.append(r)
krates.append(k+1)
break
if not in_map:
# we handle 3-alpha specially -- we actually need 2 screening factors for it
if nucs == "he4_he4_he4":
# he4 + he4
screening_map.append((nucs, r.ion_screen[0], r.ion_screen[1],
[r], [k+1]))
# he4 + be8
be8 = Nucleus("Be8", dummy=True)
screening_map.append((nucs+"_dummy", r.ion_screen[2], be8,
[r], [k+1]))
else:
screening_map.append((nucs, r.ion_screen[0], r.ion_screen[1],
[r], [k+1]))
return screening_map
def write_network(self, *args, **kwargs):
"""Before writing the network, check to make sure the rates
are distinguishable by name."""
assert self._distinguishable_rates(), "ERROR: Rates not uniquely identified by Rate.fname"
self._write_network(*args, **kwargs)
def _distinguishable_rates(self):
"""Every Rate in this RateCollection should have a unique Rate.fname,
as the network writers distinguish the rates on this basis."""
names = [r.fname for r in self.rates]
for n, r in zip(names, self.rates):
k = names.count(n)
if k > 1:
print(f'Found rate {r} named {n} with {k} entries in the RateCollection.')
print(f'Rate {r} has the original source:\n{r.original_source}')
print(f'Rate {r} is in chapter {r.chapter}')
return len(set(names)) == len(self.rates)
def _make_distinguishable(self, precedence):
"""If multiple rates have the same name, eliminate the extraneous ones according to their
labels' positions in the precedence list. Only do this if all of the labels have
rankings in the list."""
nameset = {r.fname for r in self.rates}
precedence = {lab: i for i, lab in enumerate(precedence)}
def sorting_key(i): return precedence[self.rates[i].label]
for n in nameset:
# Count instances of name, and cycle if there is only one
ind = [i for i, r in enumerate(self.rates) if r.fname == n]
k = len(ind)
if k <= 1: continue
# If there were multiple instances, use the precedence settings to delete extraneous
# rates
labels = [self.rates[i].label for i in ind]
if all(lab in precedence for lab in labels):
sorted_ind = sorted(ind, key=sorting_key)
r = self.rates[sorted_ind[0]]
for i in sorted(sorted_ind[1:], reverse=True): del self.rates[i]
print(f'Found rate {r} named {n} with {k} entries in the RateCollection.')
print(f'Kept only entry with label {r.label} out of {labels}.')
def _write_network(self, *args, **kwargs):
"""A stub for function to output the network -- this is implementation
dependent."""
print('To create network integration source code, use a class that implements a specific network type.')
return
def plot(self, outfile=None, rho=None, T=None, comp=None,
size=(800, 600), dpi=100, title=None,
ydot_cutoff_value=None,
node_size=1000, node_font_size=13, node_color="#A0CBE2", node_shape="o",
N_range=None, Z_range=None, rotated=False,
always_show_p=False, always_show_alpha=False, hide_xalpha=False, filter_function=None):
"""Make a plot of the network structure showing the links between
nuclei. If a full set of thermodynamic conditions are
provided (rho, T, comp), then the links are colored by rate
strength.
parameters
----------
outfile: output name of the plot -- extension determines the type
rho: density to evaluate rates with
T: temperature to evaluate rates with
comp: composition to evaluate rates with
size: tuple giving width x height of the plot in inches
dpi: pixels per inch used by matplotlib in rendering bitmap
title: title to display on the plot
ydot_cutoff_value: rate threshold below which we do not show a
line corresponding to a rate
node_size: size of a node
node_font_size: size of the font used to write the isotope in the node
node_color: color to make the nodes
node_shape: shape of the node (using matplotlib marker names)
N_range: range of neutron number to zoom in on
Z_range: range of proton number to zoom in on
rotated: if True, we plot A - 2Z vs. Z instead of the default Z vs. N
always_show_p: include p as a node on the plot even if we
don't have p+p reactions
always_show_alpha: include He4 as a node on the plot even if we don't have 3-alpha
hide_xalpha: don't connect the links to alpha for heavy
nuclei reactions of the form A(alpha,X)B or A(X,alpha)B, except if alpha
is the heaviest product.
filter_function: name of a custom function that takes the list
of nuclei and returns a new list with the nuclei to be shown
as nodes.
"""
G = nx.MultiDiGraph()
G.position = {}
G.labels = {}
fig, ax = plt.subplots()
#divider = make_axes_locatable(ax)
#cax = divider.append_axes('right', size='15%', pad=0.05)
#ax.plot([0, 0], [8, 8], 'b-')
# in general, we do not show p, n, alpha,
# unless we have p + p, 3-a, etc.
hidden_nuclei = ["n"]
if not always_show_p:
hidden_nuclei.append("p")
if not always_show_alpha:
hidden_nuclei.append("he4")
# nodes -- the node nuclei will be all of the heavies
# add all the nuclei into G.node
node_nuclei = []
for n in self.unique_nuclei:
if n.raw not in hidden_nuclei:
node_nuclei.append(n)
else:
for r in self.rates:
if r.reactants.count(n) > 1:
node_nuclei.append(n)
break
if filter_function is not None:
node_nuclei = list(filter(filter_function, node_nuclei))
for n in node_nuclei:
G.add_node(n)
if rotated:
G.position[n] = (n.Z, n.A - 2*n.Z)
else:
G.position[n] = (n.N, n.Z)
G.labels[n] = fr"${n.pretty}$"
# get the rates for each reaction
if rho is not None and T is not None and comp is not None:
ydots = self.evaluate_rates(rho, T, comp)
else:
ydots = None
# Do not show rates on the graph if their corresponding ydot is less than ydot_cutoff_value
invisible_rates = set()
if ydot_cutoff_value is not None:
for r in self.rates:
if ydots[r] < ydot_cutoff_value:
invisible_rates.add(r)
# edges
for n in node_nuclei:
for r in self.nuclei_consumed[n]:
for p in r.products:
if p in node_nuclei:
if hide_xalpha:
# first check if alpha is the heaviest nucleus on the RHS
rhs_heavy = sorted(r.products)[-1]
if not (rhs_heavy.Z == 2 and rhs_heavy.A == 4):
# for rates that are A (x, alpha) B, where A and B are heavy nuclei,
# don't show the connection of the nucleus to alpha, only show it to B
if p.Z == 2 and p.A == 4:
continue
# likewise, hide A (alpha, x) B, unless A itself is an alpha
c = r.reactants
n_alpha = 0
for nuc in c:
if nuc.Z == 2 and nuc.A == 4:
n_alpha += 1
# if there is only 1 alpha and we are working on the alpha node,
# then skip
if n_alpha == 1 and n.Z == 2 and n.A == 4:
continue
# networkx doesn't seem to keep the edges in
# any particular order, so we associate data
# to the edges here directly, in this case,
# the reaction rate, which will be used to
# color it
if ydots is None:
G.add_edges_from([(n, p)], weight=0.5)
else:
if r in invisible_rates:
continue
try:
rate_weight = math.log10(ydots[r])
except ValueError:
# if ydots[r] is zero, then set the weight
# to roughly the minimum exponent possible
# for python floats
rate_weight = -308
except:
raise
G.add_edges_from([(n, p)], weight=rate_weight)
# It seems that networkx broke backwards compatibility, and 'zorder' is no longer a valid
# keyword argument. The 'linewidth' argument has also changed to 'linewidths'.
nx.draw_networkx_nodes(G, G.position, # plot the element at the correct position
node_color=node_color, alpha=1.0,
node_shape=node_shape, node_size=node_size, linewidths=2.0, ax=ax)
nx.draw_networkx_labels(G, G.position, G.labels, # label the name of element at the correct position
font_size=node_font_size, font_color="w", ax=ax)
# get the edges and weights coupled in the same order
edges, weights = zip(*nx.get_edge_attributes(G, 'weight').items())
edge_color=weights
ww = np.array(weights)
min_weight = ww.min()
max_weight = ww.max()
dw = (max_weight - min_weight)/4
widths = np.ones_like(ww)
widths[ww > min_weight + dw] = 1.5
widths[ww > min_weight + 2*dw] = 2.5
widths[ww > min_weight + 3*dw] = 4
edges_lc = nx.draw_networkx_edges(G, G.position, width=list(widths), # plot the arrow of reaction
edgelist=edges, edge_color=edge_color,
node_size=node_size,
edge_cmap=plt.cm.viridis, ax=ax)
# for networkx <= 2.0 draw_networkx_edges returns a
# LineCollection matplotlib type which we can use for the
# colorbar directly. For networkx >= 2.1, it is a collection
# of FancyArrowPatch-s, which we need to run through a
# PatchCollection. See:
# https://stackoverflow.com/questions/18658047/adding-a-matplotlib-colorbar-from-a-patchcollection
if ydots is not None:
pc = mpl.collections.PatchCollection(edges_lc, cmap=plt.cm.viridis)
pc.set_array(weights)
if not rotated:
plt.colorbar(pc, ax=ax, label="log10(rate)")
else:
plt.colorbar(pc, ax=ax, label="log10(rate)", orientation="horizontal", fraction=0.05)
Ns = [n.N for n in node_nuclei]
Zs = [n.Z for n in node_nuclei]
if not rotated:
ax.set_xlim(min(Ns)-1, max(Ns)+1)
else:
ax.set_xlim(min(Zs)-1, max(Zs)+1)
#plt.ylim(min(Zs)-1, max(Zs)+1)
if not rotated:
plt.xlabel(r"$N$", fontsize="large")
plt.ylabel(r"$Z$", fontsize="large")
else:
plt.xlabel(r"$Z$", fontsize="large")
plt.ylabel(r"$A - 2Z$", fontsize="large")
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)<|fim▁hole|> ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
if Z_range is not None and N_range is not None:
if not rotated:
ax.set_xlim(N_range[0], N_range[1])
ax.set_ylim(Z_range[0], Z_range[1])
else:
ax.set_xlim(Z_range[0], Z_range[1])
if not rotated:
ax.set_aspect("equal", "datalim")
fig.set_size_inches(size[0]/dpi, size[1]/dpi)
if title is not None:
fig.suptitle(title)
if outfile is None:
plt.show()
else:
plt.tight_layout()
plt.savefig(outfile, dpi=dpi)
@staticmethod
def _safelog(arr, small):
arr = np.copy(arr)
if np.any(arr < 0.0):
raise ValueError("Negative values not allowed for logscale - try symlog instead.")
zeros = arr == 0.0
arr[zeros] = min(small, arr[~zeros].min() / 10)
return np.log10(arr)
@staticmethod
def _symlog(arr, linthresh=1.0):
assert linthresh >= 1.0
neg = arr < 0.0
arr = np.abs(arr)
needslog = arr > linthresh
arr[needslog] = np.log10(arr[needslog]) + linthresh
arr[neg] *= -1
return arr
@staticmethod
def _scale(arr, minval=None, maxval=None):
if minval is None: minval = arr.min()
if maxval is None: maxval = arr.max()
if minval != maxval:
scaled = (arr - minval) / (maxval - minval)
else:
scaled = np.zeros_like(arr)
scaled[scaled < 0.0] = 0.0
scaled[scaled > 1.0] = 1.0
return scaled
def gridplot(self, comp=None, color_field="X", rho=None, T=None, **kwargs):
"""
Plot nuclides as cells on a grid of Z vs. N, colored by *color_field*. If called
without a composition, the function will just plot the grid with no color field.
:param comp: Composition of the environment.
:param color_field: Field to color by. Must be one of 'X' (mass fraction),
'Y' (molar abundance), 'Xdot' (time derivative of X), 'Ydot' (time
derivative of Y), or 'activity' (sum of contributions to Ydot of
all rates, ignoring sign).
:param rho: Density to evaluate rates at. Needed for fields involving time
derivatives.
:param T: Temperature to evaluate rates at. Needed for fields involving time
derivatives.
:Keyword Arguments:
- *scale* -- One of 'linear', 'log', and 'symlog'. Linear by default.
- *small* -- If using logarithmic scaling, zeros will be replaced with
this value. 1e-30 by default.
- *linthresh* -- Linearity threshold for symlog scaling.
- *filter_function* -- A callable to filter Nucleus objects with. Should
return *True* if the nuclide should be plotted.
- *outfile* -- Output file to save the plot to. The plot will be shown if
not specified.
- *dpi* -- DPI to save the image file at.
- *cmap* -- Name of the matplotlib colormap to use. Default is 'magma'.
- *edgecolor* -- Color of grid cell edges.
- *area* -- Area of the figure without the colorbar, in square inches. 64
by default.
- *no_axes* -- Set to *True* to omit axis spines.
- *no_ticks* -- Set to *True* to omit tickmarks.
- *no_cbar* -- Set to *True* to omit colorbar.
- *cbar_label* -- Colorbar label.
- *cbar_bounds* -- Explicit colorbar bounds.
- *cbar_format* -- Format string or Formatter object for the colorbar ticks.
"""
# Process kwargs
outfile = kwargs.pop("outfile", None)
scale = kwargs.pop("scale", "linear")
cmap = kwargs.pop("cmap", "viridis")
edgecolor = kwargs.pop("edgecolor", "grey")
small = kwargs.pop("small", 1e-30)
area = kwargs.pop("area", 64)
no_axes = kwargs.pop("no_axes", False)
no_ticks = kwargs.pop("no_ticks", False)
no_cbar = kwargs.pop("no_cbar", False)
cbar_label = kwargs.pop("cbar_label", None)
cbar_format = kwargs.pop("cbar_format", None)
cbar_bounds = kwargs.pop("cbar_bounds", None)
filter_function = kwargs.pop("filter_function", None)
dpi = kwargs.pop("dpi", 100)
linthresh = kwargs.pop("linthresh", 1.0)
if kwargs: warnings.warn(f"Unrecognized keyword arguments: {kwargs.keys()}")
# Get figure, colormap
fig, ax = plt.subplots()
cmap = mpl.cm.get_cmap(cmap)
# Get nuclei and all 3 numbers
nuclei = self.unique_nuclei
if filter_function is not None:
nuclei = list(filter(filter_function, nuclei))
Ns = np.array([n.N for n in nuclei])
Zs = np.array([n.Z for n in nuclei])
As = Ns + Zs
# Compute weights
color_field = color_field.lower()
if color_field not in {"x", "y", "ydot", "xdot", "activity"}:
raise ValueError(f"Invalid color field: '{color_field}'")
if comp is None:
values = np.zeros(len(nuclei))
elif color_field == "x":
values = np.array([comp.X[nuc] for nuc in nuclei])
elif color_field == "y":
ys = comp.get_molar()
values = np.array([ys[nuc] for nuc in nuclei])
elif color_field in {"ydot", "xdot"}:
if rho is None or T is None:
raise ValueError("Need both rho and T to evaluate rates!")
ydots = self.evaluate_ydots(rho, T, comp)
values = np.array([ydots[nuc] for nuc in nuclei])
if color_field == "xdot": values *= As
elif color_field == "activity":
if rho is None or T is None:
raise ValueError("Need both rho and T to evaluate rates!")
act = self.evaluate_activity(rho, T, comp)
values = np.array([act[nuc] for nuc in nuclei])
if scale == "log": values = self._safelog(values, small)
elif scale == "symlog": values = self._symlog(values, linthresh)
if cbar_bounds is None:
cbar_bounds = values.min(), values.max()
weights = self._scale(values, *cbar_bounds)
# Plot a square for each nucleus
for nuc, weight in zip(nuclei, weights):
square = plt.Rectangle((nuc.N - 0.5, nuc.Z - 0.5), width=1, height=1,
facecolor=cmap(weight), edgecolor=edgecolor)
ax.add_patch(square)
# Set limits
maxN, minN = max(Ns), min(Ns)
maxZ, minZ = max(Zs), min(Zs)
plt.xlim(minN - 0.5, maxN + 0.6)
plt.ylim(minZ - 0.5, maxZ + 0.6)
# Set plot appearance
rat = (maxN - minN) / (maxZ - minZ)
width = np.sqrt(area * rat)
height = area / width
fig.set_size_inches(width, height)
plt.xlabel(r"N $\rightarrow$")
plt.ylabel(r"Z $\rightarrow$")
if no_axes or no_ticks:
plt.tick_params \
(
axis = 'both',
which = 'both',
bottom = False,
left = False,
labelbottom = False,
labelleft = False
)
else:
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
if no_axes:
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
# Colorbar stuff
if not no_cbar and comp is not None:
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='3.5%', pad=0.1)
cbar_norm = mpl.colors.Normalize(*cbar_bounds)
smap = mpl.cm.ScalarMappable(norm=cbar_norm, cmap=cmap)
if not cbar_label:
capfield = color_field.capitalize()
if scale == "log":
cbar_label = f"log[{capfield}]"
elif scale == "symlog":
cbar_label = f"symlog[{capfield}]"
else:
cbar_label = capfield
fig.colorbar(smap, cax=cax, orientation="vertical",
label=cbar_label, format=cbar_format)
# Show or save
if outfile is None:
plt.show()
else:
plt.tight_layout()
plt.savefig(outfile, dpi=dpi)
def __repr__(self):
string = ""
for r in self.rates:
string += f"{r.string}\n"
return string
class Explorer:
""" interactively explore a rate collection """
def __init__(self, rc, comp, size=(800, 600),
ydot_cutoff_value=None,
always_show_p=False, always_show_alpha=False):
""" take a RateCollection and a composition """
self.rc = rc
self.comp = comp
self.size = size
self.ydot_cutoff_value = ydot_cutoff_value
self.always_show_p = always_show_p
self.always_show_alpha = always_show_alpha
def _make_plot(self, logrho, logT):
self.rc.plot(rho=10.0**logrho, T=10.0**logT,
comp=self.comp, size=self.size,
ydot_cutoff_value=self.ydot_cutoff_value,
always_show_p=self.always_show_p,
always_show_alpha=self.always_show_alpha)
def explore(self, logrho=(2, 6, 0.1), logT=(7, 9, 0.1)):
"""Perform interactive exploration of the network structure."""
interact(self._make_plot, logrho=logrho, logT=logT)<|fim▁end|> | ax.spines['top'].set_visible(False) |
<|file_name|>upgrade-before.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# This is run by Travis-CI before an upgrade to load some data into the
# database. After the upgrade is complete, the data is verified by
# upgrade-after.py to make sure that the upgrade of the database went smoothly.
#
import logging
import unittest
import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../pynipap')<|fim▁hole|>sys.path.insert(0, '../nipap-cli')
from nipap.backend import Nipap
from nipap.authlib import SqliteAuth
from nipap.nipapconfig import NipapConfig
from pynipap import AuthOptions, VRF, Pool, Prefix, NipapNonExistentError, NipapDuplicateError, NipapValueError
import pynipap
pynipap.xmlrpc_uri = 'http://unittest:[email protected]:1337'
o = AuthOptions({
'authoritative_source': 'nipap'
})
class TestHelper:
@classmethod
def clear_database(cls):
cfg = NipapConfig('/etc/nipap/nipap.conf')
n = Nipap()
# have to delete hosts before we can delete the rest
n._execute("DELETE FROM ip_net_plan WHERE masklen(prefix) = 32")
# the rest
n._execute("DELETE FROM ip_net_plan")
# delete all except for the default VRF with id 0
n._execute("DELETE FROM ip_net_vrf WHERE id > 0")
# set default info for VRF 0
n._execute("UPDATE ip_net_vrf SET name = 'default', description = 'The default VRF, typically the Internet.' WHERE id = 0")
n._execute("DELETE FROM ip_net_pool")
n._execute("DELETE FROM ip_net_asn")
def add_prefix(self, prefix, type, description, tags=None):
if tags is None:
tags = []
p = Prefix()
p.prefix = prefix
p.type = type
p.description = description
p.tags = tags
p.save()
return p
class TestLoad(unittest.TestCase):
""" Load some data into the database
"""
def test_load_data(self):
"""
"""
th = TestHelper()
p1 = th.add_prefix('192.168.0.0/16', 'reservation', 'test')
p2 = th.add_prefix('192.168.0.0/20', 'reservation', 'test')
p3 = th.add_prefix('192.168.0.0/24', 'reservation', 'test')
p4 = th.add_prefix('192.168.1.0/24', 'reservation', 'test')
p5 = th.add_prefix('192.168.2.0/24', 'reservation', 'test')
p6 = th.add_prefix('192.168.32.0/20', 'reservation', 'test')
p7 = th.add_prefix('192.168.32.0/24', 'reservation', 'test')
p8 = th.add_prefix('192.168.32.1/32', 'reservation', 'test')
ps1 = th.add_prefix('2001:db8:1::/48', 'reservation', 'test')
ps2 = th.add_prefix('2001:db8:1::/64', 'reservation', 'test')
ps3 = th.add_prefix('2001:db8:2::/48', 'reservation', 'test')
pool1 = Pool()
pool1.name = 'upgrade-test'
pool1.ipv4_default_prefix_length = 31
pool1.ipv6_default_prefix_length = 112
pool1.save()
p2.pool = pool1
p2.save()
ps1.pool = pool1
ps1.save()
pool2 = Pool()
pool2.name = 'upgrade-test2'
pool2.save()
vrf1 = VRF()
vrf1.name = 'foo'
vrf1.rt = '123:123'
vrf1.save()
if __name__ == '__main__':
# set up logging
log = logging.getLogger()
logging.basicConfig()
log.setLevel(logging.INFO)
if sys.version_info >= (2,7):
unittest.main(verbosity=2)
else:
unittest.main()<|fim▁end|> | sys.path.insert(0, '../nipap') |
<|file_name|>parameters.js<|end_file_name|><|fim▁begin|>// getPostsParameters gives an object containing the appropriate find and options arguments for the subscriptions's Posts.find()
getPostsParameters = function (terms) {
var maxLimit = 200;
// console.log(terms)
// note: using jquery's extend() with "deep" parameter set to true instead of shallow _.extend()
// see: http://api.jquery.com/jQuery.extend/
// initialize parameters by extending baseParameters object, to avoid passing it by reference
var parameters = deepExtend(true, {}, viewParameters.baseParameters);
// if view is not defined, default to "top"
var view = !!terms.view ? dashToCamel(terms.view) : 'top';
// get query parameters according to current view
if (typeof viewParameters[view] !== 'undefined')
parameters = deepExtend(true, parameters, viewParameters[view](terms));
// extend sort to sort posts by _id to break ties
deepExtend(true, parameters, {options: {sort: {_id: -1}}});
// if a limit was provided with the terms, add it too (note: limit=0 means "no limit")
if (typeof terms.limit !== 'undefined')
_.extend(parameters.options, {limit: parseInt(terms.limit)});
// limit to "maxLimit" posts at most when limit is undefined, equal to 0, or superior to maxLimit
if(!parameters.options.limit || parameters.options.limit == 0 || parameters.options.limit > maxLimit) {
parameters.options.limit = maxLimit;
}
// hide future scheduled posts unless "showFuture" is set to true or postedAt is already defined
if (!parameters.showFuture && !parameters.find.postedAt)
parameters.find.postedAt = {$lte: new Date()};
// console.log(parameters);
return parameters;
};
getUsersParameters = function(filterBy, sortBy, limit) {
var find = {},
sort = {createdAt: -1};
switch(filterBy){
case 'invited':
// consider admins as invited
find = {$or: [{isInvited: true}, adminMongoQuery]};
break;
case 'uninvited':
find = {$and: [{isInvited: false}, notAdminMongoQuery]};
break;
<|fim▁hole|> break;
}
switch(sortBy){
case 'username':
sort = {username: 1};
break;
case 'karma':
sort = {karma: -1};
break;
case 'postCount':
sort = {postCount: -1};
break;
case 'commentCount':
sort = {commentCount: -1};
break;
case 'invitedCount':
sort = {invitedCount: -1};
}
return {
find: find,
options: {sort: sort, limit: limit}
};
};<|fim▁end|> | case 'admin':
find = adminMongoQuery;
|
<|file_name|>WeatherDao.java<|end_file_name|><|fim▁begin|>package com.wearit.shike.web.model.dao.weather;
import java.util.List;
import com.wearit.shike.web.model.weather.TrackWeather;
import com.wearit.shike.web.model.weather.Weather;
public interface WeatherDao {<|fim▁hole|> * Weather.
*
* @return
*/
public List<Weather> getAllWeather();
/**
* Method used to add weather information to the Weather table.
*
* @param w
* weather record to add to the database
*/
public void addTrackWeather(TrackWeather tw);
/**
* Method used to retrieve the weather information of a track.
*
* @param idt
* id of the virtual track whose weather is requested
* @return the trackweather associated with the requested track
*/
public List<Weather> getTrackWeather(int idt);
/**
* Method used to retrieve a single weather entry of a track
*
* @param _idt
* id of the virtual track whose weather is requested
* @param date
* date of the weather forecast
* @return the single trackweather associated with the requested track, or null if the
* trackweather list is empty
*/
public Weather getSingleTrackWeather(int _idt, long date);
/**
* Method used to remove a record from the Weather table.
*
* @param id
* id of the weather record to delete
* @param order
* forecastOrder of the weather record to delete
*/
public void deleteWeather(int id, long order);
}<|fim▁end|> | /**
* Method used to list all the records belonging to the table |
<|file_name|>TDAnnotationDecl.java<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2015-2016 Didier Villevalois.
*
* This file is part of JLaTo.
*
* JLaTo is free software: you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the
* Free Software Foundation, either version 3 of the License, or (at your
* option) any later version.
*
* JLaTo is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
* License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with JLaTo. If not, see <http://www.gnu.org/licenses/>.
*/
package org.jlato.internal.td.decl;
import org.jlato.internal.bu.coll.SNodeList;
import org.jlato.internal.bu.decl.SAnnotationDecl;
import org.jlato.internal.bu.name.SName;
import org.jlato.internal.td.TDLocation;
import org.jlato.internal.td.TDTree;
import org.jlato.tree.Kind;
import org.jlato.tree.NodeList;
import org.jlato.tree.Trees;
import org.jlato.tree.decl.AnnotationDecl;
import org.jlato.tree.decl.ExtendedModifier;
import org.jlato.tree.decl.MemberDecl;
import org.jlato.tree.decl.TypeDecl;
import org.jlato.tree.name.Name;
import org.jlato.util.Mutation;
/**
* An annotation type declaration.
*/
public class TDAnnotationDecl extends TDTree<SAnnotationDecl, TypeDecl, AnnotationDecl> implements AnnotationDecl {
/**
* Returns the kind of this annotation type declaration.
*
* @return the kind of this annotation type declaration.
*/
public Kind kind() {
return Kind.AnnotationDecl;
}
/**
* Creates an annotation type declaration for the specified tree location.
*
* @param location the tree location.
*/
public TDAnnotationDecl(TDLocation<SAnnotationDecl> location) {
super(location);
}
/**
* Creates an annotation type declaration with the specified child trees.
*
* @param modifiers the modifiers child tree.
* @param name the name child tree.
* @param members the members child tree.
*/
public TDAnnotationDecl(NodeList<ExtendedModifier> modifiers, Name name, NodeList<MemberDecl> members) {
super(new TDLocation<SAnnotationDecl>(SAnnotationDecl.make(TDTree.<SNodeList>treeOf(modifiers), TDTree.<SName>treeOf(name), TDTree.<SNodeList>treeOf(members))));
}
/**
* Returns the modifiers of this annotation type declaration.
*
* @return the modifiers of this annotation type declaration.
*/
public NodeList<ExtendedModifier> modifiers() {
return location.safeTraversal(SAnnotationDecl.MODIFIERS);
}
/**
* Replaces the modifiers of this annotation type declaration.
*
* @param modifiers the replacement for the modifiers of this annotation type declaration.
* @return the resulting mutated annotation type declaration.
*/
public AnnotationDecl withModifiers(NodeList<ExtendedModifier> modifiers) {
return location.safeTraversalReplace(SAnnotationDecl.MODIFIERS, modifiers);
}
/**
* Mutates the modifiers of this annotation type declaration.
*
* @param mutation the mutation to apply to the modifiers of this annotation type declaration.
* @return the resulting mutated annotation type declaration.
*/
public AnnotationDecl withModifiers(Mutation<NodeList<ExtendedModifier>> mutation) {
return location.safeTraversalMutate(SAnnotationDecl.MODIFIERS, mutation);
}
/**
* Returns the name of this annotation type declaration.
*
* @return the name of this annotation type declaration.
*/
public Name name() {
return location.safeTraversal(SAnnotationDecl.NAME);
}
/**
* Replaces the name of this annotation type declaration.
*
* @param name the replacement for the name of this annotation type declaration.
* @return the resulting mutated annotation type declaration.
*/
public AnnotationDecl withName(Name name) {
return location.safeTraversalReplace(SAnnotationDecl.NAME, name);
}
/**
* Mutates the name of this annotation type declaration.
*
* @param mutation the mutation to apply to the name of this annotation type declaration.
* @return the resulting mutated annotation type declaration.
*/
public AnnotationDecl withName(Mutation<Name> mutation) {
return location.safeTraversalMutate(SAnnotationDecl.NAME, mutation);
}
/**
* Replaces the name of this annotation type declaration.
*
* @param name the replacement for the name of this annotation type declaration.
* @return the resulting mutated annotation type declaration.<|fim▁hole|> */
public AnnotationDecl withName(String name) {
return location.safeTraversalReplace(SAnnotationDecl.NAME, Trees.name(name));
}
/**
* Returns the members of this annotation type declaration.
*
* @return the members of this annotation type declaration.
*/
public NodeList<MemberDecl> members() {
return location.safeTraversal(SAnnotationDecl.MEMBERS);
}
/**
* Replaces the members of this annotation type declaration.
*
* @param members the replacement for the members of this annotation type declaration.
* @return the resulting mutated annotation type declaration.
*/
public AnnotationDecl withMembers(NodeList<MemberDecl> members) {
return location.safeTraversalReplace(SAnnotationDecl.MEMBERS, members);
}
/**
* Mutates the members of this annotation type declaration.
*
* @param mutation the mutation to apply to the members of this annotation type declaration.
* @return the resulting mutated annotation type declaration.
*/
public AnnotationDecl withMembers(Mutation<NodeList<MemberDecl>> mutation) {
return location.safeTraversalMutate(SAnnotationDecl.MEMBERS, mutation);
}
}<|fim▁end|> | |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import url
from externals.views import (
PartnerVendorNumberAPIView,
PartnerExternalDetailsAPIView,
PartnerBasicInfoAPIView,
)
urlpatterns = [
url(r'^vendor-number/partner/$', PartnerVendorNumberAPIView.as_view(), name="vendor-number-create"),
url(r'^vendor-number/partner/(?P<pk>\d+)/$', PartnerVendorNumberAPIView.as_view(), name="vendor-number-details"),
url(
r'^partner-details/(?P<agency_id>\d+)/(?P<partner_id>\d+)/$',
PartnerExternalDetailsAPIView.as_view(),
name="partner-external-details"
),
url(
r'^partner-basic-info/$',
PartnerBasicInfoAPIView.as_view(),
name="partner-basic-info"
),<|fim▁hole|><|fim▁end|> | ] |
<|file_name|>incr_transfer_tree.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import subprocess
import sys
from test_util import TestFailedError, run_command, \
serializeIncrParseMarkupFile
<|fim▁hole|> formatter_class=argparse.RawDescriptionHelpFormatter,
description='Utility for testing incremental syntax tree transfer',
epilog='''
Based off a single template, the utility generates a pre-edit and a post-edit
file. It then verifies that the incrementally transferred syntax tree
matches the syntax tree passed as --expected-incremental-syntax-tree.
To generate the pre-edit and the post-edit file from the template, it
operates on markers of the form:
<<test_case<pre|||post>>>
These placeholders are replaced by:
- 'pre' if a different test case than 'test_case' is run
- 'pre' for the pre-edit version of 'test_case'
- 'post' for the post-edit version of 'test_case'
''')
parser.add_argument(
'file', type=argparse.FileType(),
help='The template file to test')
parser.add_argument(
'--test-case', default='',
help='The test case to execute. If no test case is specified all '
'unnamed substitutions are applied')
parser.add_argument(
'--temp-dir', required=True,
help='A temporary directory where pre-edit and post-edit files can be '
'saved')
parser.add_argument(
'--swift-syntax-test', required=True,
help='The path to swift-syntax-test')
parser.add_argument(
'--expected-incremental-syntax-tree', required=True,
help='The path to a file that contains the expected incrementally '
'transferred syntax tree')
args = parser.parse_args(sys.argv[1:])
test_file = args.file.name
test_file_name = os.path.basename(test_file)
test_case = args.test_case
temp_dir = args.temp_dir
swift_syntax_test = args.swift_syntax_test
expected_syntax_tree_file = args.expected_incremental_syntax_tree
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
incremental_serialized_file = temp_dir + '/' + test_file_name + '.' \
+ test_case + '.incr.json'
try:
serializeIncrParseMarkupFile(test_file=test_file,
test_case=test_case,
mode='incremental',
serialization_mode='incremental',
omit_node_ids=False,
output_file=incremental_serialized_file,
temp_dir=temp_dir + '/temp',
swift_syntax_test=swift_syntax_test,
print_visual_reuse_info=False)
except TestFailedError as e:
print('Test case "%s" of %s FAILed' % (test_case, test_file),
file=sys.stderr)
print(e.message, file=sys.stderr)
sys.exit(1)
# Check if the two syntax trees are the same
try:
run_command(
[
'diff', '-u',
incremental_serialized_file,
expected_syntax_tree_file
])
except subprocess.CalledProcessError as e:
print('Test case "%s" of %s FAILed' % (test_case, test_file),
file=sys.stderr)
print('Syntax tree of incremental parsing does not match expected '
'incrementally transferred syntax tree:\n\n', file=sys.stderr)
print(e.output, file=sys.stderr)
sys.exit(1)
if __name__ == '__main__':
main()<|fim▁end|> |
def main():
parser = argparse.ArgumentParser( |
<|file_name|>Grid.js<|end_file_name|><|fim▁begin|>/**
* @jsx React.DOM
* @copyright Prometheus Research, LLC 2014
*/
"use strict";
var React = require('react/addons');
var PropTypes = React.PropTypes;
var Header = require('./Header');
var Viewport = require('./Viewport');
var ColumnMetrics = require('./ColumnMetrics');
var DOMMetrics = require('./DOMMetrics');
var GridScrollMixin = {
componentDidMount() {
this._scrollLeft = this.refs.viewport.getScroll().scrollLeft;
this._onScroll();
},
<|fim▁hole|>
componentWillMount() {
this._scrollLeft = undefined;
},
componentWillUnmount() {
this._scrollLeft = undefined;
},
onScroll({scrollLeft}) {
if (this._scrollLeft !== scrollLeft) {
this._scrollLeft = scrollLeft;
this._onScroll();
}
},
_onScroll() {
if (this._scrollLeft !== undefined) {
this.refs.header.setScrollLeft(this._scrollLeft);
this.refs.viewport.setScrollLeft(this._scrollLeft);
}
}
};
var Grid = React.createClass({
mixins: [
GridScrollMixin,
ColumnMetrics.Mixin,
DOMMetrics.MetricsComputatorMixin
],
propTypes: {
rows: PropTypes.oneOfType([PropTypes.array, PropTypes.func]).isRequired,
columns: PropTypes.array.isRequired
},
getStyle: function(){
return{
overflowX: 'scroll',
overflowY: 'hidden',
outline: 0,
position: 'relative',
minHeight: this.props.minHeight
}
},
render() {
var headerRows = this.props.headerRows || [{ref : 'row'}];
return (
<div {...this.props} style={this.getStyle()} className="react-grid-Grid">
<Header
ref="header"
columns={this.state.columns}
onColumnResize={this.onColumnResize}
height={this.props.rowHeight}
totalWidth={this.DOMMetrics.gridWidth()}
headerRows={headerRows}
/>
<Viewport
ref="viewport"
width={this.state.columns.width}
rowHeight={this.props.rowHeight}
rowRenderer={this.props.rowRenderer}
cellRenderer={this.props.cellRenderer}
rows={this.props.rows}
selectedRows={this.props.selectedRows}
expandedRows={this.props.expandedRows}
length={this.props.length}
columns={this.state.columns}
totalWidth={this.DOMMetrics.gridWidth()}
onScroll={this.onScroll}
onRows={this.props.onRows}
rowOffsetHeight={this.props.rowOffsetHeight || this.props.rowHeight * headerRows.length}
/>
</div>
);
},
getDefaultProps() {
return {
rowHeight: 35,
minHeight: 350
};
},
});
module.exports = Grid;<|fim▁end|> | componentDidUpdate() {
this._onScroll();
}, |
<|file_name|>version.js<|end_file_name|><|fim▁begin|>/**
* Created by Guoliang Cui on 2015/5/5.
*/
var utility=require('../lib/utility');<|fim▁hole|> var resultObj;
resultObj=utility.jsonResult(false,"OK",{version:ConfigInfo.basicSettings[params.key]});
res.send(resultObj);
}<|fim▁end|> | var ConfigInfo=require("../../config/baseConfig")
exports.getVersion=function(req,res){
var params=utility.parseParams(req).query; |
<|file_name|>loading.js<|end_file_name|><|fim▁begin|>import './chunk-1fafdf15.js';
import { merge } from './helpers.js';
import { V as VueInstance } from './chunk-bd4264c6.js';
import { r as registerComponent, a as registerComponentProgrammatic, u as use } from './chunk-cca88db8.js';
import './chunk-b9bdb0e4.js';
import { L as Loading } from './chunk-1d4d79a1.js';
export { L as BLoading } from './chunk-1d4d79a1.js';
var localVueInstance;
var LoadingProgrammatic = {
open: function open(params) {
var defaultParam = {
programmatic: true
};<|fim▁hole|> return new LoadingComponent({
el: document.createElement('div'),
propsData: propsData
});
}
};
var Plugin = {
install: function install(Vue) {
localVueInstance = Vue;
registerComponent(Vue, Loading);
registerComponentProgrammatic(Vue, 'loading', LoadingProgrammatic);
}
};
use(Plugin);
export default Plugin;
export { LoadingProgrammatic };<|fim▁end|> | var propsData = merge(defaultParam, params);
var vm = typeof window !== 'undefined' && window.Vue ? window.Vue : localVueInstance || VueInstance;
var LoadingComponent = vm.extend(Loading); |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import url
from dojo.engagement import views
urlpatterns = [
# engagements and calendar
url(r'^calendar$', views.engagement_calendar, name='calendar'),
url(r'^calendar/engagements$', views.engagement_calendar, name='engagement_calendar'),<|fim▁hole|> url(r'^engagement/(?P<eid>\d+)$', views.view_engagement,
name='view_engagement'),
url(r'^engagement/(?P<eid>\d+)/ics$', views.engagement_ics,
name='engagement_ics'),
url(r'^engagement/(?P<eid>\d+)/edit$', views.edit_engagement,
name='edit_engagement'),
url(r'^engagement/(?P<eid>\d+)/delete$', views.delete_engagement,
name='delete_engagement'),
url(r'^engagement/(?P<eid>\d+)/add_tests$', views.add_tests,
name='add_tests'),
url(r'^engagement/(?P<eid>\d+)/import_scan_results$',
views.import_scan_results, name='import_scan_results'),
url(r'^engagement/(?P<eid>\d+)/close$', views.close_eng,
name='close_engagement'),
url(r'^engagement/(?P<eid>\d+)/reopen$', views.reopen_eng,
name='reopen_engagement'),
url(r'^engagement/(?P<eid>\d+)/complete_checklist$',
views.complete_checklist, name='complete_checklist'),
url(r'^engagement/(?P<eid>\d+)/upload_risk_acceptance$',
views.upload_risk, name='upload_risk_acceptance$'),
url(r'^engagement/(?P<eid>\d+)/risk_approval/(?P<raid>\d+)$',
views.view_risk, name='view_risk'),
url(r'^engagement/(?P<eid>\d+)/risk_approval/(?P<raid>\d+)/delete$',
views.delete_risk, name='delete_risk'),
url(r'^engagement/(?P<eid>\d+)/risk_approval/(?P<raid>\d+)/download$',
views.download_risk, name='download_risk'),
url(r'^engagement/(?P<eid>\d+)/threatmodel$', views.view_threatmodel,
name='view_threatmodel'),
url(r'^engagement/(?P<eid>\d+)/threatmodel/upload$',
views.upload_threatmodel, name='upload_threatmodel'),
]<|fim▁end|> | url(r'^engagement$', views.engagement, name='engagement'),
url(r'^engagement/new$', views.new_engagement, name='new_eng'), |
<|file_name|>actual.js<|end_file_name|><|fim▁begin|>declare class X {<|fim▁hole|><|fim▁end|> | a: number;
static b: number;
c: number;
} |
<|file_name|>pad-string.d.ts<|end_file_name|><|fim▁begin|><|fim▁hole|>export default function padString(input: string): string;<|fim▁end|> | |
<|file_name|>logging_test.py<|end_file_name|><|fim▁begin|>from pathlib import Path
import pytest
from loguru import logger
from libretime_shared.logging import (
DEBUG,
INFO,
create_task_logger,
level_from_name,
setup_logger,
)
@pytest.mark.parametrize(
"name,level_name,level_no",
[
("error", "error", 40),
("warning", "warning", 30),
("info", "info", 20),
("debug", "debug", 10),
("trace", "trace", 5),<|fim▁hole|>def test_level_from_name(name, level_name, level_no):
level = level_from_name(name)
assert level.name == level_name
assert level.no == level_no
def test_level_from_name_invalid():
with pytest.raises(ValueError):
level_from_name("invalid")
def test_setup_logger(tmp_path: Path):
log_filepath = tmp_path / "test.log"
extra_log_filepath = tmp_path / "extra.log"
setup_logger(INFO, log_filepath)
extra_logger = create_task_logger(DEBUG, extra_log_filepath, True)
logger.info("test info")
extra_logger.info("extra info")
logger.debug("test debug")
extra_logger.complete()
logger.complete()
assert len(log_filepath.read_text(encoding="utf-8").splitlines()) == 1
assert len(extra_log_filepath.read_text(encoding="utf-8").splitlines()) == 1<|fim▁end|> | ],
) |
<|file_name|>RemoveDuplicatesfromSortedList.java<|end_file_name|><|fim▁begin|>package remove_duplicates_from_sorted_list;
import common.ListNode;
public class RemoveDuplicatesfromSortedList {
public class Solution {
public ListNode deleteDuplicates(ListNode head) {
if (head != null) {
ListNode pre = head;
ListNode p = pre.next;
while (p != null) {
if (p.val == pre.val) {
pre.next = p.next;
} else {
pre = p;
}
p = p.next;
}
}
return head;
}
}
public static class UnitTest {<|fim▁hole|>
}
}<|fim▁end|> | |
<|file_name|>labeledStatementWithLabel.ts<|end_file_name|><|fim▁begin|>// @lib: es2015
label: function fn() { }
label: function* gen() { }
label: async function gen1() { }
label: enum E {}
label: interface I {}
label: class C { }
label: var a = 1;
label: let b = 1;
label: const c = 1;
label: module M { }<|fim▁hole|><|fim▁end|> | label: namespace N {}
label: type T = {} |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import TemplateView
from farxiv.forms import *
class Index(TemplateView):
template_name = 'index.html'
def get_context_data(self, **kwargs):
context = super(TemplateView, self).get_context_data(**kwargs)
context['user']=self.request.user
return context
class SubmitFarticle(TemplateView):
template_name = 'submit.html'
def get_context_data(self, **kwargs):
context = super(TemplateView, self).get_context_data(**kwargs)
context['form'] = FarticleForm(instance=self.request.user)
return context
class ViewFarticle(TemplateView):
template_name='view.html'
def get_context_data(self, **kwargs):
context = super(TemplateView, self).get_context_data(**kwargs)
farticle_id = self.kwargs['fid']
farticle = Farticle.objects.get(id = farticle_id)
context['author'] = farticle.author
context['title'] = farticle.title
if farticle.render_type=="quick":
# Does this actually work? Doubt it...
template_name='view.html'
farticle = QuickFarticle.objects.get(id = farticle_id)
context['steps'] = farticle.steps
context['problems'] = farticle.problems
context['suggestions'] = farticle.suggestions
return context
else:
return context
class ViewExampleFarticle(TemplateView):<|fim▁hole|><|fim▁end|> | template_name='farticle.html' |
<|file_name|>ElementLoader.go<|end_file_name|><|fim▁begin|>package loaders
import (
"encoding/json"
"io/ioutil"
"github.com/ftcjeff/ConfigurationProcessor/logger"
"github.com/ftcjeff/ConfigurationProcessor/types"
)
func ElementLoader(id int, fileName string, channel chan types.ElementChannel) {
defer logger.Trace(logger.Enter())
raw, err := ioutil.ReadFile(fileName)
if err != nil {
panic(err)
}
var e types.ElementDataType
err = json.Unmarshal(raw, &e)
if err != nil {<|fim▁hole|> var ec types.ElementChannel
ec.Id = id
ec.Element = e
channel <- ec
}<|fim▁end|> | panic(err)
}
|
<|file_name|>tanh.rs<|end_file_name|><|fim▁begin|>#![feature(test)]
extern crate test;
extern crate collenchyma as co;
extern crate collenchyma_nn as co_nn;
extern crate rand;
use test::Bencher;
use co::prelude::*;
use co_nn::*;
use rand::{thread_rng, Rng};
fn backend() -> Backend<Native> {
Backend::<Native>::default().unwrap()
}
fn arguments<T: IFramework + Clone>(backend: &Backend<T>, size: usize) -> (SharedTensor<f32>, SharedTensor<f32>) {
let mut rng = thread_rng();
let slice_x = rng.gen_iter::<f32>().take(size).collect::<Vec<f32>>();
let mut x = SharedTensor::<f32>::new(backend.device(), &size).unwrap();
let out = SharedTensor::<f32>::new(backend.device(), &size).unwrap();
x.get_mut(backend.device()).unwrap().as_mut_native().unwrap().as_mut_slice().clone_from_slice(&slice_x);
(x, out)
}
fn arguments_grad<T: IFramework + Clone>(backend: &Backend<T>, size: usize) -> (SharedTensor<f32>, SharedTensor<f32>, SharedTensor<f32>, SharedTensor<f32>) {
let mut rng = thread_rng();
let slice_x = rng.gen_iter::<f32>().take(size).collect::<Vec<f32>>();
let mut x = SharedTensor::<f32>::new(backend.device(), &size).unwrap();
let mut dx = SharedTensor::<f32>::new(backend.device(), &size).unwrap();
let mut out = SharedTensor::<f32>::new(backend.device(), &size).unwrap();
let dout = SharedTensor::<f32>::new(backend.device(), &size).unwrap();
x.get_mut(backend.device()).unwrap().as_mut_native().unwrap().as_mut_slice().clone_from_slice(&slice_x);
dx.get_mut(backend.device()).unwrap().as_mut_native().unwrap().as_mut_slice().clone_from_slice(&slice_x);
out.get_mut(backend.device()).unwrap().as_mut_native().unwrap().as_mut_slice().clone_from_slice(&slice_x);
(x, dx, out, dout)
}
#[inline(never)]
fn bench_profile<F: FnMut() -> ()>(
b: &mut Bencher,
mut bench_func: F,
times: usize
) {
b.iter(|| { for _ in 0..times { bench_func(); } });
}
#[bench]
fn bench_1000_tanh_100_native(b: &mut Bencher) {
let backend = backend();
let (mut x, mut out) = arguments(&backend, 100);
let mut func = || { let _ = backend.tanh_plain(&mut x, &mut out); };
{ func(); bench_profile(b, func, 1000); }
}
#[bench]
fn bench_10_tanh_10000_native(b: &mut Bencher) {
let backend = backend();
let (mut x, mut out) = arguments(&backend, 10000);
let mut func = || { let _ = backend.tanh_plain(&mut x, &mut out); };
{ func(); bench_profile(b, func, 10); }
}
#[bench]
fn bench_1000_tanh_grad_100_native(b: &mut Bencher) {
let backend = backend();
let (mut x, mut dx, mut out, mut dout) = arguments_grad(&backend, 100);
let mut func = || { let _ = backend.tanh_grad_plain(&mut x, &mut dx, &mut out, &mut dout); };
{ func(); bench_profile(b, func, 1000); }
}
<|fim▁hole|> let mut func = || { let _ = backend.tanh_grad_plain(&mut x, &mut dx, &mut out, &mut dout); };
{ func(); bench_profile(b, func, 10); }
}<|fim▁end|> | #[bench]
fn bench_10_tanh_grad_10000_native(b: &mut Bencher) {
let backend = backend();
let (mut x, mut dx, mut out, mut dout) = arguments_grad(&backend, 10000); |
<|file_name|>create_signercert.go<|end_file_name|><|fim▁begin|>package admin
import (
"errors"
"fmt"
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/openshift/origin/pkg/cmd/server/crypto"
)
type CreateSignerCertOptions struct {
CertFile string
KeyFile string
SerialFile string
Name string
Overwrite bool
}
func BindSignerCertOptions(options *CreateSignerCertOptions, flags *pflag.FlagSet, prefix string) {
flags.StringVar(&options.CertFile, prefix+"cert", "openshift.local.certificates/ca/cert.crt", "The certificate file.")
flags.StringVar(&options.KeyFile, prefix+"key", "openshift.local.certificates/ca/key.key", "The key file.")
flags.StringVar(&options.SerialFile, prefix+"serial", "openshift.local.certificates/ca/serial.txt", "The serial file that keeps track of how many certs have been signed.")
flags.StringVar(&options.Name, prefix+"name", DefaultSignerName(), "The name of the signer.")
flags.BoolVar(&options.Overwrite, prefix+"overwrite", options.Overwrite, "Overwrite existing cert files if found. If false, any existing file will be left as-is.")
}
func NewCommandCreateSignerCert() *cobra.Command {
options := &CreateSignerCertOptions{Overwrite: true}
cmd := &cobra.Command{
Use: "create-signer-cert",
Short: "Create signer certificate",
Run: func(c *cobra.Command, args []string) {
if err := options.Validate(args); err != nil {
fmt.Println(err.Error())
c.Help()
return
}
if _, err := options.CreateSignerCert(); err != nil {
glog.Fatal(err)
}
},
}
BindSignerCertOptions(options, cmd.Flags(), "")
return cmd
}
func (o CreateSignerCertOptions) Validate(args []string) error {
if len(args) != 0 {
return errors.New("no arguments are supported")
}
if len(o.CertFile) == 0 {
return errors.New("cert must be provided")
}
if len(o.KeyFile) == 0 {
return errors.New("key must be provided")
}
if len(o.SerialFile) == 0 {
return errors.New("serial must be provided")
}
if len(o.Name) == 0 {
return errors.New("name must be provided")
}
return nil
}
func (o CreateSignerCertOptions) CreateSignerCert() (*crypto.CA, error) {
glog.V(2).Infof("Creating a signer cert with: %#v", o)
<|fim▁hole|> return crypto.MakeCA(o.CertFile, o.KeyFile, o.SerialFile, o.Name)
} else {
return crypto.EnsureCA(o.CertFile, o.KeyFile, o.SerialFile, o.Name)
}
}<|fim▁end|> | if o.Overwrite { |
<|file_name|>options.rs<|end_file_name|><|fim▁begin|>//! This module contains the configuration of the application.
//!
//! All options are passed individually to each function and are not bundled
//! together.
//!
//! # Examples
//!
//! ```no_run
//! # use doh::Options;
//! let options = Options::parse();
//! println!("Showing {}", options.remote_dir);
//! ```
use clap::{AppSettings, Arg};
use reqwest::Url;
/// Representation of the application's all configurable values.
#[derive(Debug, Clone, Hash, PartialEq, Eq)]
pub struct Options {
/// Remote directory to start on.
pub remote_dir: Url,
}
impl Options {
/// Parse `env`-wide command-line arguments into an `Options` instance
pub fn parse() -> Options {
let matches = app_from_crate!("\n")
.setting(AppSettings::ColoredHelp)
.arg(Arg::from_usage("<URL> 'Remote directory to browse'").validator(Options::url_validator))
.get_matches();<|fim▁hole|> Options {
remote_dir: Url::parse(u)
.or_else(|_| Url::parse(&format!("http://{}", u)))
.unwrap(),
}
}
fn url_validator(s: String) -> Result<(), String> {
Url::parse(&s)
.or_else(|_| Url::parse(&format!("http://{}", s)))
.map(|_| ())
.map_err(|e| e.to_string())
}
}<|fim▁end|> |
let u = matches.value_of("URL").unwrap(); |
<|file_name|>value.rs<|end_file_name|><|fim▁begin|>use std::ffi::CString;
use std::{fmt, mem, ptr};
use std::ops::{Deref, Index};
use libc::{c_char, c_int, c_uint};
use ffi::core;
use ffi::prelude::LLVMValueRef;
use ffi::LLVMAttribute;
use ffi::core::{
LLVMConstStringInContext,
LLVMConstStructInContext,
LLVMConstVector,
LLVMGetValueName,
LLVMSetValueName,
LLVMGetElementType,
LLVMGetEntryBasicBlock,
LLVMAppendBasicBlockInContext,
LLVMAddAttribute,
LLVMGetAttribute,
LLVMRemoveAttribute,
LLVMAddFunctionAttr,
LLVMGetFunctionAttr,
LLVMRemoveFunctionAttr,
LLVMGetParam,
LLVMCountParams,
LLVMGetFirstParam,
LLVMGetNextParam,
LLVMGetInitializer,
LLVMSetInitializer,
LLVMIsAGlobalValue,
LLVMGetUndef,
LLVMTypeOf,
};
use block::BasicBlock;
use context::{Context, GetContext};
use util::{self, CastFrom};
use ty::{FunctionType, Type};
/// A typed value that can be used as an operand in instructions.
pub struct Value;
native_ref!(&Value = LLVMValueRef);
impl_display!(Value, LLVMPrintValueToString);
impl Value
{
/// Create a new constant struct from the values given.
pub fn new_struct<'a>(context: &'a Context,
vals: &[&'a Value], packed: bool) -> &'a Value
{
unsafe {
LLVMConstStructInContext(context.into(),
vals.as_ptr() as *mut LLVMValueRef,
vals.len() as c_uint,
packed as c_int)
}.into()
}
/// Create a new constant vector from the values given.
pub fn new_vector<'a>(vals: &[&'a Value]) -> &'a Value
{
unsafe {
LLVMConstVector(vals.as_ptr() as *mut LLVMValueRef,
vals.len() as c_uint)
}.into()
}
/// Create a new constant C string from the text given.
pub fn new_string<'a>(context: &'a Context,
text: &str,
rust_style: bool) -> &'a Value
{
unsafe {
let ptr = text.as_ptr() as *const c_char;
let len = text.len() as c_uint;
LLVMConstStringInContext(context.into(), ptr, len, rust_style as c_int).into()
}
}
/// Create a new constant undefined value of the given type.
pub fn new_undef<'a>(ty: &'a Type) -> &'a Value
{
unsafe { LLVMGetUndef(ty.into()) }.into()
}
/// Returns the name of this value, or `None` if it lacks a name
pub fn get_name(&self) -> Option<&str>
{
unsafe {
let c_name = LLVMGetValueName(self.into());
util::to_null_str(c_name as *mut i8)
}
}
/// Sets the name of this value
pub fn set_name(&self, name: &str)
{
let c_name = CString::new(name).unwrap();
unsafe {
LLVMSetValueName(self.into(), c_name.as_ptr())
}
}
/// Returns the type of this value
pub fn get_type(&self) -> &Type
{
unsafe { LLVMTypeOf(self.into()) }.into()
}
}
pub struct GlobalValue;
native_ref!(&GlobalValue = LLVMValueRef);
deref!(GlobalValue, Value);
impl GlobalValue
{
/// Sets the initial value for this global.
pub fn set_initializer(&self, value: &Value)
{
unsafe { LLVMSetInitializer(self.into(), value.into()) }
}
/// Gets the initial value for this global.
pub fn get_initializer(&self) -> &Value
{
unsafe { LLVMGetInitializer(self.into()) }.into()
}
}
impl CastFrom for GlobalValue
{
type From = Value;
fn cast<'a>(val: &'a Value) -> Option<&'a GlobalValue>
{
let gv = unsafe { LLVMIsAGlobalValue(val.into()) };
if gv == ptr::null_mut() {
None
} else {
Some(gv.into())
}
}
}
/// Comparative operations on values.
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum Predicate
{
Equal,
NotEqual,
GreaterThan,
GreaterThanOrEqual,
LessThan,
LessThanOrEqual
}
/// A function argument.
pub struct Arg;
native_ref!(&Arg = LLVMValueRef);
impl Deref for Arg
{
type Target = Value;
fn deref(&self) -> &Value
{
unsafe { mem::transmute(self) }
}
}
impl Arg
{
/// Add the attribute given to this argument.
pub fn add_attribute(&self, attr: Attribute)
{
unsafe { LLVMAddAttribute(self.into(), attr.into()) }
}
/// Add all the attributes given to this argument.
pub fn add_attributes(&self, attrs: &[Attribute])
{
let mut sum = LLVMAttribute::empty();
for attr in attrs {
let attr:LLVMAttribute = (*attr).into();
sum = sum | attr;
}
unsafe { LLVMAddAttribute(self.into(), sum.into()) }
}
/// Returns true if this argument has the attribute given.
pub fn has_attribute(&self, attr: Attribute) -> bool
{
unsafe {
let other = LLVMGetAttribute(self.into());
other.contains(attr.into())
}
}
/// Returns true if this argument has all the attributes given.
pub fn has_attributes(&self, attrs: &[Attribute]) -> bool
{
unsafe {
let other = LLVMGetAttribute(self.into());
for &attr in attrs {
if !other.contains(attr.into()) {
return false;
}
}
return true;
}
}
/// Remove an attribute from this argument.
pub fn remove_attribute(&self, attr: Attribute)
{
unsafe {
LLVMRemoveAttribute(self.into(), attr.into())
}
}
}
/// A function that can be called and contains blocks.
pub struct Function;
native_ref!(&Function = LLVMValueRef);
impl Deref for Function
{
type Target = Value;
fn deref(&self) -> &Value
{
unsafe { mem::transmute(self) }
}
}
impl Index<usize> for Function
{
type Output = Arg;
fn index(&self, index: usize) -> &Arg
{
unsafe {
if index < LLVMCountParams(self.into()) as usize {
LLVMGetParam(self.into(), index as c_uint).into()
} else {
panic!("no such index {} on {:?}", index, self.get_type())
}
}
}
}
impl CastFrom for Function
{
type From = Value;
fn cast<'a>(val: &'a Value) -> Option<&'a Function>
{
let ty = val.get_type();
let mut is_func = ty.is_function();
if let Some(elem) = ty.get_element() {
is_func = is_func || elem.is_function()
}
if is_func {
Some(unsafe { mem::transmute(val) })
} else {
None
}
}
}
impl Function
{
/// Add a basic block with the name given to the function and return it.
pub fn append<'a>(&'a self, name: &str) -> &'a BasicBlock
{
util::with_cstr(name, |ptr| unsafe {
LLVMAppendBasicBlockInContext(self.get_context().into(), self.into(), ptr).into()
})
}
/// Returns the entry block of this function or `None` if there is none.
pub fn get_entry(&self) -> Option<&BasicBlock>
{
unsafe { mem::transmute(LLVMGetEntryBasicBlock(self.into())) }
}
/// Returns the name of this function.
pub fn get_name(&self) -> &str
{
unsafe {
let c_name = LLVMGetValueName(self.into());
util::to_str(c_name as *mut i8)<|fim▁hole|>
/// Returns the function signature representing this function's signature.
pub fn get_signature(&self) -> &FunctionType
{
unsafe {
let ty = LLVMTypeOf(self.into());
LLVMGetElementType(ty).into()
}
}
/// Returns the number of function parameters
pub fn num_params(&self) -> usize
{
unsafe {
LLVMCountParams(self.into()) as usize
}
}
/// Add the attribute given to this function.
pub fn add_attribute(&self, attr: Attribute)
{
unsafe { LLVMAddFunctionAttr(self.into(), attr.into()) }
}
/// Add all the attributes given to this function.
pub fn add_attributes(&self, attrs: &[Attribute])
{
let mut sum = LLVMAttribute::empty();
for attr in attrs {
let attr:LLVMAttribute = (*attr).into();
sum = sum | attr;
}
unsafe { LLVMAddFunctionAttr(self.into(), sum.into()) }
}
/// Returns true if the attribute given is set in this function.
pub fn has_attribute(&self, attr: Attribute) -> bool
{
unsafe {
let other = LLVMGetFunctionAttr(self.into());
other.contains(attr.into())
}
}
/// Returns true if all the attributes given is set in this function.
pub fn has_attributes(&self, attrs: &[Attribute]) -> bool
{
unsafe {
let other = LLVMGetFunctionAttr(self.into());
for &attr in attrs {
if !other.contains(attr.into()) {
return false;
}
}
return true;
}
}
/// Remove the attribute given from this function.
pub fn remove_attribute(&self, attr: Attribute)
{
unsafe { LLVMRemoveFunctionAttr(self.into(), attr.into()) }
}
}
impl<'a> IntoIterator for &'a Function
{
type Item = &'a Arg;
type IntoIter = ValueIter<'a, &'a Arg>;
/// Iterate through the functions in the module
fn into_iter(self) -> ValueIter<'a, &'a Arg>
{
ValueIter::new(
unsafe { LLVMGetFirstParam(self.into()) },
LLVMGetNextParam)
}
}
impl GetContext for Function
{
fn get_context(&self) -> &Context
{
self.get_type().get_context()
}
}
/// A way of indicating to LLVM how you want arguments / functions to be handled.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
#[repr(C)]
pub enum Attribute
{
/// Zero-extended before or after call.
ZExt = 0b1,
/// Sign-extended before or after call.
SExt = 0b10,
/// Mark the function as not returning.
NoReturn = 0b100,
/// Force argument to be passed in register.
InReg = 0b1000,
/// Hidden pointer to structure to return.
StructRet = 0b10000,
/// Function doesn't unwind stack.
NoUnwind = 0b100000,
/// Consider to not alias after call.
NoAlias = 0b1000000,
/// Pass structure by value.
ByVal = 0b10000000,
/// Nested function static chain.
Nest = 0b100000000,
/// Function doesn't access memory.
ReadNone = 0b1000000000,
/// Function only reads from memory.
ReadOnly = 0b10000000000,
/// Never inline this function.
NoInline = 0b100000000000,
/// Always inline this function.
AlwaysInline = 0b1000000000000,
/// Optimize this function for size.
OptimizeForSize = 0b10000000000000,
/// Stack protection.
StackProtect = 0b100000000000000,
/// Stack protection required.
StackProtectReq = 0b1000000000000000,
/// Alignment of parameter (5 bits) stored as log2 of alignment with +1 bias 0 means unaligned (different from align(1)).
Alignment = 0b10000000000000000,
/// Function creates no aliases of pointer.
NoCapture = 0b100000000000000000,
/// Disable redzone.
NoRedZone = 0b1000000000000000000,
/// Disable implicit float instructions.
NoImplicitFloat = 0b10000000000000000000,
/// Naked function.
Naked = 0b100000000000000000000,
/// The source language has marked this function as inline.
InlineHint = 0b1000000000000000000000,
/// Alignment of stack for function (3 bits) stored as log2 of alignment with +1 bias 0 means unaligned (different from alignstack=(1)).
StackAlignment = 0b11100000000000000000000000000,
/// This function returns twice.
ReturnsTwice = 0b100000000000000000000000000000,
/// Function must be in unwind table.
UWTable = 0b1000000000000000000000000000000,
/// Function is called early/often, so lazy binding isn't effective.
NonLazyBind = 0b10000000000000000000000000000000
}
impl From<LLVMAttribute> for Attribute
{
fn from(attr: LLVMAttribute) -> Attribute
{
unsafe { mem::transmute(attr) }
}
}
impl From<Attribute> for LLVMAttribute
{
fn from(attr: Attribute) -> LLVMAttribute
{
unsafe { mem::transmute(attr) }
}
}
impl GetContext for Value
{
fn get_context(&self) -> &Context
{
self.get_type().get_context()
}
}
/// Value Iterator implementation.
///
/// T can be all descendent types of LLVMValueRef.
#[derive(Copy, Clone)]
pub struct ValueIter<'a, T: From<LLVMValueRef>> {
cur : LLVMValueRef,
step : unsafe extern "C" fn(LLVMValueRef) -> LLVMValueRef,
marker1: ::std::marker::PhantomData<&'a ()>,
marker2: ::std::marker::PhantomData<T>,
}
impl<'a, T: From<LLVMValueRef>> ValueIter<'a, T>
{
pub fn new(cur: LLVMValueRef,
step: unsafe extern "C" fn(LLVMValueRef) -> LLVMValueRef) -> Self
{
ValueIter {
cur: cur,
step: step,
marker1: ::std::marker::PhantomData,
marker2: ::std::marker::PhantomData
}
}
}
impl<'a, T: From<LLVMValueRef>> Iterator for ValueIter<'a, T>
{
type Item = T;
fn next(&mut self) -> Option<T>
{
let old: LLVMValueRef = self.cur;
if !old.is_null() {
self.cur = unsafe { (self.step)(old) };
Some(old.into())
} else {
None
}
}
}<|fim▁end|> | }
} |
<|file_name|>FormRecordLoaderTask.java<|end_file_name|><|fim▁begin|>package org.commcare.tasks;
import android.content.Context;
import android.text.TextUtils;
import android.text.format.DateUtils;
import android.util.Pair;
import org.commcare.CommCareApplication;
import org.commcare.models.AndroidSessionWrapper;
import org.commcare.models.database.AndroidSandbox;
import org.commcare.models.database.SqlStorage;
import org.commcare.android.database.user.models.FormRecord;
import org.commcare.android.database.user.models.SessionStateDescriptor;
import org.commcare.suite.model.Text;
import org.commcare.tasks.templates.ManagedAsyncTask;
import org.commcare.util.FormDataUtil;
import org.commcare.utils.AndroidCommCarePlatform;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.LinkedList;
import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.Set;
/**
* Loads textual information for a list of FormRecords.
* <p/>
* This text currently includes the form name, record title, and last modified
* date
*
* @author ctsims
*/
public class FormRecordLoaderTask extends ManagedAsyncTask<FormRecord, Pair<FormRecord, ArrayList<String>>, Integer> {
private Hashtable<String, String> descriptorCache;
private final SqlStorage<SessionStateDescriptor> descriptorStorage;
private final AndroidCommCarePlatform platform;
private Hashtable<Integer, String[]> searchCache;
private final Context context;
// Functions to call when some or all of the data has been loaded. Data
// can be loaded normally, or be given precedence (priority), determining
// which callback is dispatched to the listeners.
private final ArrayList<FormRecordLoadListener> listeners = new ArrayList<>();
// These are all synchronized together
final private Queue<FormRecord> priorityQueue = new LinkedList<>();
// The IDs of FormRecords that have been loaded
private final Set<Integer> loaded = new HashSet<>();
// Maps form namespace (unique id for forms) to their form title
// (entry-point text). Needed because FormRecords don't have form title
// info, but do have the namespace.
private Hashtable<String, Text> formNames;
// Is the background task done loading all the FormRecord information?
private boolean loadingComplete = false;
public FormRecordLoaderTask(Context c, SqlStorage<SessionStateDescriptor> descriptorStorage, AndroidCommCarePlatform platform) {
this(c, descriptorStorage, null, platform);
}
private FormRecordLoaderTask(Context c, SqlStorage<SessionStateDescriptor> descriptorStorage, Hashtable<String, String> descriptorCache, AndroidCommCarePlatform platform) {
this.context = c;
this.descriptorStorage = descriptorStorage;
this.descriptorCache = descriptorCache;
this.platform = platform;
}
/**
* Create a copy of this loader task.
*/
public FormRecordLoaderTask spawn() {
FormRecordLoaderTask task = new FormRecordLoaderTask(context, descriptorStorage, descriptorCache, platform);
task.setListeners(listeners);
return task;
}
/**
* Pass in hashtables that will be used to store data that is loaded.
*
* @param searchCache maps FormRecord ID to an array of query-able form descriptor text
* @param formNames map from form namespaces to their titles
*/
public void init(Hashtable<Integer, String[]> searchCache, Hashtable<String, Text> formNames) {
this.searchCache = searchCache;
if (descriptorCache == null) {
descriptorCache = new Hashtable<>();
}
priorityQueue.clear();
loaded.clear();
this.formNames = formNames;
}
/**
* Set the listeners list, whose callbacks will be executed once the data
* has been loaded.
*
* @param listeners a list of objects to call when data is done loading
*/
private void setListeners(ArrayList<FormRecordLoadListener> listeners) {
this.listeners.addAll(listeners);
}
/**
* Add a listener to the list that is called once the data has been loaded.
*
* @param listener an objects to call when data is done loading
*/
public void addListener(FormRecordLoadListener listener) {
this.listeners.add(listener);
}
@Override
protected Integer doInBackground(FormRecord... params) {
// Load text information for every FormRecord passed in, unless task is
// cancelled before that.
FormRecord current;
int loadedFormCount = 0;
while (loadedFormCount < params.length && !isCancelled()) {
synchronized (priorityQueue) {
//If we have one to do immediately, grab it
if (!priorityQueue.isEmpty()) {
current = priorityQueue.poll();
} else {
current = params[loadedFormCount++];
}
if (loaded.contains(current.getID())) {
// skip if we already loaded this record due to priority queue
continue;
}
}
// load text about this record: last modified date, title of the record, and form name
ArrayList<String> recordTextDesc = loadRecordText(current);
loaded.add(current.getID());
// Copy data into search task and notify anything waiting on this
// record.
this.publishProgress(new Pair<>(current, recordTextDesc));
}
return 1;
}
private ArrayList<String> loadRecordText(FormRecord current) {
ArrayList<String> recordTextDesc = new ArrayList<>();
// Get the date in a searchable format.
recordTextDesc.add(DateUtils.formatDateTime(context, current.lastModified().getTime(), DateUtils.FORMAT_NO_MONTH_DAY | DateUtils.FORMAT_NO_YEAR).toLowerCase());
String dataTitle = current.getDescriptor();
if (TextUtils.isEmpty(dataTitle)) {
dataTitle = loadDataTitle(current.getID());
}
recordTextDesc.add(dataTitle);
if (formNames.containsKey(current.getFormNamespace())) {
Text name = formNames.get(current.getFormNamespace());
recordTextDesc.add(name.evaluate());
}
return recordTextDesc;
}
private String loadDataTitle(int formRecordId) {
// Grab our record hash
SessionStateDescriptor ssd = null;
try {
ssd = descriptorStorage.getRecordForValue(SessionStateDescriptor.META_FORM_RECORD_ID, formRecordId);
} catch (NoSuchElementException nsee) {
//s'all good
}
String dataTitle = "";
if (ssd != null) {
String descriptor = ssd.getSessionDescriptor();
if (!descriptorCache.containsKey(descriptor)) {
AndroidSessionWrapper asw = new AndroidSessionWrapper(platform);
asw.loadFromStateDescription(ssd);
try {
dataTitle =
FormDataUtil.getTitleFromSession(new AndroidSandbox(CommCareApplication.instance()),
asw.getSession(), asw.getEvaluationContext());
} catch (RuntimeException e) {
dataTitle = "[Unavailable]";
}
if (dataTitle == null) {
dataTitle = "";
}
descriptorCache.put(descriptor, dataTitle);
} else {
return descriptorCache.get(descriptor);
}
}
return dataTitle;
}
@Override
protected void onPreExecute() {
super.onPreExecute();
// Tell users of the data being loaded that it isn't ready yet.
this.loadingComplete = false;
}
/**
* Has all the FormRecords' textual data been loaded yet? Used to let
* users of the data only start accessing it once it is all there.
*/
public boolean doneLoadingFormRecords() {
return this.loadingComplete;
}
@Override
protected void onPostExecute(Integer result) {
super.onPostExecute(result);
this.loadingComplete = true;
for (FormRecordLoadListener listener : this.listeners) {
if (listener != null) {
listener.notifyLoaded();
}
}
// free up things we don't need to spawn new tasks
priorityQueue.clear();
loaded.clear();
formNames = null;
}
@Override
protected void onProgressUpdate(Pair<FormRecord, ArrayList<String>>... values) {
super.onProgressUpdate(values);
// copy a single form record's data out of method arguments
String[] vals = new String[values[0].second.size()];
for (int i = 0; i < vals.length; ++i) {
vals[i] = values[0].second.get(i);
}
// store the loaded data in the search cache
this.searchCache.put(values[0].first.getID(), vals);
for (FormRecordLoadListener listener : this.listeners) {
if (listener != null) {
// TODO PLM: pretty sure loaded.contains(values[0].first) is
// always true at this point.
listener.notifyPriorityLoaded(values[0].first,
loaded.contains(values[0].first.getID()));
}
}
}
<|fim▁hole|> if (loaded.contains(record.getID())) {
return false;
} else if (priorityQueue.contains(record)) {
// if we already have it in the queue, just move along
return true;
} else {
priorityQueue.add(record);
return true;
}
}
}
}<|fim▁end|> | public boolean registerPriority(FormRecord record) {
synchronized (priorityQueue) {
|
<|file_name|>is_pricelist_item.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from openerp import tools
from openerp import models,fields,api
from openerp.tools.translate import _
class is_pricelist_item(models.Model):
_name='is.pricelist.item'
_order='pricelist_name,price_version_id,sequence,product_id'
_auto = False
pricelist_name = fields.Char('Liste de prix')
pricelist_type = fields.Char('Type')
base = fields.Integer('Base')
price_version_id = fields.Many2one('product.pricelist.version', 'Version')
version_date_start = fields.Date('Date début version')
version_date_end = fields.Date('Date fin version')
product_id = fields.Many2one('product.product', 'Article')
gestionnaire_id = fields.Many2one('is.gestionnaire', 'Gestionnaire')
ref_client = fields.Char('Référence client')
ref_fournisseur = fields.Char('Référence fournisseur')
<|fim▁hole|> product_uom_id = fields.Many2one('product.uom', "Unité")
product_po_uom_id = fields.Many2one('product.uom', "Unité d'achat")
min_quantity = fields.Float('Quantité min.')
price_surcharge = fields.Float('Prix')
item_date_start = fields.Date('Date début ligne')
item_date_end = fields.Date('Date fin ligne')
def init(self, cr):
tools.drop_view_if_exists(cr, 'is_pricelist_item')
cr.execute("""
CREATE OR REPLACE view is_pricelist_item AS (
SELECT
ppi.id as id,
pl.name as pricelist_name,
pl.type as pricelist_type,
ppi.base as base,
ppi.price_version_id as price_version_id,
ppv.date_start as version_date_start,
ppv.date_end as version_date_end,
ppi.product_id as product_id,
pt.is_gestionnaire_id as gestionnaire_id,
pt.is_ref_client as ref_client,
pt.is_ref_fournisseur as ref_fournisseur,
pt.is_mold_dossierf as moule,
ppi.sequence as sequence,
pt.uom_id as product_uom_id,
pt.uom_po_id as product_po_uom_id,
ppi.min_quantity as min_quantity,
ppi.price_surcharge as price_surcharge,
ppi.date_start as item_date_start,
ppi.date_end as item_date_end
FROM product_pricelist_item ppi inner join product_product pp on ppi.product_id=pp.id
inner join product_template pt on pp.product_tmpl_id=pt.id
inner join product_pricelist_version ppv on ppi.price_version_id=ppv.id
inner join product_pricelist pl on ppv.pricelist_id = pl.id
WHERE ppi.id>0
)
""")
@api.multi
def action_liste_items(self):
for obj in self:
print obj.price_version_id.pricelist_id
if obj.price_version_id.pricelist_id.type=='sale':
view_id=self.env.ref('is_plastigray.is_product_pricelist_item_sale_tree_view').id
pricelist_type='sale'
else:
view_id=self.env.ref('is_plastigray.is_product_pricelist_item_purchase_tree_view').id
pricelist_type='purchase'
return {
'name': str(obj.pricelist_name)+" ("+str(obj.price_version_id.name)+")",
'view_mode': 'tree',
'view_type': 'form',
'res_model': 'product.pricelist.item',
'type': 'ir.actions.act_window',
#'view_id': view_id.id,
'view_id' : False,
'views' : [(view_id, 'tree')],
'domain': [('price_version_id','=',obj.price_version_id.id)],
'context': {
'default_price_version_id': obj.price_version_id.id,
'type': pricelist_type,
}
}
# return {
# 'name': u'Lignes des factures client actualisées à '+str(now),
# 'view_mode': 'tree,form,graph',
# 'view_type': 'form',
# 'view_id' : False,
# 'views' : [(view_id, 'tree'),(False, 'form'),(False, 'graph')],
# 'res_model': 'is.account.invoice.line',
# 'domain' : [('type','=', 'out_invoice')],
# 'type': 'ir.actions.act_window',
# }
@api.multi
def corriger_anomalie_pricelist(self):
for obj in self:
base=False
if obj.pricelist_type=='purchase' and obj.base!=2:
base=2
if obj.pricelist_type=='sale' and obj.base!=1:
base=1
if base:
items=self.env['product.pricelist.item'].browse(obj.id)
for item in items:
item.base=base<|fim▁end|> | moule = fields.Char('Moule ou Dossier F')
sequence = fields.Integer('Sequence')
|
<|file_name|>policy.py<|end_file_name|><|fim▁begin|># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""
import abc
import re
import urllib
import urllib2
from oslo.config import cfg
import six
from kwstandby.openstack.common import fileutils
from kwstandby.openstack.common.gettextutils import _
from kwstandby.openstack.common import jsonutils
from kwstandby.openstack.common import log as logging
policy_opts = [
cfg.StrOpt('policy_file',
default='policy.json',
help=_('JSON file containing policy')),
cfg.StrOpt('policy_default_rule',
default='default',
help=_('Rule enforced when requested rule is not found')),
]
CONF = cfg.CONF
CONF.register_opts(policy_opts)
LOG = logging.getLogger(__name__)
_checks = {}
class PolicyNotAuthorized(Exception):
def __init__(self, rule):
msg = _("Policy doesn't allow %s to be performed.") % rule
super(PolicyNotAuthorized, self).__init__(msg)
class Rules(dict):
"""A store for rules. Handles the default_rule setting directly."""
@classmethod
def load_json(cls, data, default_rule=None):
"""Allow loading of JSON rule data."""
# Suck in the JSON data and parse the rules
rules = dict((k, parse_rule(v)) for k, v in
jsonutils.loads(data).items())
return cls(rules, default_rule)
def __init__(self, rules=None, default_rule=None):
"""Initialize the Rules store."""
super(Rules, self).__init__(rules or {})
self.default_rule = default_rule
def __missing__(self, key):
"""Implements the default rule handling."""
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule or self.default_rule not in self:
raise KeyError(key)
return self[self.default_rule]
def __str__(self):
"""Dumps a string representation of the rules."""
# Start by building the canonical strings for the rules
out_rules = {}
for key, value in self.items():
# Use empty string for singleton TrueCheck instances
if isinstance(value, TrueCheck):
out_rules[key] = ''
else:
out_rules[key] = str(value)
# Dump a pretty-printed JSON representation
return jsonutils.dumps(out_rules, indent=4)
class Enforcer(object):
"""Responsible for loading and enforcing rules.
:param policy_file: Custom policy file to use, if none is
specified, `CONF.policy_file` will be
used.
:param rules: Default dictionary / Rules to use. It will be
considered just in the first instantiation. If
`load_rules(True)`, `clear()` or `set_rules(True)`
is called this will be overwritten.
:param default_rule: Default rule to use, CONF.default_rule will
be used if none is specified.
"""
def __init__(self, policy_file=None, rules=None, default_rule=None):
self.rules = Rules(rules)
self.default_rule = default_rule or CONF.policy_default_rule
self.policy_path = None
self.policy_file = policy_file or CONF.policy_file
def set_rules(self, rules, overwrite=True):
"""Create a new Rules object based on the provided dict of rules.
:param rules: New rules to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
"""
if not isinstance(rules, dict):
raise TypeError(_("Rules must be an instance of dict or Rules, "
"got %s instead") % type(rules))
if overwrite:
self.rules = Rules(rules)
else:
            self.rules.update(rules)
def clear(self):
"""Clears Enforcer rules, policy's cache and policy's path."""
self.set_rules({})
self.policy_path = None
def load_rules(self, force_reload=False):
"""Loads policy_path's rules.
Policy file is cached and will be reloaded if modified.
:param force_reload: Whether to overwrite current rules.
"""
if not self.policy_path:
self.policy_path = self._get_policy_path()
reloaded, data = fileutils.read_cached_file(self.policy_path,
force_reload=force_reload)
if reloaded:
rules = Rules.load_json(data, self.default_rule)
self.set_rules(rules)
LOG.debug(_("Rules successfully reloaded"))
def _get_policy_path(self):
"""Locate the policy json data file.
:param policy_file: Custom policy file to locate.
:returns: The policy path
:raises: ConfigFilesNotFoundError if the file couldn't
be located.
"""
policy_file = CONF.find_file(self.policy_file)
if policy_file:
return policy_file
raise cfg.ConfigFilesNotFoundError(path=CONF.policy_file)
def enforce(self, rule, target, creds, do_raise=False,
exc=None, *args, **kwargs):
"""Checks authorization of a rule against the target and credentials.
:param rule: A string or BaseCheck instance specifying the rule
to evaluate.
:param target: As much information about the object being operated
on as possible, as a dictionary.
:param creds: As much information about the user performing the
action as possible, as a dictionary.
:param do_raise: Whether to raise an exception or not if check
fails.
:param exc: Class of the exception to raise if the check fails.
Any remaining arguments passed to check() (both
positional and keyword arguments) will be passed to
the exception class. If not specified, PolicyNotAuthorized
will be used.
:return: Returns False if the policy does not allow the action and
exc is not provided; otherwise, returns a value that
evaluates to True. Note: for rules using the "case"
expression, this True value will be the specified string
from the expression.
"""
# NOTE(flaper87): Not logging target or creds to avoid
# potential security issues.
LOG.debug(_("Rule %s will be now enforced") % rule)
self.load_rules()
# Allow the rule to be a Check tree
if isinstance(rule, BaseCheck):
result = rule(target, creds, self)
elif not self.rules:
# No rules to reference means we're going to fail closed
result = False
else:
try:
# Evaluate the rule
result = self.rules[rule](target, creds, self)
except KeyError:
LOG.debug(_("Rule [%s] doesn't exist") % rule)
# If the rule doesn't exist, fail closed
result = False
# If it is False, raise the exception if requested
if do_raise and not result:
if exc:
raise exc(*args, **kwargs)
raise PolicyNotAuthorized(rule)
return result
class BaseCheck(object):
"""Abstract base class for Check classes."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __str__(self):
"""String representation of the Check tree rooted at this node."""
pass
@abc.abstractmethod
    def __call__(self, target, cred, enforcer):
"""Triggers if instance of the class is called.
Performs the check. Returns False to reject the access or a
        true value (not necessarily True) to accept the access.
"""
pass
class FalseCheck(BaseCheck):
"""A policy check that always returns False (disallow)."""
def __str__(self):
"""Return a string representation of this check."""
return "!"
    def __call__(self, target, cred, enforcer):
"""Check the policy."""
return False
class TrueCheck(BaseCheck):
"""A policy check that always returns True (allow)."""
def __str__(self):
"""Return a string representation of this check."""
return "@"
    def __call__(self, target, cred, enforcer):
"""Check the policy."""
return True
class Check(BaseCheck):
"""A base class to allow for user-defined policy checks."""
def __init__(self, kind, match):
"""Initiates Check instance.
:param kind: The kind of the check, i.e., the field before the
':'.
:param match: The match of the check, i.e., the field after
the ':'.
"""
self.kind = kind
self.match = match
def __str__(self):
"""Return a string representation of this check."""
return "%s:%s" % (self.kind, self.match)
class NotCheck(BaseCheck):<|fim▁hole|> A policy check that inverts the result of another policy check.
"""
def __init__(self, rule):
"""Initialize the 'not' check.
:param rule: The rule to negate. Must be a Check.
"""
self.rule = rule
def __str__(self):
"""Return a string representation of this check."""
return "not %s" % self.rule
    def __call__(self, target, cred, enforcer):
"""Check the policy.
Returns the logical inverse of the wrapped check.
"""
        return not self.rule(target, cred, enforcer)
class AndCheck(BaseCheck):
"""Implements the "and" logical operator.
A policy check that requires that a list of other checks all return True.
"""
def __init__(self, rules):
"""Initialize the 'and' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' and '.join(str(r) for r in self.rules)
    def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that all rules accept in order to return True.
"""
for rule in self.rules:
            if not rule(target, cred, enforcer):
return False
return True
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the AndCheck object for convenience.
"""
self.rules.append(rule)
return self
class OrCheck(BaseCheck):
"""Implements the "or" operator.
A policy check that requires that at least one of a list of other
checks returns True.
"""
def __init__(self, rules):
"""Initialize the 'or' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' or '.join(str(r) for r in self.rules)
    def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that at least one rule accept in order to return True.
"""
for rule in self.rules:
            if rule(target, cred, enforcer):
return True
return False
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the OrCheck object for convenience.
"""
self.rules.append(rule)
return self
def _parse_check(rule):
"""Parse a single base check rule into an appropriate Check object."""
# Handle the special checks
if rule == '!':
return FalseCheck()
elif rule == '@':
return TrueCheck()
try:
kind, match = rule.split(':', 1)
except Exception:
LOG.exception(_("Failed to understand rule %s") % rule)
# If the rule is invalid, we'll fail closed
return FalseCheck()
# Find what implements the check
if kind in _checks:
return _checks[kind](kind, match)
elif None in _checks:
return _checks[None](kind, match)
else:
LOG.error(_("No handler for matches of kind %s") % kind)
return FalseCheck()
def _parse_list_rule(rule):
"""Translates the old list-of-lists syntax into a tree of Check objects.
Provided for backwards compatibility.
"""
# Empty rule defaults to True
if not rule:
return TrueCheck()
# Outer list is joined by "or"; inner list by "and"
or_list = []
for inner_rule in rule:
# Elide empty inner lists
if not inner_rule:
continue
# Handle bare strings
if isinstance(inner_rule, basestring):
inner_rule = [inner_rule]
# Parse the inner rules into Check objects
and_list = [_parse_check(r) for r in inner_rule]
# Append the appropriate check to the or_list
if len(and_list) == 1:
or_list.append(and_list[0])
else:
or_list.append(AndCheck(and_list))
# If we have only one check, omit the "or"
if not or_list:
return FalseCheck()
elif len(or_list) == 1:
return or_list[0]
return OrCheck(or_list)
# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')
def _parse_tokenize(rule):
"""Tokenizer for the policy language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
"""
for tok in _tokenize_re.split(rule):
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'check', _parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
class ParseStateMeta(type):
"""Metaclass for the ParseState class.
Facilitates identifying reduction methods.
"""
def __new__(mcs, name, bases, cls_dict):
"""Create the class.
Injects the 'reducers' list, a list of tuples matching token sequences
to the names of the corresponding reduction methods.
"""
reducers = []
for key, value in cls_dict.items():
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:
reducers.append((reduction, key))
cls_dict['reducers'] = reducers
return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
def reducer(*tokens):
"""Decorator for reduction methods.
Arguments are a sequence of tokens, in order, which should trigger running
this reduction method.
"""
def decorator(func):
# Make sure we have a list of reducer sequences
if not hasattr(func, 'reducers'):
func.reducers = []
# Add the tokens to the list of reducer sequences
func.reducers.append(list(tokens))
return func
return decorator
class ParseState(object):
"""Implement the core of parsing the policy language.
Uses a greedy reduction algorithm to reduce a sequence of tokens into
a single terminal, the value of which will be the root of the Check tree.
Note: error reporting is rather lacking. The best we can get with
this parser formulation is an overall "parse failed" error.
Fortunately, the policy language is simple enough that this
shouldn't be that big a problem.
"""
__metaclass__ = ParseStateMeta
def __init__(self):
"""Initialize the ParseState."""
self.tokens = []
self.values = []
def reduce(self):
"""Perform a greedy reduction of the token stream.
If a reducer method matches, it will be executed, then the
reduce() method will be called recursively to search for any more
possible reductions.
"""
for reduction, methname in self.reducers:
if (len(self.tokens) >= len(reduction) and
self.tokens[-len(reduction):] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-len(reduction):])
# Update the tokens and values
self.tokens[-len(reduction):] = [r[0] for r in results]
self.values[-len(reduction):] = [r[1] for r in results]
# Check for any more reductions
return self.reduce()
def shift(self, tok, value):
"""Adds one more token to the state. Calls reduce()."""
self.tokens.append(tok)
self.values.append(value)
# Do a greedy reduce...
self.reduce()
@property
def result(self):
"""Obtain the final result of the parse.
Raises ValueError if the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError("Could not parse rule")
return self.values[0]
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expressions into a 'check' token."""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""Create an 'and_expr'.
Join two checks by the 'and' operator.
"""
return [('and_expr', AndCheck([check1, check2]))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""Extend an 'and_expr' by adding one more check."""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""Create an 'or_expr'.
Join two checks by the 'or' operator.
"""
return [('or_expr', OrCheck([check1, check2]))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""Extend an 'or_expr' by adding one more check."""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of another check."""
return [('check', NotCheck(check))]
def _parse_text_rule(rule):
"""Parses policy to the tree.
Translates a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return TrueCheck()
# Parse the token stream
state = ParseState()
for tok, value in _parse_tokenize(rule):
state.shift(tok, value)
try:
return state.result
except ValueError:
# Couldn't parse the rule
LOG.exception(_("Failed to understand rule %(rule)r") % locals())
# Fail closed
return FalseCheck()
def parse_rule(rule):
"""Parses a policy rule into a tree of Check objects."""
# If the rule is a string, it's in the policy language
if isinstance(rule, basestring):
return _parse_text_rule(rule)
return _parse_list_rule(rule)
def register(name, func=None):
"""Register a function or Check class as a policy check.
:param name: Gives the name of the check type, e.g., 'rule',
'role', etc. If name is None, a default check type
will be registered.
:param func: If given, provides the function or class to register.
If not given, returns a function taking one argument
to specify the function or class to register,
allowing use as a decorator.
"""
# Perform the actual decoration by registering the function or
# class. Returns the function or class for compliance with the
# decorator interface.
def decorator(func):
_checks[name] = func
return func
# If the function or class is given, do the registration
if func:
return decorator(func)
return decorator
@register("rule")
class RuleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Recursively checks credentials based on the defined rules."""
try:
return enforcer.rules[self.match](target, creds, enforcer)
except KeyError:
# We don't have any matching rule; fail closed
return False
@register("role")
class RoleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check that there is a matching role in the cred dict."""
return self.match.lower() in [x.lower() for x in creds['roles']]
@register('http')
class HttpCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check http: rules by calling to a remote server.
This example implementation simply verifies that the response
is exactly 'True'.
"""
url = ('http:' + self.match) % target
data = {'target': jsonutils.dumps(target),
'credentials': jsonutils.dumps(creds)}
post_data = urllib.urlencode(data)
f = urllib2.urlopen(url, post_data)
return f.read() == "True"
@register(None)
class GenericCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check an individual match.
Matches look like:
tenant:%(tenant_id)s
role:compute:admin
"""
# TODO(termie): do dict inspection via dot syntax
match = self.match % target
if self.kind in creds:
return match == six.text_type(creds[self.kind])
return False<|fim▁end|> | """Implements the "not" logical operator.
|
<|file_name|>conf.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# conf.py
"""
Loading a configuration
~~~~~~~~~~~~~~~~~~~~~~~
Various aspects of PyPhi's behavior can be configured.
When PyPhi is imported, it checks for a YAML file named ``pyphi_config.yml`` in
the current directory and automatically loads it if it exists; otherwise the
default configuration is used.
.. only:: never
This py.test fixture resets PyPhi config back to defaults after running
this doctest. This will not be shown in the output markup.
>>> getfixture('restore_config_afterwards')
The various settings are listed here with their defaults.
>>> import pyphi
>>> defaults = pyphi.config.defaults()
Print the ``config`` object to see the current settings:
>>> print(pyphi.config) # doctest: +SKIP
{ 'ASSUME_CUTS_CANNOT_CREATE_NEW_CONCEPTS': False,
'CACHE_SIAS': False,
'CACHE_POTENTIAL_PURVIEWS': True,
'CACHING_BACKEND': 'fs',
...
Settings can be changed on the fly by assigning them a new value:
>>> pyphi.config.PROGRESS_BARS = False
It is also possible to manually load a configuration file:
>>> pyphi.config.load_file('pyphi_config.yml')
Or load a dictionary of configuration values:
>>> pyphi.config.load_dict({'PRECISION': 1})
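A configuration file is plain YAML whose keys are the option names listed
below. As an illustrative sketch (the values shown are examples, not
recommendations), a ``pyphi_config.yml`` might contain::
    PRECISION: 6
    PARALLEL_CUT_EVALUATION: true
    NUMBER_OF_CORES: -1
    MAXIMUM_CACHE_MEMORY_PERCENTAGE: 50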
Approximations and theoretical options
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These settings control the algorithms PyPhi uses.
- :attr:`~pyphi.conf.PyphiConfig.ASSUME_CUTS_CANNOT_CREATE_NEW_CONCEPTS`
- :attr:`~pyphi.conf.PyphiConfig.CUT_ONE_APPROXIMATION`
- :attr:`~pyphi.conf.PyphiConfig.MEASURE`
- :attr:`~pyphi.conf.PyphiConfig.ACTUAL_CAUSATION_MEASURE`
- :attr:`~pyphi.conf.PyphiConfig.PARTITION_TYPE`
- :attr:`~pyphi.conf.PyphiConfig.PICK_SMALLEST_PURVIEW`
- :attr:`~pyphi.conf.PyphiConfig.USE_SMALL_PHI_DIFFERENCE_FOR_CES_DISTANCE`
- :attr:`~pyphi.conf.PyphiConfig.SYSTEM_CUTS`
- :attr:`~pyphi.conf.PyphiConfig.SINGLE_MICRO_NODES_WITH_SELFLOOPS_HAVE_PHI`
- :attr:`~pyphi.conf.PyphiConfig.VALIDATE_SUBSYSTEM_STATES`
- :attr:`~pyphi.conf.PyphiConfig.VALIDATE_CONDITIONAL_INDEPENDENCE`
Parallelization and system resources
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These settings control how much processing power and memory is available for
PyPhi to use. The default values may not be appropriate for your use-case or
machine, so **please check these settings before running anything**. Otherwise,
there is a risk that simulations might crash (potentially after running for a
long time!), resulting in data loss.
- :attr:`~pyphi.conf.PyphiConfig.PARALLEL_CONCEPT_EVALUATION`
- :attr:`~pyphi.conf.PyphiConfig.PARALLEL_CUT_EVALUATION`
- :attr:`~pyphi.conf.PyphiConfig.PARALLEL_COMPLEX_EVALUATION`
- :attr:`~pyphi.conf.PyphiConfig.NUMBER_OF_CORES`
- :attr:`~pyphi.conf.PyphiConfig.MAXIMUM_CACHE_MEMORY_PERCENTAGE`
.. important::
Only one of ``PARALLEL_CONCEPT_EVALUATION``, ``PARALLEL_CUT_EVALUATION``,
and ``PARALLEL_COMPLEX_EVALUATION`` can be set to ``True`` at a time.
**For most networks,** ``PARALLEL_CUT_EVALUATION`` **is the most
efficient.** This is because the algorithm is exponential time in the
number of nodes, so most of the time is spent on the largest subsystem.
You should only parallelize concept evaluation if you are just computing a
|CauseEffectStructure|.
Memoization and caching
~~~~~~~~~~~~~~~~~~~~~~~
PyPhi provides a number of ways to cache intermediate results.
- :attr:`~pyphi.conf.PyphiConfig.CACHE_SIAS`
- :attr:`~pyphi.conf.PyphiConfig.CACHE_REPERTOIRES`
- :attr:`~pyphi.conf.PyphiConfig.CACHE_POTENTIAL_PURVIEWS`
- :attr:`~pyphi.conf.PyphiConfig.CLEAR_SUBSYSTEM_CACHES_AFTER_COMPUTING_SIA`
- :attr:`~pyphi.conf.PyphiConfig.CACHING_BACKEND`
- :attr:`~pyphi.conf.PyphiConfig.FS_CACHE_VERBOSITY`
- :attr:`~pyphi.conf.PyphiConfig.FS_CACHE_DIRECTORY`
- :attr:`~pyphi.conf.PyphiConfig.MONGODB_CONFIG`
- :attr:`~pyphi.conf.PyphiConfig.REDIS_CACHE`
- :attr:`~pyphi.conf.PyphiConfig.REDIS_CONFIG`
Logging
~~~~~~~
These settings control how PyPhi handles messages. Logs can be written to
standard output, a file, both, or none. If these simple default controls are
not flexible enough for you, you can override the entire logging configuration.
See the `documentation on Python's logger
<https://docs.python.org/3/library/logging.html>`_ for more information.
- :attr:`~pyphi.conf.PyphiConfig.WELCOME_OFF`
- :attr:`~pyphi.conf.PyphiConfig.LOG_STDOUT_LEVEL`
- :attr:`~pyphi.conf.PyphiConfig.LOG_FILE_LEVEL`
- :attr:`~pyphi.conf.PyphiConfig.LOG_FILE`
- :attr:`~pyphi.conf.PyphiConfig.PROGRESS_BARS`
- :attr:`~pyphi.conf.PyphiConfig.REPR_VERBOSITY`
- :attr:`~pyphi.conf.PyphiConfig.PRINT_FRACTIONS`
Numerical precision
~~~~~~~~~~~~~~~~~~~
- :attr:`~pyphi.conf.PyphiConfig.PRECISION`
The ``config`` API
~~~~~~~~~~~~~~~~~~
"""
# pylint: disable=protected-access
import contextlib
import logging
import logging.config
import os
import pprint
from copy import copy
from pathlib import Path
import joblib
import yaml
from . import __about__, constants
log = logging.getLogger(__name__)
_VALID_LOG_LEVELS = [None, "CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"]
class Option:
"""A descriptor implementing PyPhi configuration options.
Args:
default: The default value of this ``Option``.
Keyword Args:
values (list): Allowed values for this option. A ``ValueError`` will
be raised if ``values`` is not ``None`` and the option is set to
be a value not in the list.
on_change (function): Optional callback that is called when the value
of the option is changed. The ``Config`` instance is passed as
the only argument to the callback.
doc (str): Optional docstring for the option.
"""
def __init__(self, default, values=None, type=None, on_change=None, doc=None):
self.default = default
self.values = values
self.type = type
self.on_change = on_change
self.doc = doc
self.__doc__ = self._docstring()
def __set_name__(self, owner, name):
self.name = name
def _docstring(self):
default = "``default={}``".format(repr(self.default))
values = (
", ``values={}``".format(repr(self.values))
if self.values is not None
else ""
)
on_change = (
", ``on_change={}``".format(self.on_change.__name__)
if self.on_change is not None
else ""
)
return "{}{}{}\n{}".format(default, values, on_change, self.doc or "")
def __get__(self, obj, cls=None):
if obj is None:
return self
return obj._values[self.name]
def __set__(self, obj, value):
self._validate(value)
obj._values[self.name] = value
self._callback(obj)
def _validate(self, value):
"""Validate the new value."""
if self.type is not None and not isinstance(value, self.type):
raise ValueError(
"{} must be of type {} for {}; got {}".format(
value, self.type, self.name, type(value)
)
)
if self.values and value not in self.values:
raise ValueError(
"{} ({}) is not a valid value for {}; must be one of:\n {}".format(
value,
type(value),
self.name,
"\n ".join(["{} ({})".format(v, type(v)) for v in self.values]),
)
)
def _callback(self, obj):
"""Trigger any callbacks."""
if self.on_change is not None:
self.on_change(obj)
class Config:
"""Base configuration object.
See ``PyphiConfig`` for usage.
"""
def __init__(self):
self._values = {}
self._loaded_files = []
# Set the default value of each ``Option``
for name, opt in self.options().items():
opt._validate(opt.default)
self._values[name] = opt.default
# Call hooks for each Option
# (This must happen *after* all default values are set so that
        # logging can be properly configured.)
for opt in self.options().values():
opt._callback(self)
def __str__(self):
return pprint.pformat(self._values, indent=2)
def __setattr__(self, name, value):
if name.startswith("_") or name in self.options().keys():
super().__setattr__(name, value)
else:
raise ValueError("{} is not a valid config option".format(name))
def __iter__(self):
return iter(self._values.items())
@classmethod
def options(cls):
"""Return a dictionary of the ``Option`` objects for this config."""
return {k: v for k, v in cls.__dict__.items() if isinstance(v, Option)}
def defaults(self):
"""Return the default values of this configuration."""
return {k: v.default for k, v in self.options().items()}
def load_dict(self, dct):
"""Load a dictionary of configuration values."""
for k, v in dct.items():
setattr(self, k, v)
def load_file(self, filename):
"""Load config from a YAML file."""
filename = os.path.abspath(filename)
with open(filename) as f:
self.load_dict(yaml.safe_load(f))
self._loaded_files.append(filename)
def snapshot(self):
"""Return a snapshot of the current values of this configuration."""
return copy(self._values)
def override(self, **new_values):
"""Decorator and context manager to override configuration values.
The initial configuration values are reset after the decorated function
        returns or the context manager completes its block, even if the function
or block raises an exception. This is intended to be used by tests
which require specific configuration values.
Example:
>>> from pyphi import config
>>> @config.override(PRECISION=20000)
... def test_something():
... assert config.PRECISION == 20000
...
>>> test_something()
>>> with config.override(PRECISION=100):
... assert config.PRECISION == 100
...
"""
return _override(self, **new_values)
class _override(contextlib.ContextDecorator):
"""See ``Config.override`` for usage."""
def __init__(self, conf, **new_values):
self.conf = conf
self.new_values = new_values
self.initial_values = conf.snapshot()
def __enter__(self):
"""Save original config values; override with new ones."""
self.conf.load_dict(self.new_values)
def __exit__(self, *exc):
"""Reset config to initial values; reraise any exceptions."""
self.conf.load_dict(self.initial_values)
return False
def configure_logging(conf):
"""Reconfigure PyPhi logging based on the current configuration."""
logging.config.dictConfig(
{
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "%(asctime)s [%(name)s] %(levelname)s "
"%(processName)s: %(message)s"
}
},
"handlers": {
"file": {
"level": conf.LOG_FILE_LEVEL,
"filename": conf.LOG_FILE,
"class": "logging.FileHandler",
"formatter": "standard",
},
"stdout": {
"level": conf.LOG_STDOUT_LEVEL,
"class": "pyphi.log.TqdmHandler",
"formatter": "standard",
},
},
"root": {
"level": "DEBUG",
"handlers": (["file"] if conf.LOG_FILE_LEVEL else [])
+ (["stdout"] if conf.LOG_STDOUT_LEVEL else []),
},
}
)
def configure_joblib(conf):
constants.joblib_memory = joblib.Memory(
location=conf.FS_CACHE_DIRECTORY, verbose=conf.FS_CACHE_VERBOSITY
)
def configure_precision(conf):
constants.EPSILON = 10 ** (-conf.PRECISION)
class PyphiConfig(Config):
"""``pyphi.config`` is an instance of this class."""
ASSUME_CUTS_CANNOT_CREATE_NEW_CONCEPTS = Option(
False,
type=bool,
doc="""
In certain cases, making a cut can actually cause a previously reducible
concept to become a proper, irreducible concept. Assuming this can never
happen can increase performance significantly, however the obtained results
are not strictly accurate.""",
)
CUT_ONE_APPROXIMATION = Option(
False,
type=bool,
doc="""
When determining the MIP for |big_phi|, this restricts the set of system
cuts that are considered to only those that cut the inputs or outputs of a
single node. This restricted set of cuts scales linearly with the size of
the system; the full set of all possible bipartitions scales
exponentially. This approximation is more likely to give theoretically
accurate results with modular, sparsely-connected, or homogeneous
networks.""",
)
MEASURE = Option(
"EMD",
doc="""
The measure to use when computing distances between repertoires and
concepts. A full list of currently installed measures is available by
calling ``print(pyphi.distance.measures.all())``. Note that some measures
cannot be used for calculating |big_phi| because they are asymmetric.
Custom measures can be added using the ``pyphi.distance.measures.register``
decorator. For example::
from pyphi.distance import measures
@measures.register('ALWAYS_ZERO')
def always_zero(a, b):
return 0
This measure can then be used by setting
``config.MEASURE = 'ALWAYS_ZERO'``.
If the measure is asymmetric you should register it using the
``asymmetric`` keyword argument. See :mod:`~pyphi.distance` for examples.
""",
)
ACTUAL_CAUSATION_MEASURE = Option(
"PMI",
doc="""
The measure to use when computing the pointwise information between state
probabilities in the actual causation module.
See documentation for ``config.MEASURE`` for more information on
configuring measures.
""",
)
PARALLEL_CONCEPT_EVALUATION = Option(
False,
type=bool,
doc="""
Controls whether concepts are evaluated in parallel when computing
cause-effect structures.""",
)
PARALLEL_CUT_EVALUATION = Option(
True,
type=bool,
doc="""
Controls whether system cuts are evaluated in parallel, which is faster but
requires more memory. If cuts are evaluated sequentially, only two
|SystemIrreducibilityAnalysis| instances need to be in memory at once.""",
)
PARALLEL_COMPLEX_EVALUATION = Option(
False,
type=bool,
doc="""
Controls whether systems are evaluated in parallel when computing
complexes.""",
)
NUMBER_OF_CORES = Option(
-1,
type=int,
doc="""
Controls the number of CPU cores used to evaluate unidirectional cuts.
Negative numbers count backwards from the total number of available cores,
with ``-1`` meaning 'use all available cores.'""",
)
MAXIMUM_CACHE_MEMORY_PERCENTAGE = Option(
50,
type=int,
doc="""
PyPhi employs several in-memory caches to speed up computation. However,
these can quickly use a lot of memory for large networks or large numbers
of them; to avoid thrashing, this setting limits the percentage of a
system's RAM that the caches can collectively use.""",
)
CACHE_SIAS = Option(
False,
type=bool,
doc="""
PyPhi is equipped with a transparent caching system for
|SystemIrreducibilityAnalysis| objects which stores them as they are
computed to avoid having to recompute them later. This makes it easy to
play around interactively with the program, or to accumulate results with
minimal effort. For larger projects, however, it is recommended that you
manage the results explicitly, rather than relying on the cache. For this
reason it is disabled by default.""",
)
CACHE_REPERTOIRES = Option(
True,
type=bool,
doc="""
PyPhi caches cause and effect repertoires. This greatly improves speed, but
can consume a significant amount of memory. If you are experiencing memory
issues, try disabling this.""",
)
CACHE_POTENTIAL_PURVIEWS = Option(
True,
type=bool,
doc="""
Controls whether the potential purviews of mechanisms of a network are
cached. Caching speeds up computations by not recomputing expensive
reducibility checks, but uses additional memory.""",
)
CLEAR_SUBSYSTEM_CACHES_AFTER_COMPUTING_SIA = Option(
False,
type=bool,
doc="""
Controls whether a |Subsystem|'s repertoire and MICE caches are cleared
with |Subsystem.clear_caches()| after computing the
|SystemIrreducibilityAnalysis|. If you don't need to do any more
computations after running |compute.sia()|, then enabling this may help
conserve memory.""",
)
CACHING_BACKEND = Option(
"fs",
values=["fs", "db"],
doc="""
Controls whether precomputed results are stored and read from a local
filesystem-based cache in the current directory or from a database. Set
this to ``'fs'`` for the filesystem, ``'db'`` for the database.""",
)
FS_CACHE_VERBOSITY = Option(
0,
type=int,
on_change=configure_joblib,
doc="""
Controls how much caching information is printed if the filesystem cache is
used. Takes a value between ``0`` and ``11``.""",
)
FS_CACHE_DIRECTORY = Option(
"__pyphi_cache__",
type=(str, Path),
on_change=configure_joblib,
doc="""
If the filesystem is used for caching, the cache will be stored in this
directory. This directory can be copied and moved around if you want to
    reuse results *e.g.* on another computer, but it must be in the same
directory from which Python is being run.""",
)
MONGODB_CONFIG = Option(
{
"host": "localhost",
"port": 27017,
"database_name": "pyphi",
"collection_name": "cache",
},
type=dict,
doc="""
Set the configuration for the MongoDB database backend (only has an
effect if ``CACHING_BACKEND`` is ``'db'``).""",
)
REDIS_CACHE = Option(
False,
type=bool,
doc="""
Specifies whether to use Redis to cache |MICE|.""",
)
REDIS_CONFIG = Option(
{"host": "localhost", "port": 6379, "db": 0, "test_db": 1,},
type=dict,
doc="""
Configure the Redis database backend. These are the defaults in the
provided ``redis.conf`` file.""",
)
WELCOME_OFF = Option(
False,
type=bool,
doc="""
Specifies whether to suppress the welcome message when PyPhi is imported.
Alternatively, you may suppress the message by setting the environment
variable ``PYPHI_WELCOME_OFF`` to any value in your shell:
.. code-block:: bash
export PYPHI_WELCOME_OFF='yes'
The message will not print if either this option is ``True`` or the
environment variable is set.""",
)
LOG_FILE = Option(
"pyphi.log",
type=(str, Path),
on_change=configure_logging,
doc="""
Controls the name of the log file.""",
)
LOG_FILE_LEVEL = Option(
"INFO",
values=_VALID_LOG_LEVELS,
on_change=configure_logging,
doc="""
Controls the level of log messages written to the log
file. This setting has the same possible values as
``LOG_STDOUT_LEVEL``.""",
)
LOG_STDOUT_LEVEL = Option(
"WARNING",
values=_VALID_LOG_LEVELS,
on_change=configure_logging,
doc="""
Controls the level of log messages written to standard
output. Can be one of ``'DEBUG'``, ``'INFO'``, ``'WARNING'``, ``'ERROR'``,
``'CRITICAL'``, or ``None``. ``'DEBUG'`` is the least restrictive level and
will show the most log messages. ``'CRITICAL'`` is the most restrictive
level and will only display information about fatal errors. If set to
``None``, logging to standard output will be disabled entirely.""",
)
PROGRESS_BARS = Option(
True,
type=bool,
doc="""
Controls whether to show progress bars on the console.
.. tip::
If you are iterating over many systems rather than doing one
long-running calculation, consider disabling this for speed.""",
)
PRECISION = Option(
6,
type=int,
on_change=configure_precision,
doc="""
If ``MEASURE`` is ``EMD``, then the Earth Mover's Distance is calculated
    with an external C++ library that uses a numerical optimizer to find a good
approximation. Consequently, systems with analytically zero |big_phi| will
sometimes be numerically found to have a small but non-zero amount. This
setting controls the number of decimal places to which PyPhi will consider
EMD calculations accurate. Values of |big_phi| lower than ``10e-PRECISION``
will be considered insignificant and treated as zero. The default value is
about as accurate as the EMD computations get.""",
)
VALIDATE_SUBSYSTEM_STATES = Option(
True,
type=bool,
doc="""
Controls whether PyPhi checks if the subsystems's state is possible
(reachable with nonzero probability from some previous state), given the
subsystem's TPM (**which is conditioned on background conditions**). If
this is turned off, then **calculated** |big_phi| **values may not be
valid**, since they may be associated with a subsystem that could never be
in the given state.""",
)
<|fim▁hole|> True,
type=bool,
doc="""
Controls whether PyPhi checks if a system's TPM is conditionally
independent.""",
)
SINGLE_MICRO_NODES_WITH_SELFLOOPS_HAVE_PHI = Option(
False,
type=bool,
doc="""
If set to ``True``, the |big_phi| value of single micro-node subsystems is
the difference between their unpartitioned |CauseEffectStructure| (a single
concept) and the null concept. If set to False, their |big_phi| is defined
to be zero. Single macro-node subsystems may always be cut, regardless of
circumstances.""",
)
REPR_VERBOSITY = Option(
2,
type=int,
values=[0, 1, 2],
doc="""
Controls the verbosity of ``__repr__`` methods on PyPhi objects. Can be set
to ``0``, ``1``, or ``2``. If set to ``1``, calling ``repr`` on PyPhi
objects will return pretty-formatted and legible strings, excluding
repertoires. If set to ``2``, ``repr`` calls also include repertoires.
Although this breaks the convention that ``__repr__`` methods should return
a representation which can reconstruct the object, readable representations
are convenient since the Python REPL calls ``repr`` to represent all
objects in the shell and PyPhi is often used interactively with the
REPL. If set to ``0``, ``repr`` returns more traditional object
representations.""",
)
PRINT_FRACTIONS = Option(
True,
type=bool,
doc="""
Controls whether numbers in a ``repr`` are printed as fractions. Numbers
are still printed as decimals if the fraction's denominator would be
large. This only has an effect if ``REPR_VERBOSITY > 0``.""",
)
PARTITION_TYPE = Option(
"BI",
doc="""
Controls the type of partition used for |small_phi| computations.
If set to ``'BI'``, partitions will have two parts.
If set to ``'TRI'``, partitions will have three parts. In addition,
computations will only consider partitions that strictly partition the
mechanism. That is, for the mechanism ``(A, B)`` and purview ``(B, C, D)``
the partition::
A,B ∅
─── ✕ ───
B C,D
is not considered, but::
A B
─── ✕ ───
B C,D
is. The following is also valid::
A,B ∅
─── ✕ ─────
∅ B,C,D
In addition, this setting introduces "wedge" tripartitions of the form::
A B ∅
─── ✕ ─── ✕ ───
B C D
where the mechanism in the third part is always empty.
Finally, if set to ``'ALL'``, all possible partitions will be tested.
You can experiment with custom partitioning strategies using the
``pyphi.partition.partition_types.register`` decorator. For example::
from pyphi.models import KPartition, Part
from pyphi.partition import partition_types
@partition_types.register('SINGLE_NODE')
def single_node_partitions(mechanism, purview, node_labels=None):
for element in mechanism:
element = tuple([element])
others = tuple(sorted(set(mechanism) - set(element)))
part1 = Part(mechanism=element, purview=())
part2 = Part(mechanism=others, purview=purview)
yield KPartition(part1, part2, node_labels=node_labels)
This generates the set of partitions that cut connections between a single
mechanism element and the entire purview. The mechanism and purview of each
|Part| remain undivided - only connections *between* parts are severed.
    You can use this new partitioning scheme by setting
``config.PARTITION_TYPE = 'SINGLE_NODE'``.
See :mod:`~pyphi.partition` for more examples.""",
)
PICK_SMALLEST_PURVIEW = Option(
False,
type=bool,
doc="""
When computing a |MIC| or |MIE|, it is possible for several MIPs to have
the same |small_phi| value. If this setting is set to ``True`` the MIP with
the smallest purview is chosen; otherwise, the one with largest purview is
chosen.""",
)
USE_SMALL_PHI_DIFFERENCE_FOR_CES_DISTANCE = Option(
False,
type=bool,
doc="""
If set to ``True``, the distance between cause-effect structures (when
computing a |SystemIrreducibilityAnalysis|) is calculated using the
difference between the sum of |small_phi| in the cause-effect structures
instead of the extended EMD.""",
)
SYSTEM_CUTS = Option(
"3.0_STYLE",
values=["3.0_STYLE", "CONCEPT_STYLE"],
doc="""
If set to ``'3.0_STYLE'``, then traditional IIT 3.0 cuts will be used when
computing |big_phi|. If set to ``'CONCEPT_STYLE'``, then experimental
concept-style system cuts will be used instead.""",
)
def log(self):
"""Log current settings."""
log.info("PyPhi v%s", __about__.__version__)
if self._loaded_files:
log.info("Loaded configuration from %s", self._loaded_files)
else:
log.info("Using default configuration (no configuration file " "provided)")
log.info("Current PyPhi configuration:\n %s", str(self))
PYPHI_CONFIG_FILENAME = "pyphi_config.yml"
config = PyphiConfig()
# Try and load the config file
if os.path.exists(PYPHI_CONFIG_FILENAME):
config.load_file(PYPHI_CONFIG_FILENAME)
# Log the PyPhi version and loaded configuration
config.log()<|fim▁end|> | VALIDATE_CONDITIONAL_INDEPENDENCE = Option( |
<|file_name|>client.py<|end_file_name|><|fim▁begin|># This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The I{2nd generation} service proxy provides access to web services.
See I{README.txt}
"""
import suds
import suds.metrics as metrics
from cookielib import CookieJar
from suds import *
from suds.reader import DefinitionsReader
from suds.transport import TransportError, Request
from suds.transport.https import HttpAuthenticated
from suds.servicedefinition import ServiceDefinition
from suds import sudsobject
from sudsobject import Factory as InstFactory
from sudsobject import Object
from suds.resolver import PathResolver
from suds.builder import Builder
from suds.wsdl import Definitions
from suds.cache import ObjectCache
from suds.sax.document import Document
from suds.sax.parser import Parser
from suds.options import Options<|fim▁hole|>from urlparse import urlparse
from copy import deepcopy
from suds.plugin import PluginContainer
from logging import getLogger
log = getLogger(__name__)
class Client(object):
"""
A lightweight web services client.
I{(2nd generation)} API.
@ivar wsdl: The WSDL object.
@type wsdl:L{Definitions}
@ivar service: The service proxy used to invoke operations.
@type service: L{Service}
@ivar factory: The factory used to create objects.
@type factory: L{Factory}
@ivar sd: The service definition
@type sd: L{ServiceDefinition}
@ivar messages: The last sent/received messages.
@type messages: str[2]
"""
@classmethod
def items(cls, sobject):
"""
Extract the I{items} from a suds object much like the
items() method works on I{dict}.
@param sobject: A suds object
@type sobject: L{Object}
@return: A list of items contained in I{sobject}.
@rtype: [(key, value),...]
"""
return sudsobject.items(sobject)
@classmethod
def dict(cls, sobject):
"""
Convert a sudsobject into a dictionary.
@param sobject: A suds object
@type sobject: L{Object}
@return: A python dictionary containing the
items contained in I{sobject}.
@rtype: dict
"""
return sudsobject.asdict(sobject)
@classmethod
def metadata(cls, sobject):
"""
Extract the metadata from a suds object.
@param sobject: A suds object
@type sobject: L{Object}
@return: The object's metadata
@rtype: L{sudsobject.Metadata}
"""
return sobject.__metadata__
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@see: L{Options}
"""
options = Options()
options.transport = HttpAuthenticated()
self.options = options
#options.cache = ObjectCache(days=1)
self.set_options(**kwargs)
reader = DefinitionsReader(options, Definitions)
self.wsdl = reader.open(url)
plugins = PluginContainer(options.plugins)
plugins.init.initialized(wsdl=self.wsdl)
self.factory = Factory(self.wsdl)
self.service = ServiceSelector(self, self.wsdl.services)
self.sd = []
for s in self.wsdl.services:
sd = ServiceDefinition(self.wsdl, s)
self.sd.append(sd)
self.messages = dict(tx=None, rx=None)
def set_options(self, **kwargs):
"""
Set options.
@param kwargs: keyword arguments.
@see: L{Options}
"""
p = Unskin(self.options)
p.update(kwargs)
def add_prefix(self, prefix, uri):
"""
Add I{static} mapping of an XML namespace prefix to a namespace.
This is useful for cases when a wsdl and referenced schemas make heavy
        use of namespaces and those namespaces are subject to change.
@param prefix: An XML namespace prefix.
@type prefix: str
@param uri: An XML namespace URI.
@type uri: str
@raise Exception: when prefix is already mapped.
"""
root = self.wsdl.root
mapped = root.resolvePrefix(prefix, None)
if mapped is None:
root.addPrefix(prefix, uri)
return
if mapped[1] != uri:
raise Exception('"%s" already mapped as "%s"' % (prefix, mapped))
def last_sent(self):
"""
Get last sent I{soap} message.
@return: The last sent I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('tx')
def last_received(self):
"""
Get last received I{soap} message.
@return: The last received I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('rx')
def clone(self):
"""
Get a shallow clone of this object.
The clone only shares the WSDL. All other attributes are
unique to the cloned object including options.
@return: A shallow clone.
@rtype: L{Client}
"""
class Uninitialized(Client):
def __init__(self):
pass
clone = Uninitialized()
clone.options = Options()
cp = Unskin(clone.options)
mp = Unskin(self.options)
cp.update(deepcopy(mp))
clone.wsdl = self.wsdl
clone.factory = self.factory
clone.service = ServiceSelector(clone, self.wsdl.services)
clone.sd = self.sd
clone.messages = dict(tx=None, rx=None)
return clone
def __str__(self):
return unicode(self)
def __unicode__(self):
s = ['\n']
build = suds.__build__.split()
s.append('Suds ( https://fedorahosted.org/suds/ )')
s.append(' version: %s' % suds.__version__)
s.append(' %s build: %s' % (build[0], build[1]))
for sd in self.sd:
s.append('\n\n%s' % unicode(sd))
return ''.join(s)
class Factory:
"""
A factory for instantiating types defined in the wsdl
@ivar resolver: A schema type resolver.
@type resolver: L{PathResolver}
@ivar builder: A schema object builder.
@type builder: L{Builder}
"""
def __init__(self, wsdl):
"""
@param wsdl: A schema object.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.resolver = PathResolver(wsdl)
self.builder = Builder(self.resolver)
def create(self, name):
"""
create a WSDL type by name
@param name: The name of a type defined in the WSDL.
@type name: str
@return: The requested object.
@rtype: L{Object}
"""
timer = metrics.Timer()
timer.start()
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
if type.enum():
result = InstFactory.object(name)
for e, a in type.children():
setattr(result, e.name, e.name)
else:
try:
result = self.builder.build(type)
except Exception, e:
log.error("create '%s' failed", name, exc_info=True)
raise BuildError(name, e)
timer.stop()
metrics.log.debug('%s created: %s', name, timer)
return result
def separator(self, ps):
"""
Set the path separator.
@param ps: The new path separator.
@type ps: char
"""
self.resolver = PathResolver(self.wsdl, ps)
class ServiceSelector:
"""
The B{service} selector is used to select a web service.
In most cases, the wsdl only defines (1) service in which access
by subscript is passed through to a L{PortSelector}. This is also the
behavior when a I{default} service has been specified. In cases
where multiple services have been defined and no default has been
specified, the service is found by name (or index) and a L{PortSelector}
for the service is returned. In all cases, attribute access is
forwarded to the L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __services: A list of I{wsdl} services.
@type __services: list
"""
def __init__(self, client, services):
"""
@param client: A suds client.
@type client: L{Client}
@param services: A list of I{wsdl} services.
@type services: list
"""
self.__client = client
self.__services = services
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@param name: The name of a method.
@type name: str
@return: A L{PortSelector}.
@rtype: L{PortSelector}.
"""
default = self.__ds()
if default is None:
port = self.__find(0)
else:
port = default
return getattr(port, name)
def __getitem__(self, name):
"""
Provides selection of the I{service} by name (string) or
index (integer). In cases where only (1) service is defined
or a I{default} has been specified, the request is forwarded
to the L{PortSelector}.
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the specified service.
@rtype: L{PortSelector}.
"""
if len(self.__services) == 1:
port = self.__find(0)
return port[name]
default = self.__ds()
if default is not None:
port = default
return port[name]
return self.__find(name)
def __find(self, name):
"""
Find a I{service} by name (string) or index (integer).
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the found service.
@rtype: L{PortSelector}.
"""
service = None
if not len(self.__services):
raise Exception, 'No services defined'
if isinstance(name, int):
try:
service = self.__services[name]
name = service.name
except IndexError:
raise ServiceNotFound, 'at [%d]' % name
else:
for s in self.__services:
if name == s.name:
service = s
break
if service is None:
raise ServiceNotFound, name
return PortSelector(self.__client, service.ports, name)
def __ds(self):
"""
Get the I{default} service if defined in the I{options}.
@return: A L{PortSelector} for the I{default} service.
@rtype: L{PortSelector}.
"""
ds = self.__client.options.service
if ds is None:
return None
else:
return self.__find(ds)
class PortSelector:
"""
The B{port} selector is used to select a I{web service} B{port}.
In cases where multiple ports have been defined and no default has been
specified, the port is found by name (or index) and a L{MethodSelector}
for the port is returned. In all cases, attribute access is
forwarded to the L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __ports: A list of I{service} ports.
@type __ports: list
@ivar __qn: The I{qualified} name of the port (used for logging).
@type __qn: str
"""
def __init__(self, client, ports, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param ports: A list of I{service} ports.
@type ports: list
@param qn: The name of the service.
@type qn: str
"""
self.__client = client
self.__ports = ports
self.__qn = qn
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@param name: The name of a method.
@type name: str
@return: A L{MethodSelector}.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
m = self.__find(0)
else:
m = default
return getattr(m, name)
def __getitem__(self, name):
"""
Provides selection of the I{port} by name (string) or
index (integer). In cases where only (1) port is defined
or a I{default} has been specified, the request is forwarded
to the L{MethodSelector}.
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the specified port.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
return self.__find(name)
else:
return default
def __find(self, name):
"""
Find a I{port} by name (string) or index (integer).
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the found port.
@rtype: L{MethodSelector}.
"""
port = None
if not len(self.__ports):
raise Exception, 'No ports defined: %s' % self.__qn
if isinstance(name, int):
qn = '%s[%d]' % (self.__qn, name)
try:
port = self.__ports[name]
except IndexError:
raise PortNotFound, qn
else:
qn = '.'.join((self.__qn, name))
for p in self.__ports:
if name == p.name:
port = p
break
if port is None:
raise PortNotFound, qn
qn = '.'.join((self.__qn, port.name))
return MethodSelector(self.__client, port.methods, qn)
def __dp(self):
"""
Get the I{default} port if defined in the I{options}.
@return: A L{MethodSelector} for the I{default} port.
@rtype: L{MethodSelector}.
"""
dp = self.__client.options.port
if dp is None:
return None
else:
return self.__find(dp)
class MethodSelector:
"""
The B{method} selector is used to select a B{method} by name.
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __methods: A dictionary of methods.
@type __methods: dict
@ivar __qn: The I{qualified} name of the method (used for logging).
@type __qn: str
"""
def __init__(self, client, methods, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param methods: A dictionary of methods.
@type methods: dict
@param qn: The I{qualified} name of the port.
@type qn: str
"""
self.__client = client
self.__methods = methods
self.__qn = qn
def __getattr__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
return self[name]
def __getitem__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
m = self.__methods.get(name)
if m is None:
qn = '.'.join((self.__qn, name))
raise MethodNotFound, qn
return Method(self.__client, m)
class Method:
"""
The I{method} (namespace) object.
@ivar client: A client object.
@type client: L{Client}
@ivar method: A I{wsdl} method.
    @type method: I{wsdl} Method.
"""
def __init__(self, client, method):
"""
@param client: A client object.
@type client: L{Client}
@param method: A I{raw} method.
        @type method: I{raw} Method.
"""
self.client = client
self.method = method
def __call__(self, *args, **kwargs):
"""
Invoke the method.
"""
clientclass = self.clientclass(kwargs)
client = clientclass(self.client, self.method)
if not self.faults():
try:
return client.invoke(args, kwargs)
except WebFault, e:
return (500, e)
else:
return client.invoke(args, kwargs)
def faults(self):
""" get faults option """
return self.client.options.faults
def clientclass(self, kwargs):
""" get soap client class """
if SimClient.simulation(kwargs):
return SimClient
else:
return SoapClient
class SoapClient:
"""
A lightweight soap based web client B{**not intended for external use}
@ivar service: The target method.
@type service: L{Service}
@ivar method: A target method.
@type method: L{Method}
    @ivar options: A dictionary of options.
@type options: dict
@ivar cookiejar: A cookie jar.
@type cookiejar: libcookie.CookieJar
"""
def __init__(self, client, method):
"""
@param client: A suds client.
@type client: L{Client}
@param method: A target method.
@type method: L{Method}
"""
self.client = client
self.method = method
self.options = client.options
self.cookiejar = CookieJar()
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin}|I{subclass of} L{Object}
"""
timer = metrics.Timer()
timer.start()
result = None
binding = self.method.binding.input
soapenv = binding.get_message(self.method, args, kwargs)
timer.stop()
metrics.log.debug(
"message for '%s' created: %s",
self.method.name,
timer)
timer.start()
result = self.send(soapenv)
timer.stop()
metrics.log.debug(
"method '%s' invoked: %s",
self.method.name,
timer)
return result
def send(self, soapenv):
"""
Send soap message.
@param soapenv: A soap envelope to send.
@type soapenv: L{Document}
@return: The reply to the sent message.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
result = None
location = self.location()
binding = self.method.binding.input
transport = self.options.transport
retxml = self.options.retxml
prettyxml = self.options.prettyxml
log.debug('sending to (%s)\nmessage:\n%s', location, soapenv)
try:
self.last_sent(soapenv)
plugins = PluginContainer(self.options.plugins)
plugins.message.marshalled(envelope=soapenv.root())
if prettyxml:
soapenv = soapenv.str()
else:
soapenv = soapenv.plain()
soapenv = soapenv.encode('utf-8')
plugins.message.sending(envelope=soapenv)
request = Request(location, soapenv)
request.headers = self.headers()
reply = transport.send(request)
ctx = plugins.message.received(reply=reply.message)
reply.message = ctx.reply
if retxml:
result = reply.message
else:
result = self.succeeded(binding, reply.message)
except TransportError, e:
if e.httpcode in (202,204):
result = None
else:
log.error(self.last_sent())
result = self.failed(binding, e)
return result
def headers(self):
"""
        Get http headers for the http/https request.
@return: A dictionary of header/values.
@rtype: dict
"""
action = self.method.soap.action
stock = { 'Content-Type' : 'text/xml; charset=utf-8', 'SOAPAction': action }
result = dict(stock, **self.options.headers)
log.debug('headers = %s', result)
return result
def succeeded(self, binding, reply):
"""
Request succeeded, process the reply
@param binding: The binding to be used to process the reply.
@type binding: L{bindings.binding.Binding}
@param reply: The raw reply text.
@type reply: str
@return: The method result.
@rtype: I{builtin}, L{Object}
        @raise WebFault: On server fault.
"""
log.debug('http succeeded:\n%s', reply)
plugins = PluginContainer(self.options.plugins)
if len(reply) > 0:
reply, result = binding.get_reply(self.method, reply)
self.last_received(reply)
else:
result = None
ctx = plugins.message.unmarshalled(reply=result)
result = ctx.reply
if self.options.faults:
return result
else:
return (200, result)
def failed(self, binding, error):
"""
Request failed, process reply based on reason
@param binding: The binding to be used to process the reply.
@type binding: L{suds.bindings.binding.Binding}
@param error: The http error message
@type error: L{transport.TransportError}
"""
status, reason = (error.httpcode, tostr(error))
reply = error.fp.read()
log.debug('http failed:\n%s', reply)
if status == 500:
if len(reply) > 0:
r, p = binding.get_fault(reply)
self.last_received(r)
return (status, p)
else:
return (status, None)
if self.options.faults:
raise Exception((status, reason))
else:
return (status, None)
def location(self):
p = Unskin(self.options)
return p.get('location', self.method.location)
def last_sent(self, d=None):
key = 'tx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
def last_received(self, d=None):
key = 'rx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
class SimClient(SoapClient):
"""
Loopback client used for message/reply simulation.
"""
injkey = '__inject'
@classmethod
def simulation(cls, kwargs):
""" get whether loopback has been specified in the I{kwargs}. """
return kwargs.has_key(SimClient.injkey)
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
simulation = kwargs[self.injkey]
msg = simulation.get('msg')
reply = simulation.get('reply')
fault = simulation.get('fault')
if msg is None:
if reply is not None:
return self.__reply(reply, args, kwargs)
if fault is not None:
return self.__fault(fault)
raise Exception('(reply|fault) expected when msg=None')
sax = Parser()
msg = sax.parse(string=msg)
return self.send(msg)
def __reply(self, reply, args, kwargs):
""" simulate the reply """
binding = self.method.binding.input
msg = binding.get_message(self.method, args, kwargs)
log.debug('inject (simulated) send message:\n%s', msg)
binding = self.method.binding.output
return self.succeeded(binding, reply)
def __fault(self, reply):
""" simulate the (fault) reply """
binding = self.method.binding.output
if self.options.faults:
r, p = binding.get_fault(reply)
self.last_received(r)
return (500, p)
else:
return (500, None)<|fim▁end|> | from suds.properties import Unskin |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>pub mod bencode;<|fim▁hole|><|fim▁end|> | pub mod convert;
pub mod dict;
pub mod list; |
<|file_name|>OCNMapService.java<|end_file_name|><|fim▁begin|>package tc.oc.api.ocn;
import java.util.Collection;
import javax.inject.Singleton;<|fim▁hole|>import com.google.common.util.concurrent.ListenableFuture;
import tc.oc.api.docs.MapRating;
import tc.oc.api.docs.virtual.MapDoc;
import tc.oc.api.docs.virtual.UserDoc;
import tc.oc.api.exceptions.NotFound;
import tc.oc.api.http.HttpOption;
import tc.oc.api.maps.MapRatingsRequest;
import tc.oc.api.maps.MapRatingsResponse;
import tc.oc.api.maps.MapService;
import tc.oc.api.maps.UpdateMapsResponse;
import tc.oc.api.model.HttpModelService;
import tc.oc.commons.core.concurrent.FutureUtils;
import tc.oc.commons.core.stream.Collectors;
@Singleton
class OCNMapService extends HttpModelService<MapDoc, MapDoc> implements MapService {
public ListenableFuture<Object> rate(MapRating rating) {
return this.client().post(memberUri(rating.map_id, "rate"), rating, Object.class, HttpOption.INFINITE_RETRY);
}
public ListenableFuture<MapRatingsResponse> getRatings(MapRatingsRequest request) {
return this.client().post(memberUri(request.map_id, "get_ratings"), request, MapRatingsResponse.class, HttpOption.INFINITE_RETRY);
}
public UpdateMapsResponse updateMaps(Collection<? extends MapDoc> maps) {
final ListenableFuture<MapUpdateMultiResponse> future = updateMulti(maps, MapUpdateMultiResponse.class);
return new UpdateMapsResponse(
(ListenableFuture) future,
maps.stream()
.flatMap(MapDoc::authorAndContributorUuids)
.distinct()
.collect(Collectors.mappingTo(uuid -> FutureUtils.mapSync(
future,
response -> {
final UserDoc.Identity user = response.users_by_uuid.get(uuid);
if(user != null) return user;
throw new NotFound();
}
)))
);
}
}<|fim▁end|> | |
<|file_name|>astpprint.py<|end_file_name|><|fim▁begin|>"""Python AST pretty-printer.
Copyright(C) 2007, Martin Blais <[email protected]>
This module exports a function that can be used to print a human-readable
version of the AST.
This code is downloaded verbatim from:
http://code.activestate.com/recipes/533146/
<|fim▁hole|>import sys
__all__ = ('printAst','getAststr')
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
def getAststr(astmod, ast, indent=' ', initlevel=0):
"Pretty-print an AST to the given output stream."
stream = StringIO()
rec_node(astmod, ast, initlevel, indent, stream.write)
stream.write('\n')
stream.seek(0)
return stream.read()
def printAst(astmod, ast, indent=' ', stream=sys.stdout, initlevel=0):
"Pretty-print an AST to the given output stream."
rec_node(astmod, ast, initlevel, indent, stream.write)
stream.write('\n')
stream.flush()
def rec_node(astmod, node, level, indent, write):
"Recurse through a node, pretty-printing it."
pfx = indent * level
if isinstance(node, astmod.Node):
write(pfx)
write(node.__class__.__name__)
write('(')
i = 0
for child in node.getChildren():
if not isinstance(child, astmod.Node):
continue
if i != 0:
write(',')
write('\n')
rec_node(astmod, child, level+1, indent, write)
i += 1
if i == 0:
            # None of the children are nodes, simply join their repr on a single
# line.
res = []
for child in node.getChildren():
res.append(repr(child))
write(', '.join(res))
else:
write('\n')
write(pfx)
write(')')
else:
write(pfx)
write(repr(node))
def main():
from compiler import ast
import optparse
parser = optparse.OptionParser(__doc__.strip())
opts, args = parser.parse_args()
if not args:
parser.error("You need to specify the name of Python files to print out.")
import compiler, traceback
for fn in args:
print('\n\n%s:\n' % fn)
try:
printAst(ast, compiler.parseFile(fn), initlevel=1)
except SyntaxError, e:
traceback.print_exc()
if __name__ == '__main__':
main()<|fim▁end|> | """
__author__ = 'Martin Blais <[email protected]>'
|
<|file_name|>include_dirs.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | var path = require('path');
console.log(path.join(path.relative('.', __dirname),'include')); |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright (C) 2020 Natanael Mojica <[email protected]>
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Library General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//<|fim▁hole|>// Library General Public License for more details.
//
// You should have received a copy of the GNU Library General Public
// License along with this library; if not, write to the
// Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
// Boston, MA 02110-1335, USA.
use gst::gst_plugin_define;
mod filter;
fn plugin_init(plugin: &gst::Plugin) -> Result<(), glib::BoolError> {
filter::register(plugin)?;
Ok(())
}
gst_plugin_define!(
csound,
env!("CARGO_PKG_DESCRIPTION"),
plugin_init,
concat!(env!("CARGO_PKG_VERSION"), "-", env!("COMMIT_ID")),
"MIT/X11",
env!("CARGO_PKG_NAME"),
env!("CARGO_PKG_NAME"),
env!("CARGO_PKG_REPOSITORY"),
env!("BUILD_REL_DATE")
);<|fim▁end|> | // This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
<|file_name|>marking.rs<|end_file_name|><|fim▁begin|>use crate::gc::root::Slot;
use crate::gc::{Address, Region};
pub fn start(rootset: &[Slot], heap: Region, perm: Region) {<|fim▁hole|>
for root in rootset {
let root_ptr = root.get();
if heap.contains(root_ptr) {
let root_obj = root_ptr.to_mut_obj();
if !root_obj.header().is_marked_non_atomic() {
marking_stack.push(root_ptr);
root_obj.header_mut().mark_non_atomic();
}
} else {
debug_assert!(root_ptr.is_null() || perm.contains(root_ptr));
}
}
while marking_stack.len() > 0 {
let object_addr = marking_stack.pop().expect("stack already empty");
let object = object_addr.to_mut_obj();
object.visit_reference_fields(|field| {
let field_addr = field.get();
if heap.contains(field_addr) {
let field_obj = field_addr.to_mut_obj();
if !field_obj.header().is_marked_non_atomic() {
marking_stack.push(field_addr);
field_obj.header_mut().mark_non_atomic();
}
} else {
debug_assert!(field_addr.is_null() || perm.contains(field_addr));
}
});
}
}<|fim▁end|> | let mut marking_stack: Vec<Address> = Vec::new(); |
<|file_name|>describe.py<|end_file_name|><|fim▁begin|># Copyright 2014 Google Inc. All Rights Reserved.
"""Command for describing target HTTPS proxies."""
from googlecloudsdk.compute.lib import base_classes
class Describe(base_classes.GlobalDescriber):
"""Display detailed information about a target HTTPS proxy."""
@staticmethod
def Args(parser):<|fim▁hole|> cli = Describe.GetCLIGenerator()
base_classes.GlobalDescriber.Args(
parser, 'compute.targetHttpsProxies', cli,
'compute.target-https-proxies')
base_classes.AddFieldsFlag(parser, 'targetHttpsProxies')
@property
def service(self):
return self.compute.targetHttpsProxies
@property
def resource_type(self):
return 'targetHttpsProxies'
Describe.detailed_help = {
'brief': 'Display detailed information about a target HTTPS proxy',
'DESCRIPTION': """\
*{command}* displays all data associated with a target HTTPS proxy
in a project.
""",
}<|fim▁end|> | |
<|file_name|>framerate.rs<|end_file_name|><|fim▁begin|>//! Framerate control
use libc;
use libc::{c_void, size_t};
use std::mem;
use ::get_error;
use sys::gfx;
/// Structure holding the state and timing information of the framerate controller.
pub struct FPSManager {
raw: *mut gfx::framerate::FPSmanager,
}
impl FPSManager {
/// Create the framerate manager.
pub fn new() -> FPSManager {<|fim▁hole|> FPSManager { raw: raw }
}
}
/// Set the framerate in Hz.
pub fn set_framerate(&mut self, rate: u32) -> Result<(), String> {
let ret = unsafe { gfx::framerate::SDL_setFramerate(self.raw, rate as u32) };
match ret {
0 => Ok(()),
_ => Err(get_error())
}
}
/// Return the current target framerate in Hz.
pub fn get_framerate(&self) -> i32 {
// will not get an error
unsafe { gfx::framerate::SDL_getFramerate(self.raw) as i32 }
}
/// Return the current framecount.
pub fn get_frame_count(&self) -> i32 {
// will not get an error
unsafe { gfx::framerate::SDL_getFramecount(self.raw) as i32 }
}
/// Delay execution to maintain a constant framerate and calculate fps.
pub fn delay(&mut self) -> u32 {
unsafe { gfx::framerate::SDL_framerateDelay(self.raw) as u32 }
}
}
impl Drop for FPSManager {
fn drop(&mut self) {
unsafe { libc::free(self.raw as *mut c_void) }
}
}<|fim▁end|> | unsafe {
let size = mem::size_of::<gfx::framerate::FPSmanager>() as size_t;
let raw = libc::malloc(size) as *mut gfx::framerate::FPSmanager;
gfx::framerate::SDL_initFramerate(raw); |
<|file_name|>SplinterLogTest.java<|end_file_name|><|fim▁begin|>package com.splinter.graphing;
import org.junit.Assert;
import org.junit.Test;
import java.util.HashMap;
import java.util.Map;
public class SplinterLogTest {
@Test
public void testDisableLogs() {
try {
SLog.setEnabled(false);
String expected = "";
Assert.assertEquals(expected, new SLogStop("Coffee Time", "coffeeComplete")
.withOperationAlias("ensureCapacity")
.withComponentOverride("WaterReservoir")<|fim▁hole|> } finally {
SLog.setEnabled(true);
}
}
@Test
public void testStaticUtilsVarArgs() {
String expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=S;";
Assert.assertEquals(expected, SLogCall.log("Coffee Time", "selectCupSize", null));
expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=S;_MISSING_KEY_0=null;";
Assert.assertEquals(expected, SLogCall.log("Coffee Time", "selectCupSize", null, null));
expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=S;";
Assert.assertEquals(expected, SLogCall.log("Coffee Time", "selectCupSize", "size"));
expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=S;size=null;";
Assert.assertEquals(expected, SLogCall.log("Coffee Time", "selectCupSize", "size", null));
expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=S;_MISSING_KEY_0=large;";
Assert.assertEquals(expected, SLogCall.log("Coffee Time", "selectCupSize", null, "large"));
expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=S;_MISSING_KEY_0=large;";
Assert.assertEquals(expected, SLogCall.log("Coffee Time", "selectCupSize", null, "large", "newkey"));
expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=S;size=large;";
Assert.assertEquals(expected, SLogCall.log("Coffee Time", "selectCupSize", "size", "large"));
}
@Test
public void testStaticUtils() {
String expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=S;size=large;";
Assert.assertEquals(expected, SLogCall.log("Coffee Time", "selectCupSize", "size", "large"));
expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=S;";
Assert.assertEquals(expected, SLogCall.log("Coffee Time", "selectCupSize"));
expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=A;size=large;";
Assert.assertEquals(expected, SLogStart.log("Coffee Time", "selectCupSize", "size", "large"));
expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=A;";
Assert.assertEquals(expected, SLogStart.log("Coffee Time", "selectCupSize"));
expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=F;size=large;";
Assert.assertEquals(expected, SLogStop.log("Coffee Time", "selectCupSize", "size", "large"));
expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=F;";
Assert.assertEquals(expected, SLogStop.log("Coffee Time", "selectCupSize"));
expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=S;+MC=1;size=large;";
Assert.assertEquals(expected, SLogBroadcastSend.log("Coffee Time", "selectCupSize", "size", "large"));
expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=S;+MC=1;";
Assert.assertEquals(expected, SLogBroadcastSend.log("Coffee Time", "selectCupSize"));
expected = "$SPG$+T=Coffee Time;+O=bcastId;+M=A;+OA=selectCupSize;size=large;";
Assert.assertEquals(expected, SLogBroadcastStart.log("Coffee Time", "bcastId", "selectCupSize","size", "large"));
expected = "$SPG$+T=Coffee Time;+O=bcastId;+M=A;+OA=selectCupSize;";
Assert.assertEquals(expected, SLogBroadcastStart.log("Coffee Time", "bcastId", "selectCupSize"));
expected = "$SPG$+T=Coffee Time;+O=bcastId;+M=F;+OA=selectCupSize;size=large;";
Assert.assertEquals(expected, SLogBroadcastStop.log("Coffee Time", "bcastId", "selectCupSize","size", "large"));
expected = "$SPG$+T=Coffee Time;+O=bcastId;+M=F;+OA=selectCupSize;";
Assert.assertEquals(expected, SLogBroadcastStop.log("Coffee Time", "bcastId", "selectCupSize"));
}
@Test
public void testSunnyDay() {
String expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=S;size=large;";
Assert.assertEquals(expected, new SLogCall("Coffee Time", "selectCupSize")
.withUserData("size", "large").toString());
Map<String, String> userData = new HashMap<String, String>();
userData.put("size", "large");
Assert.assertEquals(expected, new SLogCall("Coffee Time", "selectCupSize")
.withUserData(userData).toString());
expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=S;size=large;size1=large;size2=large;size3=large;size4=large;size5=large;";
Assert.assertEquals(expected, new SLogCall("Coffee Time", "selectCupSize")
.withUserData("size", "large")
.withUserData("size1", "large")
.withUserData("size2", "large")
.withUserData("size3", "large")
.withUserData("size4", "large")
.withUserData("size5", "large").toString());
}
@Test
public void testOptionalParams() {
String expected = "$SPG$+T=Coffee Time;+O=pumpWater;+M=A;+I^=100ms;";
Assert.assertEquals(expected, new SLogStart("Coffee Time", "pumpWater")
.withInstrumentationOverride(100, SLog.TimeNotation.MILLIS)
.toString());
expected = "$SPG$+T=Coffee Time;+O=coffeeComplete;+M=F;+OA=ensureCapacity;+C^=WaterReservoir;";
Assert.assertEquals(expected, new SLogStop("Coffee Time", "coffeeComplete")
.withOperationAlias("ensureCapacity")
.withComponentOverride("WaterReservoir")
.toString());
}
@Test
public void testMissingParams() {
String expected = "$SPG$+T=_MISSING_TASK_;+O=_MISSING_OPERATION_;+M=S;";
Assert.assertEquals(expected, new SLog(null, null, null)
.toString());
expected = "$SPG$+T=Coffee Time;+O=selectCupSize;+M=S;_MISSING_KEY_0=large;";
Assert.assertEquals(expected, new SLogCall("Coffee Time", "selectCupSize")
.withUserData(null, "large").toString());
}
@Test
public void testEscaping() {
Assert.assertEquals("abcd", SLog.escape("abcd"));
Assert.assertEquals("ab\\ncd", SLog.escape("ab\ncd"));
Assert.assertNull(SLog.escape(null));
Assert.assertEquals("", SLog.escape(""));
Assert.assertEquals("ab\\=cd", SLog.escape("ab=cd"));
Assert.assertEquals("ab\\;cd", SLog.escape("ab;cd"));
Assert.assertEquals("ab\\\\cd", SLog.escape("ab\\cd"));
}
@Test
public void testEscapingLog() {
String expected = "$SPG$+T=file\\; opened;+O=\\\\open;+M=S;+OA=\\=1;r\\=sr=/Users/dimitarz/\\;filename.log;";
Assert.assertEquals(expected, new SLog(null, null, null)
.withUserData("r=sr", "/Users/dimitarz/;filename.log")
.withOperation("\\open")
.withOperationAlias("=1")
.withTask("file; opened")
.toString());
}
}<|fim▁end|> | .withUserData("size", "large")
.withInstrumentationOverride(0, null)
.toString()); |
<|file_name|>optimizer.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Transforms a display list to produce a visually-equivalent, but cheaper-to-paint, one.
use display_list::{DisplayItem, DisplayList, StackingContext};
use std::collections::linked_list::LinkedList;
use geom::rect::Rect;
use util::geometry::{self, Au};
use std::sync::Arc;
/// Transforms a display list to produce a visually-equivalent, but cheaper-to-paint, one.
pub struct DisplayListOptimizer {
/// The visible rect in page coordinates.
visible_rect: Rect<Au>,
}
impl DisplayListOptimizer {
/// Creates a new display list optimizer object. `visible_rect` specifies the visible rect in
/// page coordinates.
pub fn new(visible_rect: &Rect<f32>) -> DisplayListOptimizer {
DisplayListOptimizer {
visible_rect: geometry::f32_rect_to_au_rect(*visible_rect),
}
}
/// Optimizes the given display list, returning an equivalent, but cheaper-to-paint, one.
pub fn optimize(self, display_list: &DisplayList) -> DisplayList {
let mut result = DisplayList::new();
self.add_in_bounds_display_items(&mut result.background_and_borders,
display_list.background_and_borders.iter());
self.add_in_bounds_display_items(&mut result.block_backgrounds_and_borders,
display_list.block_backgrounds_and_borders.iter());
self.add_in_bounds_display_items(&mut result.floats, display_list.floats.iter());
self.add_in_bounds_display_items(&mut result.content, display_list.content.iter());
self.add_in_bounds_display_items(&mut result.positioned_content,
display_list.positioned_content.iter());
self.add_in_bounds_display_items(&mut result.outlines, display_list.outlines.iter());
self.add_in_bounds_stacking_contexts(&mut result.children, display_list.children.iter());
result
}
/// Adds display items that intersect the visible rect to `result_list`.
fn add_in_bounds_display_items<'a,I>(&self,
result_list: &mut LinkedList<DisplayItem>,
display_items: I)<|fim▁hole|> if self.visible_rect.intersects(&display_item.base().bounds) &&
display_item.base().clip.might_intersect_rect(&self.visible_rect) {
result_list.push_back((*display_item).clone())
}
}
}
/// Adds child stacking contexts whose boundaries intersect the visible rect to `result_list`.
fn add_in_bounds_stacking_contexts<'a,I>(&self,
result_list: &mut LinkedList<Arc<StackingContext>>,
stacking_contexts: I)
where I: Iterator<Item=&'a Arc<StackingContext>> {
for stacking_context in stacking_contexts {
let overflow = stacking_context.overflow.translate(&stacking_context.bounds.origin);
if self.visible_rect.intersects(&overflow) {
result_list.push_back((*stacking_context).clone())
}
}
}
}<|fim▁end|> | where I: Iterator<Item=&'a DisplayItem> {
for display_item in display_items { |
<|file_name|>na.rs<|end_file_name|><|fim▁begin|>use failure::Error;
use regex::Captures;
use crate::captures::CapturesExt;
use crate::details::Details;<|fim▁hole|>pub(crate) fn na_handler(
popularity: &mut Popularity,
sink: &mut Vec<Details>,
captures: Captures,
) -> Result<(), Error> {
let point = usize::from_str_radix(captures.name_ok("point")?, 16)?;
let alias = captures.name_ok("alias")?;
let r#type = captures.name_ok("type")?;
if ["figment", "control", "correction"].contains(&r#type) {
sink[point].name = Some(popularity.vote(alias));
}
Ok(())
}<|fim▁end|> | use crate::pool::Popularity;
|
<|file_name|>wicked.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
###############################################################################################
#
# MediaPortal for Dreambox OS
#
# Coded by MediaPortal Team (c) 2013-2017
#
# This plugin is open source but it is NOT free software.
#
# This plugin may only be distributed to and executed on hardware which
# is licensed by Dream Property GmbH. This includes commercial distribution.
# In other words:
# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way
# to hardware which is NOT licensed by Dream Property GmbH.
# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way
# on hardware which is NOT licensed by Dream Property GmbH.
#
# This applies to the source code as a whole as well as to parts of it, unless
# explicitly stated otherwise.
#
# If you want to use or modify the code or parts of it,
# you have to keep OUR license and inform us about the modifications, but it may NOT be
# commercially distributed other than under the conditions noted above.
#
# As an exception regarding execution on hardware, you are permitted to execute this plugin on VU+ hardware
# which is licensed by satco europe GmbH, if the VTi image is used on that hardware.
#
# As an exception regarding modifications, you are NOT permitted to remove
# any copy protections implemented in this plugin or change them for means of disabling
# or working around the copy protections, unless the change has been explicitly permitted
# by the original authors. Also decompiling and modification of the closed source
# parts is NOT permitted.
#
# Advertising with this plugin is NOT allowed.
# For other uses, permission from the authors is necessary.
#
###############################################################################################
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
from Plugins.Extensions.MediaPortal.resources.choiceboxext import ChoiceBoxExt
myagent = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:40.0) Gecko/20100101 Firefox/40.0'
BASE_NAME = "WickedPictures.com"
class wickedGenreScreen(MPScreen):
def __init__(self, session):
MPScreen.__init__(self, session, skin='MP_Plugin')
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft,
"cancel" : self.keyCancel
}, -1)
self['title'] = Label(BASE_NAME)
self['ContentTitle'] = Label("Genre:")
self.genreliste = []
self.suchString = ''
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.genreliste.insert(0, ("Exclusive Girls", 'http://www.wicked.com/tour/pornstars/exclusive/', None))
self.genreliste.insert(0, ("Most Active Girls", 'http://www.wicked.com/tour/pornstars/mostactive/', None))
self.genreliste.insert(0, ("Most Liked Girls", 'http://www.wicked.com/tour/pornstars/mostliked/', None))
self.genreliste.insert(0, ("Most Recent Girls", 'http://www.wicked.com/tour/pornstars/mostrecent/', None))
self.genreliste.insert(0, ("Most Viewed Movies", 'http://www.wicked.com/tour/movies/mostviewed/', None))
self.genreliste.insert(0, ("Top Rated Movies", 'http://www.wicked.com/tour/movies/toprated/', None))
self.genreliste.insert(0, ("Latest Movies", 'http://www.wicked.com/tour/movies/latest/', None))
self.genreliste.insert(0, ("Most Viewed Scenes", 'http://www.wicked.com/tour/videos/mostviewed/', None))
self.genreliste.insert(0, ("Top Rated Scenes", 'http://www.wicked.com/tour/videos/toprated/', None))
self.genreliste.insert(0, ("Latest Scenes", 'http://www.wicked.com/tour/videos/latest/', None))
self.genreliste.insert(0, ("--- Search ---", "callSuchen", None))
self.ml.setList(map(self._defaultlistcenter, self.genreliste))
self.showInfos()
def keyOK(self):
if not config.mediaportal.premiumize_use.value:
message = self.session.open(MessageBoxExt, _("%s only works with enabled MP premiumize.me option (MP Setup)!" % BASE_NAME), MessageBoxExt.TYPE_INFO, timeout=10)
return
Name = self['liste'].getCurrent()[0][0]
Link = self['liste'].getCurrent()[0][1]
if Name == "--- Search ---":
self.suchen()
elif re.match(".*?Girls", Name):
self.session.open(wickedGirlsScreen, Link, Name)
else:
self.session.open(wickedFilmScreen, Link, Name)
def SuchenCallback(self, callback = None, entry = None):
if callback is not None and len(callback):
self.suchString = callback
Name = "--- Search ---"
Link = self.suchString.replace(' ', '-')
self.session.open(wickedFilmScreen, Link, Name)
class wickedGirlsScreen(MPScreen, ThumbsHelper):
def __init__(self, session, Link, Name):
self.Link = Link
self.Name = Name
MPScreen.__init__(self, session, skin='MP_Plugin')
ThumbsHelper.__init__(self)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel" : self.keyCancel,
"5" : self.keyShowThumb,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft,
"nextBouquet" : self.keyPageUp,
"prevBouquet" : self.keyPageDown,
"green" : self.keyPageNumber
}, -1)
self['title'] = Label(BASE_NAME)
self['ContentTitle'] = Label("Genre: %s" % self.Name)
self['F2'] = Label(_("Page"))
self['Page'] = Label(_("Page:"))
self.keyLocked = True
self.page = 1
self.lastpage = 1
self.filmliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
self.keyLocked = True
self['name'].setText(_('Please wait...'))
self.filmliste = []
url = "%s%s/" % (self.Link, str(self.page))
getPage(url, agent=myagent).addCallback(self.loadData).addErrback(self.dataError)
def loadData(self, data):
self.getLastPage(data, 'class="paginationui-container(.*?)</ul>', '.*(?:\/|>)(\d+)')
parse = re.search('class="showcase-models(.*?)</section>', data, re.S)
Movies = re.findall('<a\shref="(.*?)"\sclass="showcase-models.*?img\ssrc="(.*?)"\stitle="(.*?)".*?scenes">(\d+)\sScenes', parse.group(1), re.S)
if Movies:
for (Url, Image, Title, Scenes) in Movies:
Url = "http://www.wicked.com" + Url
Title = Title + " - %s Scenes" % Scenes
self.filmliste.append((decodeHtml(Title), Url, Image))
if len(self.filmliste) == 0:
self.filmliste.append((_('No pornstars found!'), None, None))
self.ml.setList(map(self._defaultlistleft, self.filmliste))
self.ml.moveToIndex(0)
self.keyLocked = False
self.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, self.page, int(self.lastpage), mode=1)
self.showInfos()
def showInfos(self):
title = self['liste'].getCurrent()[0][0]
pic = self['liste'].getCurrent()[0][2]
self['name'].setText(title)
CoverHelper(self['coverArt']).getCover(pic)
def keyOK(self):
if self.keyLocked:
return
Link = self['liste'].getCurrent()[0][1]
if Link:
rangelist = [['Scenes', 'videos/'], ['Movies', 'movies/']]
self.session.openWithCallback(self.keyOK2, ChoiceBoxExt, title=_('Select Action'), list = rangelist)
def keyOK2(self, result):
if result:
Name = self['liste'].getCurrent()[0][0]
Link = self['liste'].getCurrent()[0][1]
Link = Link + result[1]
self.session.open(wickedFilmScreen, Link, Name)
class wickedFilmScreen(MPScreen, ThumbsHelper):
def __init__(self, session, Link, Name):
self.Link = Link
self.Name = Name
MPScreen.__init__(self, session, skin='MP_Plugin')
ThumbsHelper.__init__(self)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel" : self.keyCancel,
"5" : self.keyShowThumb,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft,
"nextBouquet" : self.keyPageUp,
"prevBouquet" : self.keyPageDown,
"green" : self.keyPageNumber
}, -1)
self['title'] = Label(BASE_NAME)
self['ContentTitle'] = Label("Genre: %s" % self.Name)
self['F2'] = Label(_("Page"))
self['Page'] = Label(_("Page:"))
self.keyLocked = True
self.page = 1
self.lastpage = 9
self.filmliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)<|fim▁hole|> self['liste'] = self.ml
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
self.keyLocked = True
self['name'].setText(_('Please wait...'))
self.filmliste = []
if re.match(".*?Search", self.Name):
url = "http://www.wicked.com/tour/search/videos/%s/%s/" % (self.Link, str(self.page))
else:
url = "%s%s/" % (self.Link, str(self.page))
getPage(url, agent=myagent).addCallback(self.loadData).addErrback(self.dataError)
def loadData(self, data):
if re.match(".*?Search", self.Name):
self.getLastPage(data, 'class="paginationui-container(.*?)</ul>', '.*(?:\/|>)(\d+)')
elif re.match(".*?/tour/pornstar", self.Link):
self.getLastPage(data, 'class="paginationui-container(.*?)</ul>', '.*(?:\/|>)(\d+)')
else:
self['page'].setText(str(self.page) + ' / ' + str(self.lastpage))
parse = re.search('lass="showcase-movies">(.*?)</section>', data, re.S)
if parse:
Movies = re.findall('<a\shref="(.*?)"\sclass="showcase-movies.*?img\ssrc="(.*?)"\salt=".*?"\stitle="(.*?)"', parse.group(1), re.S)
else:
parse = re.search('class="showcase-scenes">(.*?)</section>', data, re.S)
if parse:
Movies = re.findall('<a\shref="(.*?)"\sclass="showcase-scenes.*?img\ssrc="(.*?)"\stitle=".*?"\salt="(.*?)"', parse.group(1), re.S)
if Movies:
for (Url, Image, Title) in Movies:
Image = Image.replace('_2.jpg','_1.jpg')
Url = "http://www.wicked.com" + Url
self.filmliste.append((decodeHtml(Title), Url, Image))
if len(self.filmliste) == 0:
self.filmliste.append((_('No videos found!'), '', None, ''))
self.ml.setList(map(self._defaultlistleft, self.filmliste))
self.ml.moveToIndex(0)
self.keyLocked = False
self.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, self.page, int(self.lastpage), mode=1)
self.showInfos()
def showInfos(self):
title = self['liste'].getCurrent()[0][0]
pic = self['liste'].getCurrent()[0][2]
self['name'].setText(title)
CoverHelper(self['coverArt']).getCover(pic)
def keyOK(self):
if self.keyLocked:
return
Link = self['liste'].getCurrent()[0][1]
get_stream_link(self.session).check_link(Link, self.play)
def play(self, url):
title = self['liste'].getCurrent()[0][0]
self.session.open(SimplePlayer, [(title, url.replace('%2F','%252F').replace('%3D','%253D').replace('%2B','%252B'))], showPlaylist=False, ltype='wicked')<|fim▁end|> | |
<|file_name|>ModelBuildPath.java<|end_file_name|><|fim▁begin|>/*
* #%~
* org.overture.ide.core
* %%
* Copyright (C) 2008 - 2014 Overture
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/gpl-3.0.html>.
* #~%
*/
package org.overture.ide.core.resources;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.List;
import java.util.Vector;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.eclipse.core.resources.IContainer;
import org.eclipse.core.resources.IFolder;
import org.eclipse.core.resources.IProject;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IPath;
import org.overture.ide.core.VdmCore;
import org.overture.ide.internal.core.ResourceManager;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
public class ModelBuildPath
{
final IVdmProject vdmProject;
final IProject project;
final File modelPathFile;
List<IContainer> srcPaths = new Vector<IContainer>();
IContainer output;
IContainer library;
public ModelBuildPath(IVdmProject project)
{
this.vdmProject = project;
this.project = (IProject) this.vdmProject.getAdapter(IProject.class);
IPath base = this.project.getLocation();
base = base.append(".modelpath");
this.modelPathFile = base.toFile();
this.output = this.project.getFolder("generated");
this.library = this.project.getFolder("lib");
parse();
}
private boolean hasModelPath()
{
return this.modelPathFile.exists();
}
private IContainer getDefaultModelSrcPath()
{
return this.project;
}
public List<IContainer> getModelSrcPaths()
{
List<IContainer> tmp = new Vector<IContainer>(srcPaths.size());
tmp.addAll(srcPaths);
return tmp;
}
public synchronized IContainer getOutput()
{
return this.output;
}
public synchronized IContainer getLibrary()
{
return this.library;
}
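	/**
	 * Reads the .modelpath XML file, if present, and populates the source,
	 * output and library containers from its "modelpathentry" elements.
	 */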
private synchronized void parse()
{
if (!hasModelPath())<|fim▁hole|> return;
}
try
{
File file = this.modelPathFile;
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
Document doc = db.parse(file);
doc.getDocumentElement().normalize();
NodeList nodeLst = doc.getElementsByTagName("modelpathentry");
for (int s = 0; s < nodeLst.getLength(); s++)
{
Node fstNode = nodeLst.item(s);
if (fstNode.getNodeType() == Node.ELEMENT_NODE)
{
Node kindAttribute = fstNode.getAttributes().getNamedItem("kind");
String kindValue = kindAttribute.getNodeValue();
if (kindValue != null)
{
if (kindValue.equals("src"))
{
Node pathAttribute = fstNode.getAttributes().getNamedItem("path");
String pathValue = pathAttribute.getNodeValue();
if(pathValue.equals("."))
{
add(getDefaultModelSrcPath());
}else
{
add(this.project.getFolder(pathValue));
}
} else if (kindValue.equals("output"))
{
Node pathAttribute = fstNode.getAttributes().getNamedItem("path");
String pathValue = pathAttribute.getNodeValue();
output = this.project.getFolder(pathValue);
} else if (kindValue.equals("library"))
{
Node pathAttribute = fstNode.getAttributes().getNamedItem("path");
String pathValue = pathAttribute.getNodeValue();
library = this.project.getFolder(pathValue);
}
}
}
}
if(srcPaths.isEmpty())
{
srcPaths.add(getDefaultModelSrcPath());
}
} catch (Exception e)
{
			VdmCore.log("Failed to parse .modelpath file", e);
}
}
public synchronized void setOutput(IContainer container)
{
this.output = container;
}
public synchronized void setLibrary(IContainer container)
{
this.library = container;
}
public synchronized void add(IContainer container)
{
if(container instanceof IProject)
{
srcPaths.clear();
}
else if(container instanceof IFolder)
{
String fullPath = container.getProjectRelativePath().toString();
boolean flag = true;
for (IContainer s : srcPaths)
{
flag = flag && s.getProjectRelativePath().toString().startsWith(fullPath);
}
if(flag)
srcPaths.clear();
}
if (!srcPaths.contains(container))
{
srcPaths.add(container);
}
}
public synchronized void remove(IContainer container)
{
if (srcPaths.contains(container))
{
srcPaths.remove(container);
}
}
public synchronized boolean contains(IContainer container)
{
return srcPaths.contains(container);
}
public synchronized void save() throws CoreException
{
StringBuffer sb = new StringBuffer();
sb.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
sb.append("<modelpath>\n");
for (IContainer src : srcPaths)
{
if (src.getProjectRelativePath().toString().length() > 0)
{
sb.append("\t<modelpathentry kind=\"src\" path=\""
+ src.getProjectRelativePath() + "\"/>\n");
}else if (src instanceof IProject)
{
sb.append("\t<modelpathentry kind=\"src\" path=\".\"/>\n");
}
}
if (output != null
&& output.getProjectRelativePath().toString().length() > 0)
{
sb.append("\t<modelpathentry kind=\"output\" path=\""
+ output.getProjectRelativePath() + "\"/>\n");
}
if (library != null
&& library.getProjectRelativePath().toString().length() > 0)
{
sb.append("\t<modelpathentry kind=\"library\" path=\""
+ library.getProjectRelativePath() + "\"/>\n");
}
sb.append("</modelpath>");
PrintWriter out = null;
try
{
FileWriter outFile = new FileWriter(this.modelPathFile);
out = new PrintWriter(outFile);
out.println(sb.toString());
} catch (IOException e)
{
			VdmCore.log("Failed to save .modelpath file", e);
} finally
{
if (out != null)
{
out.close();
}
}
ResourceManager.getInstance().syncBuildPath(vdmProject);
}
/**
* Reload the build path and discard any un-saved changes
*/
public void reload()
{
parse();
}
}<|fim▁end|> | {
srcPaths.add(getDefaultModelSrcPath()); |
<|file_name|>get_users_emails.py<|end_file_name|><|fim▁begin|>from settings.secure import OAUTH_TOKEN, CANVAS_URL
from canvas_sdk.methods import courses
from canvas_sdk.utils import get_all_list_data
from canvas_sdk import RequestContext
import sys
# Get the course ID from the command line
course_id = None
if len(sys.argv) == 2:
course_id = sys.argv[1]
else:
sys.exit("Error: missing course_id")
# Setup the request context with a large pagination limit (minimize # of requests)
request_context = RequestContext(OAUTH_TOKEN, CANVAS_URL, per_page=100)
# NOTE: you must use get_all_list_data() in order to follow the paginated results
# and get all the data.
#
# If you just call the method directly, you'll get a single page (max 100 results)
# which may or may not include everyone if there are >100 students in the course.
results = get_all_list_data(request_context, courses.list_users_in_course_users, course_id, "email", enrollment_type="student")
# Extract and sort the results we want.
users = sorted([(x['email'], x['name']) for x in results], key=lambda x: x[0])
<|fim▁hole|><|fim▁end|> | # Print the names and emails in CSV format
for idx, user in enumerate(users):
print "%s,%s" % user |
<|file_name|>gtc_noise.cpp<|end_file_name|><|fim▁begin|>#define GLM_ENABLE_EXPERIMENTAL
#include <glm/gtc/noise.hpp>
#include <glm/gtc/type_precision.hpp>
#include <glm/gtx/raw_data.hpp>
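// These tests only exercise the 2D/3D/4D noise overloads at the origin;
// Error is never incremented, so they mainly verify that the calls compile and run.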
int test_simplex()
{
int Error = 0;
glm::u8vec4 const PixelSimplex2D(glm::byte(glm::abs(glm::simplex(glm::vec2(0.f, 0.f))) * 255.f));
glm::u8vec4 const PixelSimplex3D(glm::byte(glm::abs(glm::simplex(glm::vec3(0.f, 0.f, 0.f))) * 255.f));
glm::u8vec4 const PixelSimplex4D(glm::byte(glm::abs(glm::simplex(glm::vec4(0.f, 0.f, 0.f, 0.f))) * 255.f));
return Error;
}
int test_perlin()
{
int Error = 0;
glm::u8vec4 const PixelPerlin2D(glm::byte(glm::abs(glm::perlin(glm::vec2(0.f, 0.f))) * 255.f));
glm::u8vec4 const PixelPerlin3D(glm::byte(glm::abs(glm::perlin(glm::vec3(0.f, 0.f, 0.f))) * 255.f));
glm::u8vec4 const PixelPerlin4D(glm::byte(glm::abs(glm::perlin(glm::vec4(0.f, 0.f, 0.f, 0.f))) * 255.f));
return Error;
}
int test_perlin_pedioric()
{
int Error = 0;
glm::u8vec4 const PixelPeriodic2D(glm::byte(glm::abs(glm::perlin(glm::vec2(0.f, 0.f), glm::vec2(2.0f))) * 255.f));
glm::u8vec4 const PixelPeriodic3D(glm::byte(glm::abs(glm::perlin(glm::vec3(0.f, 0.f, 0.f), glm::vec3(2.0f))) * 255.f));
glm::u8vec4 const PixelPeriodic4D(glm::byte(glm::abs(glm::perlin(glm::vec4(0.f, 0.f, 0.f, 0.f), glm::vec4(2.0f))) * 255.f));
return Error;
}
<|fim▁hole|>int main()
{
int Error = 0;
Error += test_simplex();
Error += test_perlin();
Error += test_perlin_pedioric();
return Error;
}<|fim▁end|> | |
<|file_name|>index.ts<|end_file_name|><|fim▁begin|>/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/<|fim▁hole|><|fim▁end|> |
export { EditUsers } from './EditUsers'; |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | from django.views.generic import * |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from setuptools import setup
setup(
name='g1.databases',
packages=[
'g1.databases',
],
install_requires=[
'SQLAlchemy',
'g1.bases',
],
extras_require={
'parts': [
'g1.apps',<|fim▁hole|> ],
},
zip_safe=False,
)<|fim▁end|> | |
<|file_name|>Elements.ts<|end_file_name|><|fim▁begin|>/**
* Copyright (c) 2017 ~ present NAVER Corp.
* billboard.js project is licensed under the MIT license
*/
// @ts-nocheck
import CLASS from "./classes";
import {getCentroid, isString, parseDate} from "./util";
/**
* Stanford diagram plugin element class
 * @class Elements
* @param {Stanford} owner Stanford instance
* @private
*/
export default class Elements {
private owner;
constructor(owner) {
this.owner = owner;
// MEMO: Avoid blocking eventRect
const elements = owner.$$.$el.main.select(".bb-chart")
.append("g")
.attr("class", CLASS.stanfordElements);
elements.append("g").attr("class", CLASS.stanfordLines);
elements.append("g").attr("class", CLASS.stanfordRegions);
}
updateStanfordLines(duration: number): void {
const {$$} = this.owner;
const {config, $el: {main}} = $$;
const isRotated = config.axis_rotated;
const xvCustom = this.xvCustom.bind($$);
const yvCustom = this.yvCustom.bind($$);
// Stanford-Lines
const stanfordLine = main.select(`.${CLASS.stanfordLines}`)
.style("shape-rendering", "geometricprecision")
.selectAll(`.${CLASS.stanfordLine}`)
.data(this.owner.config.lines);
// exit
stanfordLine.exit().transition()
.duration(duration)
.style("opacity", "0")
.remove();
// enter
const stanfordLineEnter = stanfordLine.enter().append("g");
stanfordLineEnter.append("line")
.style("opacity", "0");
stanfordLineEnter
.merge(stanfordLine)
.attr("class", d => CLASS.stanfordLine + (d.class ? ` ${d.class}` : ""))
.select("line")
.transition()
.duration(duration)
.attr("x1", d => (isRotated ? yvCustom(d, "y1") : xvCustom(d, "x1")))
.attr("x2", d => (isRotated ? yvCustom(d, "y2") : xvCustom(d, "x2")))
.attr("y1", d => (isRotated ? xvCustom(d, "x1") : yvCustom(d, "y1")))
.attr("y2", d => (isRotated ? xvCustom(d, "x2") : yvCustom(d, "y2")))
.transition()
.style("opacity", null);
}
updateStanfordRegions(duration: number): void {
const {$$} = this.owner;
const {config, $el: {main}} = $$;
const isRotated = config.axis_rotated;
const xvCustom = this.xvCustom.bind($$);
const yvCustom = this.yvCustom.bind($$);
const countPointsInRegion = this.owner.countEpochsInRegion.bind($$);
// Stanford-Regions
let stanfordRegion = main.select(`.${CLASS.stanfordRegions}`)
.selectAll(`.${CLASS.stanfordRegion}`)
.data(this.owner.config.regions);
// exit
stanfordRegion.exit().transition()
.duration(duration)
.style("opacity", "0")
.remove();
// enter
const stanfordRegionEnter = stanfordRegion.enter().append("g");
stanfordRegionEnter.append("polygon")
.style("opacity", "0");
stanfordRegionEnter.append("text")
.attr("transform", isRotated ? "rotate(-90)" : "")
.style("opacity", "0");
stanfordRegion = stanfordRegionEnter.merge(stanfordRegion);
// update
stanfordRegion
.attr("class", d => CLASS.stanfordRegion + (d.class ? ` ${d.class}` : ""))
.select("polygon")
.transition()
.duration(duration)
.attr("points", d => d.points.map(value => [
isRotated ? yvCustom(value, "y") : xvCustom(value, "x"),
isRotated ? xvCustom(value, "x") : yvCustom(value, "y")
].join(",")).join(" "))
.transition()
.style("opacity", d => String(d.opacity ? d.opacity : 0.2));
stanfordRegion.select("text")
.transition()
.duration(duration)
.attr("x", d => (isRotated ? yvCustom(getCentroid(d.points), "y") : xvCustom(getCentroid(d.points), "x")))
.attr("y", d => (isRotated ? xvCustom(getCentroid(d.points), "x") : yvCustom(getCentroid(d.points), "y")))
.text(d => {
if (d.text) {
const {value, percentage} = countPointsInRegion(d.points);
return d.text(value, percentage);
}
return "";
})
.attr("text-anchor", "middle")
.attr("dominant-baseline", "middle")
.transition()
.style("opacity", null);
}
updateStanfordElements(duration = 0): void {
this.updateStanfordLines(duration);
this.updateStanfordRegions(duration);
}
xvCustom(d, xyValue): number {
const $$ = this;
const {axis, config} = $$;
let value = xyValue ? d[xyValue] : $$.getBaseValue(d);
if (axis.isTimeSeries()) {
value = parseDate.call($$, value);
} else if (axis.isCategorized() && isString(value)) {
value = config.axis_x_categories.indexOf(d.value);
}
return Math.ceil($$.scale.x(value));
}
<|fim▁hole|> const value = xyValue ? d[xyValue] : $$.getBaseValue(d);
return Math.ceil(yScale(value));
}
}<|fim▁end|> | yvCustom(d, xyValue): number {
const $$ = this;
const yScale = d.axis && d.axis === "y2" ? $$.scale.y2 : $$.scale.y; |
<|file_name|>init.py<|end_file_name|><|fim▁begin|>#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from color import Coloring
from command import InteractiveCommand, MirrorSafeCommand
from error import ManifestParseError
from project import SyncBuffer
from git_command import git_require, MIN_GIT_VERSION
class Init(InteractiveCommand, MirrorSafeCommand):
common = True
helpSummary = "Initialize repo in the current directory"
helpUsage = """
%prog [options]
"""
helpDescription = """
The '%prog' command is run once to install and initialize repo.
The latest repo source code and manifest collection is downloaded
from the server and is installed in the .repo/ directory in the
current working directory.
The optional -b argument can be used to select the manifest branch
to checkout and use. If no branch is specified, master is assumed.
The optional -m argument can be used to specify an alternate manifest
to be used. If no manifest is specified, the manifest default.xml
will be used.
The --reference option can be used to point to a directory that
has the content of a --mirror sync. This will make the working
directory use as much data as possible from the local reference
directory when fetching from the server. This will make the sync
go a lot faster by reducing data traffic on the network.
Switching Manifest Branches
---------------------------
To switch to another manifest branch, `repo init -b otherbranch`
may be used in an existing client. However, as this only updates the
manifest, a subsequent `repo sync` (or `repo sync -d`) is necessary
to update the working directory files.
"""
def _Options(self, p):
# Logging
g = p.add_option_group('Logging options')
g.add_option('-q', '--quiet',
dest="quiet", action="store_true", default=False,
help="be quiet")
# Manifest
g = p.add_option_group('Manifest options')
g.add_option('-u', '--manifest-url',
dest='manifest_url',
help='manifest repository location', metavar='URL')
g.add_option('-b', '--manifest-branch',
dest='manifest_branch',
help='manifest branch or revision', metavar='REVISION')
g.add_option('-m', '--manifest-name',
dest='manifest_name', default='default.xml',
help='initial manifest file', metavar='NAME.xml')
g.add_option('--mirror',
dest='mirror', action='store_true',
                 help='mirror the forest')
g.add_option('--reference',
dest='reference',
help='location of mirror directory', metavar='DIR')
# Tool
g = p.add_option_group('repo Version options')
g.add_option('--repo-url',
dest='repo_url',
help='repo repository location', metavar='URL')
g.add_option('--repo-branch',
dest='repo_branch',
help='repo branch or revision', metavar='REVISION')
g.add_option('--no-repo-verify',
dest='no_repo_verify', action='store_true',
help='do not verify repo source code')
def _SyncManifest(self, opt):
m = self.manifest.manifestProject
is_new = not m.Exists
if is_new:
if not opt.manifest_url:
print >>sys.stderr, 'fatal: manifest url (-u) is required.'
sys.exit(1)
if not opt.quiet:
print >>sys.stderr, 'Getting manifest ...'
print >>sys.stderr, ' from %s' % opt.manifest_url
m._InitGitDir()
if opt.manifest_branch:
m.revisionExpr = opt.manifest_branch
else:
m.revisionExpr = 'refs/heads/master'
else:
if opt.manifest_branch:
m.revisionExpr = opt.manifest_branch<|fim▁hole|> else:
m.PreSync()
if opt.manifest_url:
r = m.GetRemote(m.remote.name)
r.url = opt.manifest_url
r.ResetFetch()
r.Save()
if opt.reference:
m.config.SetString('repo.reference', opt.reference)
if opt.mirror:
if is_new:
m.config.SetString('repo.mirror', 'true')
else:
print >>sys.stderr, 'fatal: --mirror not supported on existing client'
sys.exit(1)
if not m.Sync_NetworkHalf():
r = m.GetRemote(m.remote.name)
print >>sys.stderr, 'fatal: cannot obtain manifest %s' % r.url
sys.exit(1)
syncbuf = SyncBuffer(m.config)
m.Sync_LocalHalf(syncbuf)
syncbuf.Finish()
if is_new or m.CurrentBranch is None:
if not m.StartBranch('default'):
print >>sys.stderr, 'fatal: cannot create default in manifest'
sys.exit(1)
def _LinkManifest(self, name):
if not name:
print >>sys.stderr, 'fatal: manifest name (-m) is required.'
sys.exit(1)
try:
self.manifest.Link(name)
except ManifestParseError, e:
print >>sys.stderr, "fatal: manifest '%s' not available" % name
print >>sys.stderr, 'fatal: %s' % str(e)
sys.exit(1)
def _Prompt(self, prompt, value):
mp = self.manifest.manifestProject
sys.stdout.write('%-10s [%s]: ' % (prompt, value))
a = sys.stdin.readline().strip()
if a == '':
return value
return a
def _ConfigureUser(self):
mp = self.manifest.manifestProject
while True:
print ''
name = self._Prompt('Your Name', mp.UserName)
email = self._Prompt('Your Email', mp.UserEmail)
print ''
print 'Your identity is: %s <%s>' % (name, email)
sys.stdout.write('is this correct [y/n]? ')
a = sys.stdin.readline().strip()
if a in ('yes', 'y', 't', 'true'):
break
if name != mp.UserName:
mp.config.SetString('user.name', name)
if email != mp.UserEmail:
mp.config.SetString('user.email', email)
def _HasColorSet(self, gc):
for n in ['ui', 'diff', 'status']:
if gc.Has('color.%s' % n):
return True
return False
def _ConfigureColor(self):
gc = self.manifest.globalConfig
if self._HasColorSet(gc):
return
class _Test(Coloring):
def __init__(self):
Coloring.__init__(self, gc, 'test color display')
self._on = True
out = _Test()
print ''
print "Testing colorized output (for 'repo diff', 'repo status'):"
for c in ['black','red','green','yellow','blue','magenta','cyan']:
out.write(' ')
out.printer(fg=c)(' %-6s ', c)
out.write(' ')
out.printer(fg='white', bg='black')(' %s ' % 'white')
out.nl()
for c in ['bold','dim','ul','reverse']:
out.write(' ')
out.printer(fg='black', attr=c)(' %-6s ', c)
out.nl()
sys.stdout.write('Enable color display in this user account (y/n)? ')
a = sys.stdin.readline().strip().lower()
if a in ('y', 'yes', 't', 'true', 'on'):
gc.SetString('color.ui', 'auto')
def Execute(self, opt, args):
git_require(MIN_GIT_VERSION, fail=True)
self._SyncManifest(opt)
self._LinkManifest(opt.manifest_name)
if os.isatty(0) and os.isatty(1) and not self.manifest.IsMirror:
self._ConfigureUser()
self._ConfigureColor()
if self.manifest.IsMirror:
type = 'mirror '
else:
type = ''
print ''
print 'repo %sinitialized in %s' % (type, self.manifest.topdir)<|fim▁end|> | |
<|file_name|>command.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import shutil
import subprocess
from .logger import logger, ctx
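# Resolve an executable: use the given path if it exists, otherwise fall back to
# searching PATH via shutil.which().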
def find_exec(executable):
exec_exists = os.path.exists(executable)
return executable if exec_exists else shutil.which(executable)
# Decorator running a command and returning stdout
class capture_stdout:
def __init__(self, strip=False):
self.strip = strip
def __call__(self, f):
def strip_it(x):
return x.strip() if self.strip else x
def wrapper(*argv, **kwargs):
# Ensure stdout is captured
kwargs["stdout"] = subprocess.PIPE
return strip_it(f(*argv, **kwargs).stdout)
return wrapper
class Command:
""" A runnable command.
    Classes inheriting from the Command class must provide the bin
property/attribute.
"""
def run(self, *argv, **kwargs):
assert(hasattr(self, "bin"))
invocation = [find_exec(self.bin)]
invocation.extend(argv)
for key in ["stdout", "stderr"]:
# Preserve caller intention, otherwise silence
if key not in kwargs and ctx.quiet:
kwargs[key] = subprocess.PIPE<|fim▁hole|>
logger.debug(f"Executing `{invocation}`")
return subprocess.run(invocation, **kwargs)
def __call__(self, *argv, **kwargs):
self.run(*argv, **kwargs)<|fim▁end|> |
# Prefer safe by default
if "check" not in kwargs:
kwargs["check"] = True |
<|file_name|>test_context.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
Unit tests of context.py.
"""
from datetime import datetime
import unittest
from pystache.context import _NOT_FOUND, _get_value, KeyNotFoundError, ContextStack
from pystache.tests.common import AssertIsMixin, AssertStringMixin, AssertExceptionMixin, Attachable
class SimpleObject(object):
"""A sample class that does not define __getitem__()."""
def __init__(self):
self.foo = "bar"
def foo_callable(self):
return "called..."
class DictLike(object):
"""A sample class that implements __getitem__() and __contains__()."""
def __init__(self):
self._dict = {'foo': 'bar'}
self.fuzz = 'buzz'
def __contains__(self, key):
return key in self._dict
def __getitem__(self, key):
return self._dict[key]
class GetValueTestCase(unittest.TestCase, AssertIsMixin):
"""Test context._get_value()."""
def assertNotFound(self, item, key):
"""
Assert that a call to _get_value() returns _NOT_FOUND.
"""
self.assertIs(_get_value(item, key), _NOT_FOUND)
### Case: the item is a dictionary.
def test_dictionary__key_present(self):
"""
Test getting a key from a dictionary.
"""
item = {"foo": "bar"}
self.assertEqual(_get_value(item, "foo"), "bar")
def test_dictionary__callable_not_called(self):
"""
Test that callable values are returned as-is (and in particular not called).
"""
def foo_callable(self):
return "bar"
item = {"foo": foo_callable}
self.assertNotEqual(_get_value(item, "foo"), "bar")
self.assertTrue(_get_value(item, "foo") is foo_callable)
def test_dictionary__key_missing(self):
"""
Test getting a missing key from a dictionary.
"""
item = {}
self.assertNotFound(item, "missing")
def test_dictionary__attributes_not_checked(self):
"""
Test that dictionary attributes are not checked.
"""
item = {1: 2, 3: 4}
# I was not able to find a "public" attribute of dict that is
# the same across Python 2/3.
attr_name = "__len__"
self.assertEqual(getattr(item, attr_name)(), 2)
self.assertNotFound(item, attr_name)
def test_dictionary__dict_subclass(self):
"""
Test that subclasses of dict are treated as dictionaries.
"""
class DictSubclass(dict): pass
item = DictSubclass()
item["foo"] = "bar"
self.assertEqual(_get_value(item, "foo"), "bar")
### Case: the item is an object.
def test_object__attribute_present(self):
"""
Test getting an attribute from an object.
"""
item = SimpleObject()
self.assertEqual(_get_value(item, "foo"), "bar")
def test_object__attribute_missing(self):
"""
Test getting a missing attribute from an object.
"""
item = SimpleObject()
self.assertNotFound(item, "missing")
def test_object__attribute_is_callable(self):
"""
Test getting a callable attribute from an object.
"""
item = SimpleObject()
self.assertEqual(_get_value(item, "foo_callable"), "called...")
def test_object__non_built_in_type(self):
"""
Test getting an attribute from an instance of a type that isn't built-in.
"""
item = datetime(2012, 1, 2)
self.assertEqual(_get_value(item, "day"), 2)
def test_object__dict_like(self):
"""
Test getting a key from a dict-like object (an object that implements '__getitem__').
"""
item = DictLike()
self.assertEqual(item["foo"], "bar")
self.assertNotFound(item, "foo")
def test_object__property__raising_exception(self):
"""
Test getting a property that raises an exception.
"""
class Foo(object):
@property
def bar(self):
return 1
@property
def baz(self):
raise ValueError("test")
foo = Foo()
self.assertEqual(_get_value(foo, 'bar'), 1)
self.assertNotFound(foo, 'missing')
self.assertRaises(ValueError, _get_value, foo, 'baz')
### Case: the item is an instance of a built-in type.
def test_built_in_type__integer(self):
"""
Test getting from an integer.
"""
class MyInt(int): pass
cust_int = MyInt(10)
pure_int = 10
# We have to use a built-in method like __neg__ because "public"
# attributes like "real" were not added to Python until Python 2.6,
# when the numeric type hierarchy was added:
#
# http://docs.python.org/library/numbers.html
#
self.assertEqual(cust_int.__neg__(), -10)
self.assertEqual(pure_int.__neg__(), -10)
self.assertEqual(_get_value(cust_int, '__neg__'), -10)
self.assertNotFound(pure_int, '__neg__')
def test_built_in_type__string(self):
"""
Test getting from a string.
"""
class MyStr(str): pass
item1 = MyStr('abc')
item2 = 'abc'
self.assertEqual(item1.upper(), 'ABC')
self.assertEqual(item2.upper(), 'ABC')<|fim▁hole|>
def test_built_in_type__list(self):
"""
Test getting from a list.
"""
class MyList(list): pass
item1 = MyList([1, 2, 3])
item2 = [1, 2, 3]
self.assertEqual(item1.pop(), 3)
self.assertEqual(item2.pop(), 3)
self.assertEqual(_get_value(item1, 'pop'), 2)
self.assertNotFound(item2, 'pop')
class ContextStackTestCase(unittest.TestCase, AssertIsMixin, AssertStringMixin,
AssertExceptionMixin):
"""
Test the ContextStack class.
"""
def test_init__no_elements(self):
"""
Check that passing nothing to __init__() raises no exception.
"""
context = ContextStack()
def test_init__many_elements(self):
"""
Check that passing more than two items to __init__() raises no exception.
"""
context = ContextStack({}, {}, {})
def test__repr(self):
context = ContextStack()
self.assertEqual(repr(context), 'ContextStack()')
context = ContextStack({'foo': 'bar'})
self.assertEqual(repr(context), "ContextStack({'foo': 'bar'},)")
context = ContextStack({'foo': 'bar'}, {'abc': 123})
self.assertEqual(repr(context), "ContextStack({'foo': 'bar'}, {'abc': 123})")
def test__str(self):
context = ContextStack()
self.assertEqual(str(context), 'ContextStack()')
context = ContextStack({'foo': 'bar'})
self.assertEqual(str(context), "ContextStack({'foo': 'bar'},)")
context = ContextStack({'foo': 'bar'}, {'abc': 123})
self.assertEqual(str(context), "ContextStack({'foo': 'bar'}, {'abc': 123})")
## Test the static create() method.
def test_create__dictionary(self):
"""
Test passing a dictionary.
"""
context = ContextStack.create({'foo': 'bar'})
self.assertEqual(context.get('foo'), 'bar')
def test_create__none(self):
"""
Test passing None.
"""
context = ContextStack.create({'foo': 'bar'}, None)
self.assertEqual(context.get('foo'), 'bar')
def test_create__object(self):
"""
Test passing an object.
"""
class Foo(object):
foo = 'bar'
context = ContextStack.create(Foo())
self.assertEqual(context.get('foo'), 'bar')
def test_create__context(self):
"""
Test passing a ContextStack instance.
"""
obj = ContextStack({'foo': 'bar'})
context = ContextStack.create(obj)
self.assertEqual(context.get('foo'), 'bar')
def test_create__kwarg(self):
"""
Test passing a keyword argument.
"""
context = ContextStack.create(foo='bar')
self.assertEqual(context.get('foo'), 'bar')
def test_create__precedence_positional(self):
"""
Test precedence of positional arguments.
"""
context = ContextStack.create({'foo': 'bar'}, {'foo': 'buzz'})
self.assertEqual(context.get('foo'), 'buzz')
def test_create__precedence_keyword(self):
"""
Test precedence of keyword arguments.
"""
context = ContextStack.create({'foo': 'bar'}, foo='buzz')
self.assertEqual(context.get('foo'), 'buzz')
## Test the get() method.
def test_get__single_dot(self):
"""
Test getting a single dot (".").
"""
context = ContextStack("a", "b")
self.assertEqual(context.get("."), "b")
def test_get__single_dot__missing(self):
"""
Test getting a single dot (".") with an empty context stack.
"""
context = ContextStack()
self.assertException(KeyNotFoundError, "Key '.' not found: empty context stack", context.get, ".")
def test_get__key_present(self):
"""
Test getting a key.
"""
context = ContextStack({"foo": "bar"})
self.assertEqual(context.get("foo"), "bar")
def test_get__key_missing(self):
"""
Test getting a missing key.
"""
context = ContextStack()
self.assertException(KeyNotFoundError, "Key 'foo' not found: first part", context.get, "foo")
def test_get__precedence(self):
"""
Test that get() respects the order of precedence (later items first).
"""
context = ContextStack({"foo": "bar"}, {"foo": "buzz"})
self.assertEqual(context.get("foo"), "buzz")
def test_get__fallback(self):
"""
Check that first-added stack items are queried on context misses.
"""
context = ContextStack({"fuzz": "buzz"}, {"foo": "bar"})
self.assertEqual(context.get("fuzz"), "buzz")
def test_push(self):
"""
Test push().
"""
key = "foo"
context = ContextStack({key: "bar"})
self.assertEqual(context.get(key), "bar")
context.push({key: "buzz"})
self.assertEqual(context.get(key), "buzz")
def test_pop(self):
"""
Test pop().
"""
key = "foo"
context = ContextStack({key: "bar"}, {key: "buzz"})
self.assertEqual(context.get(key), "buzz")
item = context.pop()
self.assertEqual(item, {"foo": "buzz"})
self.assertEqual(context.get(key), "bar")
def test_top(self):
key = "foo"
context = ContextStack({key: "bar"}, {key: "buzz"})
self.assertEqual(context.get(key), "buzz")
top = context.top()
self.assertEqual(top, {"foo": "buzz"})
# Make sure calling top() didn't remove the item from the stack.
self.assertEqual(context.get(key), "buzz")
def test_copy(self):
key = "foo"
original = ContextStack({key: "bar"}, {key: "buzz"})
self.assertEqual(original.get(key), "buzz")
new = original.copy()
# Confirm that the copy behaves the same.
self.assertEqual(new.get(key), "buzz")
# Change the copy, and confirm it is changed.
new.pop()
self.assertEqual(new.get(key), "bar")
# Confirm the original is unchanged.
self.assertEqual(original.get(key), "buzz")
def test_dot_notation__dict(self):
name = "foo.bar"
stack = ContextStack({"foo": {"bar": "baz"}})
self.assertEqual(stack.get(name), "baz")
# Works all the way down
name = "a.b.c.d.e.f.g"
stack = ContextStack({"a": {"b": {"c": {"d": {"e": {"f": {"g": "w00t!"}}}}}}})
self.assertEqual(stack.get(name), "w00t!")
def test_dot_notation__user_object(self):
name = "foo.bar"
stack = ContextStack({"foo": Attachable(bar="baz")})
self.assertEqual(stack.get(name), "baz")
# Works on multiple levels, too
name = "a.b.c.d.e.f.g"
A = Attachable
stack = ContextStack({"a": A(b=A(c=A(d=A(e=A(f=A(g="w00t!"))))))})
self.assertEqual(stack.get(name), "w00t!")
def test_dot_notation__mixed_dict_and_obj(self):
name = "foo.bar.baz.bak"
stack = ContextStack({"foo": Attachable(bar={"baz": Attachable(bak=42)})})
self.assertEqual(stack.get(name), 42)
def test_dot_notation__missing_attr_or_key(self):
name = "foo.bar.baz.bak"
stack = ContextStack({"foo": {"bar": {}}})
self.assertException(KeyNotFoundError, "Key 'foo.bar.baz.bak' not found: missing 'baz'", stack.get, name)
stack = ContextStack({"foo": Attachable(bar=Attachable())})
self.assertException(KeyNotFoundError, "Key 'foo.bar.baz.bak' not found: missing 'baz'", stack.get, name)
def test_dot_notation__missing_part_terminates_search(self):
"""
Test that dotted name resolution terminates on a later part not found.
Check that if a later dotted name part is not found in the result from
the former resolution, then name resolution terminates rather than
starting the search over with the next element of the context stack.
From the spec (interpolation section)--
5) If any name parts were retained in step 1, each should be resolved
against a context stack containing only the result from the former
resolution. If any part fails resolution, the result should be considered
falsey, and should interpolate as the empty string.
This test case is equivalent to the test case in the following pull
request:
https://github.com/mustache/spec/pull/48
"""
stack = ContextStack({'a': {'b': 'A.B'}}, {'a': 'A'})
self.assertEqual(stack.get('a'), 'A')
self.assertException(KeyNotFoundError, "Key 'a.b' not found: missing 'b'", stack.get, "a.b")
stack.pop()
self.assertEqual(stack.get('a.b'), 'A.B')
def test_dot_notation__autocall(self):
name = "foo.bar.baz"
# When any element in the path is callable, it should be automatically invoked
stack = ContextStack({"foo": Attachable(bar=Attachable(baz=lambda: "Called!"))})
self.assertEqual(stack.get(name), "Called!")
class Foo(object):
def bar(self):
return Attachable(baz='Baz')
stack = ContextStack({"foo": Foo()})
self.assertEqual(stack.get(name), "Baz")<|fim▁end|> |
self.assertEqual(_get_value(item1, 'upper'), 'ABC')
self.assertNotFound(item2, 'upper') |
<|file_name|>myjQuery.js<|end_file_name|><|fim▁begin|><|fim▁hole|>/**
* Created by zhang on 16/5/19.
*/
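// Static utility attached directly to the jQuery object, invoked as $.myjq().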
$.myjq = function () {
alert("hello my jQuery")
};
// jQuery prototype extension: myjq() becomes callable on jQuery selections.
$.fn.myjq=function(){
$(this).text("hello")
};<|fim▁end|> | |
<|file_name|>12148_Electricity.py<|end_file_name|><|fim▁begin|># Problem name: 12148 Electricity
# Problem url: https://uva.onlinejudge.org/external/121/12148.pdf
# Author: Andrey Yemelyanov
import sys
import math
import datetime
def readline():
return sys.stdin.readline().strip()
def main():
while True:
n_readings = int(readline())
if n_readings == 0:
break<|fim▁hole|> date = datetime.date(reading[2], reading[1], reading[0])
consumption = reading[3]
meter_readings.append((date, consumption))
c = get_daily_consumption(meter_readings)
print(len(c), sum(c))
def get_daily_consumption(meter_readings):
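    # A reading contributes to the answer only when it was taken exactly one
    # calendar day after the previous reading.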
c = []
for i in range(len(meter_readings)):
if i > 0:
current_date = meter_readings[i][0]
current_consumption = meter_readings[i][1]
prev_date = meter_readings[i - 1][0]
prev_consumption = meter_readings[i - 1][1]
if prev_date + datetime.timedelta(days = 1) == current_date:
c.append(current_consumption - prev_consumption)
return c
if __name__=="__main__":
main()<|fim▁end|> | meter_readings = []
for i in range(n_readings):
reading = [int(x) for x in readline().split()] |
<|file_name|>test_storage_queue.py<|end_file_name|><|fim▁begin|># coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import sys
import datetime
import unittest
from requests import Session
from azure.storage import (
AccessPolicy,
SharedAccessPolicy,
SignedIdentifier,
SignedIdentifiers,
)
from azure.storage.queue import (
QueueService,
QueueSharedAccessPermissions,
)
from azure.common import (
AzureHttpError,
AzureConflictHttpError,
AzureMissingResourceHttpError,
)
from tests.common_recordingtestcase import (
TestMode,
record,
)
from tests.storage_testcase import StorageTestCase
#------------------------------------------------------------------------------
TEST_QUEUE_PREFIX = 'mytestqueue'
#------------------------------------------------------------------------------
class StorageQueueTest(StorageTestCase):
def setUp(self):
super(StorageQueueTest, self).setUp()
self.qs = self._create_storage_service(QueueService, self.settings)
self.test_queues = []
self.creatable_queues = []
for i in range(10):
self.test_queues.append(self.get_resource_name(TEST_QUEUE_PREFIX + str(i)))
for i in range(4):
self.creatable_queues.append(
self.get_resource_name('mycreatablequeue' + str(i)))
if not self.is_playback():
for queue_name in self.test_queues:
self.qs.create_queue(queue_name)
def tearDown(self):
if not self.is_playback():
for queue_name in self.test_queues:
try:
self.qs.delete_queue(queue_name)
except:
pass
for queue_name in self.creatable_queues:
try:
self.qs.delete_queue(queue_name)
except:
pass
return super(StorageQueueTest, self).tearDown()
def _get_shared_access_policy(self, permission):
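        # Access window: starts one minute in the past (to tolerate clock skew)
        # and expires one hour after the start time.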
date_format = "%Y-%m-%dT%H:%M:%SZ"
start = datetime.datetime.utcnow() - datetime.timedelta(minutes=1)
expiry = start + datetime.timedelta(hours=1)
return SharedAccessPolicy(
AccessPolicy(
start.strftime(date_format),
expiry.strftime(date_format),
permission
)
)
@record
def test_get_service_properties(self):
# This api doesn't apply to local storage
if self.qs.use_local_storage:
return
# Action
properties = self.qs.get_queue_service_properties()
# Asserts
self.assertIsNotNone(properties)
self.assertIsNotNone(properties.logging)
self.assertIsNotNone(properties.logging.retention_policy)
self.assertIsNotNone(properties.logging.version)
self.assertIsNotNone(properties.hour_metrics)
self.assertIsNotNone(properties.hour_metrics.retention_policy)
self.assertIsNotNone(properties.hour_metrics.version)
self.assertIsNotNone(properties.minute_metrics)
self.assertIsNotNone(properties.minute_metrics.retention_policy)
self.assertIsNotNone(properties.minute_metrics.version)
@record
def test_set_service_properties(self):
# This api doesn't apply to local storage
if self.qs.use_local_storage:
return
# Action
queue_properties = self.qs.get_queue_service_properties()
queue_properties.logging.read = True
self.qs.set_queue_service_properties(queue_properties)
properties = self.qs.get_queue_service_properties()
# Asserts
self.assertIsNotNone(properties)
self.assertIsNotNone(properties.logging)
self.assertIsNotNone(properties.logging.retention_policy)
self.assertIsNotNone(properties.logging.version)
self.assertIsNotNone(properties.hour_metrics)
self.assertIsNotNone(properties.hour_metrics.retention_policy)
self.assertIsNotNone(properties.hour_metrics.version)
self.assertIsNotNone(properties.minute_metrics)
self.assertIsNotNone(properties.minute_metrics.retention_policy)
self.assertIsNotNone(properties.minute_metrics.version)
self.assertTrue(properties.logging.read)
@record
def test_create_queue(self):
# Action
self.qs.create_queue(self.creatable_queues[0])
result = self.qs.get_queue_metadata(self.creatable_queues[0])
self.qs.delete_queue(self.creatable_queues[0])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(result['x-ms-approximate-messages-count'], '0')
@record
def test_create_queue_already_exist(self):
# Action
created1 = self.qs.create_queue(self.creatable_queues[0])
created2 = self.qs.create_queue(self.creatable_queues[0])
# Asserts
self.assertTrue(created1)
self.assertFalse(created2)
@record
def test_create_queue_fail_on_exist(self):
# Action
created = self.qs.create_queue(self.creatable_queues[0], None, True)
with self.assertRaises(AzureConflictHttpError):
self.qs.create_queue(self.creatable_queues[0], None, True)
# Asserts
self.assertTrue(created)
@record
def test_create_queue_with_options(self):
# Action
self.qs.create_queue(
self.creatable_queues[1],
x_ms_meta_name_values={'val1': 'test', 'val2': 'blah'})
result = self.qs.get_queue_metadata(self.creatable_queues[1])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(3, len(result))
self.assertEqual(result['x-ms-approximate-messages-count'], '0')
self.assertEqual('test', result['x-ms-meta-val1'])
self.assertEqual('blah', result['x-ms-meta-val2'])
@record
def test_delete_queue_not_exist(self):
# Action
deleted = self.qs.delete_queue(self.creatable_queues[0])
# Asserts
self.assertFalse(deleted)
@record
def test_delete_queue_fail_not_exist_not_exist(self):
# Action
with self.assertRaises(AzureMissingResourceHttpError):
self.qs.delete_queue(self.creatable_queues[0], True)
# Asserts
@record
def test_delete_queue_fail_not_exist_already_exist(self):
# Action
created = self.qs.create_queue(self.creatable_queues[0])
deleted = self.qs.delete_queue(self.creatable_queues[0], True)
# Asserts
self.assertTrue(created)
self.assertTrue(deleted)
@record
def test_list_queues(self):
# Action
queues = self.qs.list_queues()
for queue in queues:
pass
# Asserts
self.assertIsNotNone(queues)
self.assertEqual('', queues.marker)
self.assertEqual(0, queues.max_results)
self.assertTrue(len(self.test_queues) <= len(queues))
@record
def test_list_queues_with_options(self):
# Action
queues_1 = self.qs.list_queues(prefix=TEST_QUEUE_PREFIX, maxresults=3)
queues_2 = self.qs.list_queues(
prefix=TEST_QUEUE_PREFIX,
marker=queues_1.next_marker,
include='metadata')
# Asserts
self.assertIsNotNone(queues_1)
self.assertEqual(3, len(queues_1))
self.assertEqual(3, queues_1.max_results)
self.assertEqual('', queues_1.marker)
self.assertIsNotNone(queues_1[0])
self.assertIsNone(queues_1[0].metadata)
self.assertNotEqual('', queues_1[0].name)
# Asserts
self.assertIsNotNone(queues_2)
self.assertTrue(len(self.test_queues) - 3 <= len(queues_2))
self.assertEqual(0, queues_2.max_results)
self.assertEqual(queues_1.next_marker, queues_2.marker)
self.assertIsNotNone(queues_2[0])
self.assertIsNotNone(queues_2[0].metadata)
self.assertNotEqual('', queues_2[0].name)
@record
def test_set_queue_metadata(self):
# Action
self.qs.create_queue(self.creatable_queues[2])
self.qs.set_queue_metadata(
self.creatable_queues[2],
x_ms_meta_name_values={'val1': 'test', 'val2': 'blah'})
result = self.qs.get_queue_metadata(self.creatable_queues[2])
self.qs.delete_queue(self.creatable_queues[2])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(3, len(result))
self.assertEqual('0', result['x-ms-approximate-messages-count'])
self.assertEqual('test', result['x-ms-meta-val1'])
self.assertEqual('blah', result['x-ms-meta-val2'])
@record
def test_put_message(self):
# Action. No exception means pass. No asserts needed.
self.qs.put_message(self.test_queues[0], 'message1')
self.qs.put_message(self.test_queues[0], 'message2')
self.qs.put_message(self.test_queues[0], 'message3')
self.qs.put_message(self.test_queues[0], 'message4')
@record
def test_get_messages(self):
# Action
self.qs.put_message(self.test_queues[1], 'message1')
self.qs.put_message(self.test_queues[1], 'message2')
self.qs.put_message(self.test_queues[1], 'message3')
self.qs.put_message(self.test_queues[1], 'message4')
result = self.qs.get_messages(self.test_queues[1])<|fim▁hole|> self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual('message1', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('1', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
@record
def test_get_messages_with_options(self):
# Action
self.qs.put_message(self.test_queues[2], 'message1')
self.qs.put_message(self.test_queues[2], 'message2')
self.qs.put_message(self.test_queues[2], 'message3')
self.qs.put_message(self.test_queues[2], 'message4')
result = self.qs.get_messages(
self.test_queues[2], numofmessages=4, visibilitytimeout=20)
# Asserts
self.assertIsNotNone(result)
self.assertEqual(4, len(result))
for message in result:
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertNotEqual('', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('1', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
@record
def test_peek_messages(self):
# Action
self.qs.put_message(self.test_queues[3], 'message1')
self.qs.put_message(self.test_queues[3], 'message2')
self.qs.put_message(self.test_queues[3], 'message3')
self.qs.put_message(self.test_queues[3], 'message4')
result = self.qs.peek_messages(self.test_queues[3])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertNotEqual('', message.message_text)
self.assertEqual('', message.pop_receipt)
self.assertEqual('0', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertEqual('', message.time_next_visible)
@record
def test_peek_messages_with_options(self):
# Action
self.qs.put_message(self.test_queues[4], 'message1')
self.qs.put_message(self.test_queues[4], 'message2')
self.qs.put_message(self.test_queues[4], 'message3')
self.qs.put_message(self.test_queues[4], 'message4')
result = self.qs.peek_messages(self.test_queues[4], numofmessages=4)
# Asserts
self.assertIsNotNone(result)
self.assertEqual(4, len(result))
for message in result:
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertNotEqual('', message.message_text)
self.assertEqual('', message.pop_receipt)
self.assertEqual('0', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertEqual('', message.time_next_visible)
@record
def test_clear_messages(self):
# Action
self.qs.put_message(self.test_queues[5], 'message1')
self.qs.put_message(self.test_queues[5], 'message2')
self.qs.put_message(self.test_queues[5], 'message3')
self.qs.put_message(self.test_queues[5], 'message4')
self.qs.clear_messages(self.test_queues[5])
result = self.qs.peek_messages(self.test_queues[5])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(0, len(result))
@record
def test_delete_message(self):
# Action
self.qs.put_message(self.test_queues[6], 'message1')
self.qs.put_message(self.test_queues[6], 'message2')
self.qs.put_message(self.test_queues[6], 'message3')
self.qs.put_message(self.test_queues[6], 'message4')
result = self.qs.get_messages(self.test_queues[6])
self.qs.delete_message(
self.test_queues[6], result[0].message_id, result[0].pop_receipt)
result2 = self.qs.get_messages(self.test_queues[6], numofmessages=32)
# Asserts
self.assertIsNotNone(result2)
self.assertEqual(3, len(result2))
@record
def test_update_message(self):
# Action
self.qs.put_message(self.test_queues[7], 'message1')
list_result1 = self.qs.get_messages(self.test_queues[7])
self.qs.update_message(self.test_queues[7],
list_result1[0].message_id,
'new text',
list_result1[0].pop_receipt,
visibilitytimeout=0)
list_result2 = self.qs.get_messages(self.test_queues[7])
# Asserts
self.assertIsNotNone(list_result2)
message = list_result2[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual('new text', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('2', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
def test_sas_read(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recordingfile(self.test_mode):
return
# Arrange
self.qs.put_message(self.test_queues[0], 'message1')
token = self.qs.generate_shared_access_signature(
self.test_queues[0],
self._get_shared_access_policy(QueueSharedAccessPermissions.READ),
)
# Act
service = QueueService(
account_name=self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
)
self._set_service_options(service, self.settings)
result = service.peek_messages(self.test_queues[0])
# Assert
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual('message1', message.message_text)
def test_sas_add(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recordingfile(self.test_mode):
return
# Arrange
token = self.qs.generate_shared_access_signature(
self.test_queues[0],
self._get_shared_access_policy(QueueSharedAccessPermissions.ADD),
)
# Act
service = QueueService(
account_name=self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
)
self._set_service_options(service, self.settings)
result = service.put_message(self.test_queues[0], 'addedmessage')
# Assert
result = self.qs.get_messages(self.test_queues[0])
self.assertEqual('addedmessage', result[0].message_text)
def test_sas_update(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recordingfile(self.test_mode):
return
# Arrange
self.qs.put_message(self.test_queues[0], 'message1')
token = self.qs.generate_shared_access_signature(
self.test_queues[0],
self._get_shared_access_policy(QueueSharedAccessPermissions.UPDATE),
)
result = self.qs.get_messages(self.test_queues[0])
# Act
service = QueueService(
account_name=self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
)
self._set_service_options(service, self.settings)
service.update_message(
self.test_queues[0],
result[0].message_id,
'updatedmessage1',
result[0].pop_receipt,
visibilitytimeout=0,
)
# Assert
result = self.qs.get_messages(self.test_queues[0])
self.assertEqual('updatedmessage1', result[0].message_text)
def test_sas_process(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recordingfile(self.test_mode):
return
# Arrange
self.qs.put_message(self.test_queues[0], 'message1')
token = self.qs.generate_shared_access_signature(
self.test_queues[0],
self._get_shared_access_policy(QueueSharedAccessPermissions.PROCESS),
)
# Act
service = QueueService(
account_name=self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
)
self._set_service_options(service, self.settings)
result = service.get_messages(self.test_queues[0])
# Assert
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual('message1', message.message_text)
def test_sas_signed_identifier(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recordingfile(self.test_mode):
return
# Arrange
si = SignedIdentifier()
si.id = 'testid'
si.access_policy.start = '2011-10-11'
si.access_policy.expiry = '2018-10-12'
si.access_policy.permission = QueueSharedAccessPermissions.READ
identifiers = SignedIdentifiers()
identifiers.signed_identifiers.append(si)
resp = self.qs.set_queue_acl(self.test_queues[0], identifiers)
self.qs.put_message(self.test_queues[0], 'message1')
token = self.qs.generate_shared_access_signature(
self.test_queues[0],
SharedAccessPolicy(signed_identifier=si.id),
)
# Act
service = QueueService(
account_name=self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
)
self._set_service_options(service, self.settings)
result = service.peek_messages(self.test_queues[0])
# Assert
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual('message1', message.message_text)
@record
def test_get_queue_acl(self):
# Arrange
# Act
acl = self.qs.get_queue_acl(self.test_queues[0])
# Assert
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 0)
@record
def test_get_queue_acl_iter(self):
# Arrange
# Act
acl = self.qs.get_queue_acl(self.test_queues[0])
for signed_identifier in acl:
pass
# Assert
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 0)
self.assertEqual(len(acl), 0)
@record
def test_get_queue_acl_with_non_existing_queue(self):
# Arrange
# Act
with self.assertRaises(AzureMissingResourceHttpError):
self.qs.get_queue_acl(self.creatable_queues[0])
# Assert
@record
def test_set_queue_acl(self):
# Arrange
# Act
resp = self.qs.set_queue_acl(self.test_queues[0])
# Assert
self.assertIsNone(resp)
acl = self.qs.get_queue_acl(self.test_queues[0])
self.assertIsNotNone(acl)
@record
def test_set_queue_acl_with_empty_signed_identifiers(self):
# Arrange
# Act
identifiers = SignedIdentifiers()
resp = self.qs.set_queue_acl(self.test_queues[0], identifiers)
# Assert
self.assertIsNone(resp)
acl = self.qs.get_queue_acl(self.test_queues[0])
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 0)
@record
def test_set_queue_acl_with_signed_identifiers(self):
# Arrange
# Act
si = SignedIdentifier()
si.id = 'testid'
si.access_policy.start = '2011-10-11'
si.access_policy.expiry = '2011-10-12'
si.access_policy.permission = QueueSharedAccessPermissions.READ
identifiers = SignedIdentifiers()
identifiers.signed_identifiers.append(si)
resp = self.qs.set_queue_acl(self.test_queues[0], identifiers)
# Assert
self.assertIsNone(resp)
acl = self.qs.get_queue_acl(self.test_queues[0])
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 1)
self.assertEqual(len(acl), 1)
self.assertEqual(acl.signed_identifiers[0].id, 'testid')
self.assertEqual(acl[0].id, 'testid')
@record
def test_set_queue_acl_with_non_existing_queue(self):
# Arrange
# Act
with self.assertRaises(AzureMissingResourceHttpError):
self.qs.set_queue_acl(self.creatable_queues[0])
# Assert
@record
def test_with_filter(self):
# Single filter
called = []
def my_filter(request, next):
called.append(True)
return next(request)
qc = self.qs.with_filter(my_filter)
qc.put_message(self.test_queues[7], 'message1')
self.assertTrue(called)
del called[:]
# Chained filters
def filter_a(request, next):
called.append('a')
return next(request)
def filter_b(request, next):
called.append('b')
return next(request)
qc = self.qs.with_filter(filter_a).with_filter(filter_b)
qc.put_message(self.test_queues[7], 'message1')
self.assertEqual(called, ['b', 'a'])
@record
def test_unicode_create_queue_unicode_name(self):
# Action
self.creatable_queues[0] = u'啊齄丂狛狜'
with self.assertRaises(AzureHttpError):
# not supported - queue name must be alphanumeric, lowercase
self.qs.create_queue(self.creatable_queues[0])
# Asserts
@record
def test_unicode_get_messages_unicode_data(self):
# Action
self.qs.put_message(self.test_queues[1], u'message1㚈')
result = self.qs.get_messages(self.test_queues[1])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual(u'message1㚈', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('1', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
@record
def test_unicode_update_message_unicode_data(self):
# Action
self.qs.put_message(self.test_queues[7], 'message1')
list_result1 = self.qs.get_messages(self.test_queues[7])
self.qs.update_message(self.test_queues[7],
list_result1[0].message_id,
u'啊齄丂狛狜',
list_result1[0].pop_receipt,
visibilitytimeout=0)
list_result2 = self.qs.get_messages(self.test_queues[7])
# Asserts
self.assertIsNotNone(list_result2)
message = list_result2[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual(u'啊齄丂狛狜', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('2', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()<|fim▁end|> |
# Asserts
self.assertIsNotNone(result) |
<|file_name|>f2p.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
import os
import re
import itertools
from functools import reduce
from .version import __version__
sep_regex = re.compile(r'[ \-_~!@#%$^&*\(\)\[\]\{\}/\:;"|,./?`]')
def get_portable_filename(filename):
path, _ = os.path.split(__file__)
filename = os.path.join(path, filename)
return filename
def load_conversion_file(filename):
filename = get_portable_filename(filename)
with open(filename, encoding='utf-8') as f:
l = list(f)
l = [i for i in l if i.strip()]
l = [i.strip().split() for i in l]
return {i[0]: i[1:] for i in l}
print('Loading converters...')
beginning = load_conversion_file('f2p-beginning.txt')
middle = load_conversion_file('f2p-middle.txt')
ending = load_conversion_file('f2p-ending.txt')
print('Loading persian word list...')
with open(get_portable_filename('persian-word-freq.txt'), encoding='utf-8') as f:
word_freq = list(f)
word_freq = [i.strip() for i in word_freq if i.strip()]
word_freq = [i.split() for i in word_freq if not i.startswith('#')]
word_freq = {i[0]: int(i[1]) for i in word_freq}
print('Loading dictionary...')
with open(get_portable_filename('f2p-dict.txt'), encoding='utf-8') as f:
dictionary = [i.strip().split(' ', 1) for i in f if i.strip()]
dictionary = {k.strip(): v.strip() for k, v in dictionary}
def f2p_word_internal(word, original_word):
# this function receives the word as separate letters
persian = []
for i, letter in enumerate(word):
if i == 0:
converter = beginning
elif i == len(word) - 1:
converter = ending
else:
converter = middle
conversions = converter.get(letter)
        if conversions is None:
return [(''.join(original_word), 0.0)]
else:
conversions = ['' if i == 'nothing' else i for i in conversions]
persian.append(conversions)
alternatives = itertools.product(*persian)
alternatives = [''.join(i) for i in alternatives]
alternatives = [(i, word_freq[i]) if i in word_freq else (i, 0)
for i in alternatives]
if len(alternatives) > 0:
max_freq = max(freq for _, freq in alternatives)
alternatives = [(w, float(freq / max_freq)) if freq != 0 else (w, 0.0)
for w, freq in alternatives]
else:
alternatives = [(''.join(word), 1.0)]
return alternatives
def variations(word):
"""Create variations of the word based on letter combinations like oo,
sh, etc."""
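    # For example, tracing the rules below: variations('shab') yields
    # [['sh', 'a', 'b'], ['s', 'h', 'a', 'b']], i.e. 'sh' is tried both as a
    # single digraph and as two separate letters.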
if word == 'a':
return [['A']]
elif len(word) == 1:
return [[word[0]]]
elif word == 'aa':
return [['A']]
elif word == 'ee':
return [['i']]
elif word == 'ei':
return [['ei']]
elif word in ['oo', 'ou']:
return [['u']]
elif word == 'kha':
return [['kha'], ['kh', 'a']]
elif word in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']:
return [[word]]
elif word in ["'ee", "'ei"]:
return [["'i"]]
elif word in ["'oo", "'ou"]:
return [["'u"]]
elif word in ["a'", "e'", "o'", "i'", "u'", "A'"]:
return [[word[0] + "'"]]
elif word in ["'a", "'e", "'o", "'i", "'u", "'A"]:
return [["'" + word[1]]]
elif len(word) == 2 and word[0] == word[1]:
return [[word[0]]]
if word[:2] == 'aa':
return [['A'] + i for i in variations(word[2:])]
elif word[:2] == 'ee':
return [['i'] + i for i in variations(word[2:])]
elif word[:2] in ['oo', 'ou']:
return [['u'] + i for i in variations(word[2:])]
elif word[:3] == 'kha':
return \
[['kha'] + i for i in variations(word[3:])] + \
[['kh', 'a'] + i for i in variations(word[3:])] + \
[['k', 'h', 'a'] + i for i in variations(word[3:])]
elif word[:2] in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']:
return \
[[word[:2]] + i for i in variations(word[2:])] + \
[[word[0]] + i for i in variations(word[1:])]
elif word[:2] in ["a'", "e'", "o'", "i'", "u'", "A'"]:
return [[word[:2]] + i for i in variations(word[2:])]
elif word[:3] in ["'ee", "'ei"]:
return [["'i"] + i for i in variations(word[3:])]
elif word[:3] in ["'oo", "'ou"]:
return [["'u"] + i for i in variations(word[3:])]
elif word[:2] in ["'a", "'e", "'o", "'i", "'u", "'A"]:
return [[word[:2]] + i for i in variations(word[2:])]
elif len(word) >= 2 and word[0] == word[1]:
return [[word[0]] + i for i in variations(word[2:])]
else:
return [[word[0]] + i for i in variations(word[1:])]
def f2p_word(word, max_word_size=15, cutoff=3):
"""Convert a single word from Finglish to Persian.
max_word_size: Maximum size of the words to consider. Words larger
than this will be kept unchanged.
cutoff: The cut-off point. For each word, there could be many
possibilities. By default 3 of these possibilities are considered
for each word. This number can be changed by this argument.
"""
original_word = word
word = word.lower()
c = dictionary.get(word)
if c:
return [(c, 1.0)]
if word == '':
return []
elif len(word) > max_word_size:
return [(original_word, 1.0)]
results = []
for w in variations(word):
results.extend(f2p_word_internal(w, original_word))
# sort results based on the confidence value
results.sort(key=lambda r: r[1], reverse=True)
    # return only the top `cutoff` results (3 by default) to cut down on the
    # number of possibilities.
return results[:cutoff]
def f2p_list(phrase, max_word_size=15, cutoff=3):
"""Convert a phrase from Finglish to Persian.
phrase: The phrase to convert.
max_word_size: Maximum size of the words to consider. Words larger
than this will be kept unchanged.
cutoff: The cut-off point. For each word, there could be many
possibilities. By default 3 of these possibilities are considered
for each word. This number can be changed by this argument.
    Returns a list of lists; each sub-list contains a number of
    possibilities for one word, as (word, confidence) pairs.
"""
# split the phrase into words
results = [w for w in sep_regex.split(phrase) if w]
# return an empty list if no words
if results == []:
return []
# convert each word separately
results = [f2p_word(w, max_word_size, cutoff) for w in results]
<|fim▁hole|> """Convert a Finglish phrase to the most probable Persian phrase.
"""
results = f2p_list(phrase, max_word_size, cutoff)
return ' '.join(i[0][0] for i in results)
def main():
print('Finglish to Persian Converter, v{}'.format(__version__))
print('finglish: ', end='')
phrase = input()
result = f2p(phrase)
print(result)
if __name__ == '__main__':
main()<|fim▁end|> | return results
def f2p(phrase, max_word_size=15, cutoff=3): |
<|file_name|>parser.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from .record import (
Metadata,
Record,
)
__all__ = ['Parser']
class Parser:
def __init__(self, store):
self.store = store
def parse_record(self, metadata, line):
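        # Each data line of an RIR delegation file is pipe-separated, e.g.
        # (illustrative): apnic|CN|ipv4|1.0.1.0|256|20110414|allocated
        # Only the first seven fields are used here.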
factors = line.split('|')
if len(factors) < 7:
return
        registry, cc, type_, start, value, date, status = factors[:7]
if type_ not in ('ipv4', 'ipv6'):<|fim▁hole|> def do(self, fp):
metadata = None
for line in fp:
line = line[:-1]
if line.startswith('#') or line.endswith('summary'):
continue
if metadata is None:
version, registry, serial, records,\
startdate, enddate, utcoffset = line.split('|')[:7]
metadata = Metadata(registry, version, serial)
continue
record = self.parse_record(metadata, line)
if record is None:
continue
self.store.persist(record)<|fim▁end|> | return
return Record(metadata, start, type_, value, cc)
|
<|file_name|>Program.java<|end_file_name|><|fim▁begin|>package pp.iloc.model;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import pp.iloc.model.Num.NumKind;
import pp.iloc.model.Operand.Type;
import pp.iloc.parse.FormatException;
/** ILOC program.
* @author Arend Rensink
*/
public class Program {
/** Indexed list of all instructions in the program. */
private final List<Instr> instrList;
/**
* Indexed list of all operations in the program.
* This is the flattened list of instructions.
*/
private final List<Op> opList;
/** Mapping from labels defined in the program to corresponding
* index locations.
*/
private final Map<Label, Integer> labelMap;
/** (Partial) mapping from symbolic constants used in the program
* to corresponding numeric values. */
private final Map<Num, Integer> symbMap;
/** Creates a program with an initially empty instruction list. */
public Program() {
this.instrList = new ArrayList<>();
this.opList = new ArrayList<>();
this.labelMap = new LinkedHashMap<>();
this.symbMap = new LinkedHashMap<>();
}
/** Adds an instruction to the instruction list of this program.
* @throws IllegalArgumentException if the instruction has a known label
*/
public void addInstr(Instr instr) {
instr.setProgram(this);
instr.setLine(this.opList.size());
if (instr.hasLabel()) {
registerLabel(instr);<|fim▁hole|> this.instrList.add(instr);
for (Op op : instr) {
this.opList.add(op);
}
}
/** Registers the label of a given instruction. */
void registerLabel(Instr instr) {
Label label = instr.getLabel();
Integer loc = this.labelMap.get(label);
if (loc != null) {
throw new IllegalArgumentException(String.format(
"Label %s already occurred at location %d", label, loc));
}
this.labelMap.put(label, instr.getLine());
}
/** Returns the current list of instructions of this program. */
public List<Instr> getInstr() {
return Collections.unmodifiableList(this.instrList);
}
/** Returns the operation at a given line number. */
public Op getOpAt(int line) {
return this.opList.get(line);
}
/** Returns the size of the program, in number of operations. */
public int size() {
return this.opList.size();
}
/**
* Returns the location at which a given label is defined, if any.
* @return the location of an instruction with the label, or {@code -1}
* if the label is undefined
*/
public int getLine(Label label) {
Integer result = this.labelMap.get(label);
return result == null ? -1 : result;
}
/** Assigns a fixed numeric value to a symbolic constant.
* It is an error to assign to the same constant twice.
	 * @param symb the symbolic constant to assign to
	 * @param value the numeric value to assign
*/
public void setSymb(Num symb, int value) {
if (this.symbMap.containsKey(symb)) {
throw new IllegalArgumentException("Constant '" + symb
+ "' already assigned");
}
this.symbMap.put(symb, value);
}
/**
* Returns the value with which a given symbol has been
* initialised, if any.
*/
public Integer getSymb(Num symb) {
return this.symbMap.get(symb);
}
/**
* Returns the value with which a given named symbol has been
* initialised, if any.
* @param name name of the symbol, without '@'-prefix
*/
public Integer getSymb(String name) {
return getSymb(new Num(name));
}
/**
* Checks for internal consistency, in particular whether
* all used labels are defined.
*/
public void check() throws FormatException {
List<String> messages = new ArrayList<>();
for (Instr instr : getInstr()) {
for (Op op : instr) {
messages.addAll(checkOpnds(op.getLine(), op.getArgs()));
}
}
if (!messages.isEmpty()) {
throw new FormatException(messages);
}
}
private List<String> checkOpnds(int loc, List<Operand> opnds) {
List<String> result = new ArrayList<>();
for (Operand opnd : opnds) {
if (opnd instanceof Label) {
if (getLine((Label) opnd) < 0) {
result.add(String.format("Line %d: Undefined label '%s'",
loc, opnd));
}
}
}
return result;
}
/**
* Returns a mapping from registers to line numbers
* in which they appear.
*/
public Map<String, Set<Integer>> getRegLines() {
Map<String, Set<Integer>> result = new LinkedHashMap<>();
for (Op op : this.opList) {
for (Operand opnd : op.getArgs()) {
if (opnd.getType() == Type.REG) {
Set<Integer> ops = result.get(((Reg) opnd).getName());
if (ops == null) {
result.put(((Reg) opnd).getName(),
ops = new LinkedHashSet<>());
}
ops.add(op.getLine());
}
}
}
return result;
}
/**
* Returns a mapping from (symbolic) variables to line numbers
* in which they appear.
*/
public Map<String, Set<Integer>> getSymbLines() {
Map<String, Set<Integer>> result = new LinkedHashMap<>();
for (Op op : this.opList) {
for (Operand opnd : op.getArgs()) {
if (!(opnd instanceof Num)) {
continue;
}
if (((Num) opnd).getKind() != NumKind.SYMB) {
continue;
}
String name = ((Num) opnd).getName();
Set<Integer> lines = result.get(name);
if (lines == null) {
result.put(name, lines = new LinkedHashSet<>());
}
lines.add(op.getLine());
}
}
return result;
}
/** Returns a line-by-line printout of this program. */
@Override
public String toString() {
StringBuilder result = new StringBuilder();
for (Map.Entry<Num, Integer> symbEntry : this.symbMap.entrySet()) {
result.append(String.format("%s <- %d%n", symbEntry.getKey()
.getName(), symbEntry.getValue()));
}
for (Instr instr : getInstr()) {
result.append(instr.toString());
result.append('\n');
}
return result.toString();
}
@Override
public int hashCode() {
return this.instrList.hashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof Program)) {
return false;
}
Program other = (Program) obj;
if (!this.instrList.equals(other.instrList)) {
return false;
}
return true;
}
/** Returns a string consisting of this program in a nice layout.
*/
public String prettyPrint() {
StringBuilder result = new StringBuilder();
// first print the symbolic declaration map
int idSize = 0;
for (Num symb : this.symbMap.keySet()) {
idSize = Math.max(idSize, symb.getName().length());
}
for (Map.Entry<Num, Integer> symbEntry : this.symbMap.entrySet()) {
result.append(String.format("%-" + idSize + "s <- %d%n", symbEntry
.getKey().getName(), symbEntry.getValue()));
}
if (idSize > 0) {
result.append('\n');
}
// then print the instructions
int labelSize = 0;
int sourceSize = 0;
int targetSize = 0;
for (Instr i : getInstr()) {
labelSize = Math.max(labelSize, i.toLabelString().length());
if (i instanceof Op && ((Op) i).getOpCode() != OpCode.out) {
Op op = (Op) i;
sourceSize = Math.max(sourceSize, op.toSourceString().length());
targetSize = Math.max(targetSize, op.toTargetString().length());
}
}
for (Instr i : getInstr()) {
result.append(i.prettyPrint(labelSize, sourceSize, targetSize));
}
return result.toString();
}
}<|fim▁end|> | } |
<|file_name|>MessageUtil.java<|end_file_name|><|fim▁begin|>package com.weixin.util;
import java.io.IOException;
import java.io.InputStream;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import org.dom4j.DocumentException;
import org.dom4j.Element;
import org.dom4j.io.SAXReader;
import com.thoughtworks.xstream.XStream;
import com.weixin.vo.TextMessage;
<|fim▁hole|>/**
* @author : Jay
*/
public class MessageUtil {
/**
	 * Convert the request XML payload to a map
* @param request
* @return
* @throws IOException
* @throws DocumentException
*/
public static Map<String, String> xmlToMap(HttpServletRequest request)
throws IOException, DocumentException {
Map<String, String> map = new HashMap<String, String>();
SAXReader reader = new SAXReader();
InputStream ins = request.getInputStream();
org.dom4j.Document doc = reader.read(ins);
Element root = doc.getRootElement();
List<Element> list = root.elements();
for (Element e : list) {
map.put(e.getName(), e.getText());
}
ins.close();
return map;
}
/**
	 * Convert a text message object to XML
* @return
*/
public static String textMessageToXml(TextMessage textMessage){
XStream xstream = new XStream();
xstream.alias("xml", textMessage.getClass());
return xstream.toXML(textMessage);
}
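	// Illustrative shape of the XML produced above (element names and order
	// depend on the fields declared in TextMessage, so this is an assumption):
	// <xml>
	//   <ToUserName>...</ToUserName>
	//   <FromUserName>...</FromUserName>
	//   <CreateTime>1458888888000</CreateTime>
	//   <MsgType>text</MsgType>
	//   <Content>hello</Content>
	// </xml>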
/**
	 * Initialize a text message reply
* @param toUserName
* @param fromUserName
* @param content
* @return
*/
	public static String initText(String toUserName, String fromUserName, String content){
TextMessage text = new TextMessage();
text.setFromUserName(toUserName);
text.setToUserName(fromUserName);
text.setMsgType("text");
text.setCreateTime(new Date().getTime());
text.setContent(content);
return MessageUtil.textMessageToXml(text);
}
}<|fim▁end|> | |
<|file_name|>estimator.py<|end_file_name|><|fim▁begin|># Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import os
import tempfile
import numpy as np
import six
from google.protobuf import message
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.eager import context
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import run_config
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.estimator.export import export as export_helpers
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import utils_impl as saved_model_utils
from tensorflow.python.summary import summary
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import device_setter
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import evaluation
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.training import warm_starting_util
from tensorflow.python.util import compat
from tensorflow.python.util import compat_internal
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import estimator_export
_VALID_MODEL_FN_ARGS = set(
['features', 'labels', 'mode', 'params', 'self', 'config'])
@estimator_export('estimator.Estimator')
class Estimator(object):
"""Estimator class to train and evaluate TensorFlow models.
The `Estimator` object wraps a model which is specified by a `model_fn`,
which, given inputs and a number of other parameters, returns the ops
necessary to perform training, evaluation, or predictions.
All outputs (checkpoints, event files, etc.) are written to `model_dir`, or a
subdirectory thereof. If `model_dir` is not set, a temporary directory is
used.
  The `config` argument can be passed a `tf.estimator.RunConfig` object containing
information about the execution environment. It is passed on to the
`model_fn`, if the `model_fn` has a parameter named "config" (and input
functions in the same manner). If the `config` parameter is not passed, it is
instantiated by the `Estimator`. Not passing config means that defaults useful
for local execution are used. `Estimator` makes config available to the model
(for instance, to allow specialization based on the number of workers
available), and also uses some of its fields to control internals, especially
regarding checkpointing.
The `params` argument contains hyperparameters. It is passed to the
`model_fn`, if the `model_fn` has a parameter named "params", and to the input
functions in the same manner. `Estimator` only passes params along, it does
not inspect it. The structure of `params` is therefore entirely up to the
developer.
None of `Estimator`'s methods can be overridden in subclasses (its
constructor enforces this). Subclasses should use `model_fn` to configure
the base class, and may add methods implementing specialized functionality.
@compatibility(eager)
Calling methods of `Estimator` will work while eager execution is enabled.
However, the `model_fn` and `input_fn` is not executed eagerly, `Estimator`
will switch to graph model before calling all user-provided functions (incl.
hooks), so their code has to be compatible with graph mode execution. Note
that `input_fn` code using `tf.data` generally works in both graph and eager
modes.
@end_compatibility
"""
def __init__(self, model_fn, model_dir=None, config=None, params=None,
warm_start_from=None):
"""Constructs an `Estimator` instance.
See [estimators](https://tensorflow.org/guide/estimators) for more
information.
To warm-start an `Estimator`:
```python
estimator = tf.estimator.DNNClassifier(
feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],
hidden_units=[1024, 512, 256],
warm_start_from="/path/to/checkpoint/dir")
```
For more details on warm-start configuration, see
`tf.estimator.WarmStartSettings`.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: This is the first item returned from the `input_fn`
passed to `train`, `evaluate`, and `predict`. This should be a
single `tf.Tensor` or `dict` of same.
* `labels`: This is the second item returned from the `input_fn`
passed to `train`, `evaluate`, and `predict`. This should be a
single `tf.Tensor` or `dict` of same (for multi-head models).
            If mode is `tf.estimator.ModeKeys.PREDICT`, `labels=None` will
be passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
          * `mode`: Optional. Specifies if this is training, evaluation or
prediction. See `tf.estimator.ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
is passed to Estimator in `params` parameter. This allows
to configure Estimators from hyper parameter tuning.
* `config`: Optional `estimator.RunConfig` object. Will receive what
is passed to Estimator as its `config` parameter, or a default
value. Allows setting up things in your `model_fn` based on
configuration such as `num_ps_replicas`, or `model_dir`.
* Returns:
`tf.estimator.EstimatorSpec`
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `PathLike` object, the
path will be resolved. If `None`, the model_dir in `config` will be used
if set. If both are set, they must be same. If both are `None`, a
temporary directory will be used.
config: `estimator.RunConfig` configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings`
object to fully configure warm-starting. If the string
filepath is provided instead of a
`tf.estimator.WarmStartSettings`, then all variables are
warm-started, and it is assumed that vocabularies
and `tf.Tensor` names are unchanged.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
ValueError: if this is called via a subclass and if that class overrides
a member of `Estimator`.
"""
Estimator._assert_members_are_not_overridden(self)
self._config = maybe_overwrite_model_dir_and_session_config(config,
model_dir)
# The distribute field contains an instance of DistributionStrategy.
self._train_distribution = self._config.train_distribute
self._eval_distribution = self._config.eval_distribute
# Model directory.
self._model_dir = self._config.model_dir
self._session_config = self._config.session_config
logging.info('Using config: %s', str(vars(self._config)))
self._device_fn = (
self._config.device_fn or _get_replica_device_setter(self._config))
if model_fn is None:
raise ValueError('model_fn must be provided to Estimator.')
_verify_model_fn_args(model_fn, params)
self._model_fn = model_fn
self._params = copy.deepcopy(params or {})
# pylint: disable=protected-access
self._warm_start_settings = _get_default_warm_start_settings(
warm_start_from)
# pylint: enable=protected-access
@property
def model_dir(self):
return self._model_dir
@property
def config(self):
return copy.deepcopy(self._config)
@property
def params(self):
return copy.deepcopy(self._params)
@property
def model_fn(self):
"""Returns the `model_fn` which is bound to `self.params`.
Returns:
The `model_fn` with following signature:
`def model_fn(features, labels, mode, config)`
"""
def public_model_fn(features, labels, mode, config):
return self._call_model_fn(features, labels, mode, config)
return public_model_fn
# TODO(ispir): support a list of names
def get_variable_value(self, name):
"""Returns value of the variable given by name.
<|fim▁hole|> Numpy array - value of the tensor.
Raises:
ValueError: If the `Estimator` has not produced a checkpoint yet.
"""
_check_checkpoint_available(self.model_dir)
with context.graph_mode():
return training.load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
Raises:
ValueError: If the `Estimator` has not produced a checkpoint yet.
"""
_check_checkpoint_available(self.model_dir)
with context.graph_mode():
return [name for name, _ in training.list_variables(self.model_dir)]
def latest_checkpoint(self):
"""Finds the filename of the latest saved checkpoint file in `model_dir`.
Returns:
The full path to the latest checkpoint or `None` if no checkpoint was
found.
"""
with context.graph_mode():
return checkpoint_management.latest_checkpoint(self.model_dir)
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
"""Trains a model given training data `input_fn`.
Args:
input_fn: A function that provides input data for training as minibatches.
See [Premade Estimators](
https://tensorflow.org/guide/premade_estimators#create_input_functions)
        for more information. The function should construct and return one of
        the following:
        * A `tf.data.Dataset` object: Outputs of `Dataset` object must be a
          tuple `(features, labels)` with same constraints as below.
        * A tuple `(features, labels)`: Where `features` is a `tf.Tensor` or a
          dictionary of string feature name to `Tensor` and `labels` is a
          `Tensor` or a dictionary of string label name to `Tensor`. Both
          `features` and `labels` are consumed by `model_fn`. They should
          satisfy the expectation of `model_fn` from inputs.
hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
callbacks inside the training loop.
steps: Number of steps for which to train the model. If `None`, train
forever or train until `input_fn` generates the `tf.errors.OutOfRange`
error or `StopIteration` exception. `steps` works incrementally. If you
call two times `train(steps=10)` then training occurs in total 20 steps.
If `OutOfRange` or `StopIteration` occurs in the middle, training stops
before 20 steps. If you don't want to have incremental behavior please
set `max_steps` instead. If set, `max_steps` must be `None`.
max_steps: Number of total steps for which to train model. If `None`,
train forever or train until `input_fn` generates the
`tf.errors.OutOfRange` error or `StopIteration` exception. If set,
`steps` must be `None`. If `OutOfRange` or `StopIteration` occurs in the
middle, training stops before `max_steps` steps. Two calls to
`train(steps=100)` means 200 training iterations. On the other hand, two
calls to `train(max_steps=100)` means that the second call will not do
any iteration since first call did all 100 steps.
saving_listeners: list of `CheckpointSaverListener` objects. Used for
callbacks that run immediately before or after checkpoint savings.
Returns:
`self`, for chaining.
Raises:
ValueError: If both `steps` and `max_steps` are not `None`.
ValueError: If either `steps` or `max_steps <= 0`.
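    Example (an illustrative sketch; `my_train_input_fn` is a hypothetical
    function, not part of this module):
    ```python
    def my_train_input_fn():
      dataset = tf.data.Dataset.from_tensor_slices(
          ({'x': [[1.0], [2.0], [3.0]]}, [0, 1, 0]))
      return dataset.shuffle(3).repeat().batch(2)
    estimator.train(input_fn=my_train_input_fn, max_steps=1000)
    ```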
"""
if self.config.task_type in (run_config.TaskType.EVALUATOR,
run_config.TaskType.PS):
raise ValueError(
          'Train has been called with wrong configuration. Please use '
'tf.estimator.train_and_evaluate which calls proper API according '
'to given configuration. Current configuration: {}.'.format(
self.config))
with context.graph_mode():
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
if steps is not None and steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
if max_steps is not None and max_steps <= 0:
raise ValueError(
'Must specify max_steps > 0, given: {}'.format(max_steps))
if max_steps is not None:
start_step = _load_global_step_from_checkpoint_dir(self._model_dir)
if max_steps <= start_step:
          logging.info('Skipping training since max_steps has already been reached.')
return self
hooks = _check_hooks_type(hooks)
hooks.extend(self._convert_train_steps_to_hooks(steps, max_steps))
saving_listeners = _check_listeners_type(saving_listeners)
loss = self._train_model(input_fn, hooks, saving_listeners)
logging.info('Loss for final step: %s.', loss)
return self
def _convert_train_steps_to_hooks(self, steps, max_steps):
"""Create hooks to run correct number of steps in training.
Args:
steps: number of steps to run during training.
max_steps: maximum number of steps to be run during training. It'll be
the maximum number of steps the model will train to after restoring
from checkpoint even across multiple estimator.train calls.
Returns:
List of hooks to be passed to the estimator.
"""
if steps is not None or max_steps is not None:
if self._train_distribution:
steps_per_run = getattr(self._train_distribution, 'steps_per_run', 1)
if steps_per_run > 1:
return [basic_session_run_hooks._MultiStepStopAtStepHook( # pylint: disable=protected-access
steps, max_steps, steps_per_run)]
return [training.StopAtStepHook(steps, max_steps)]
else:
return []
def eval_dir(self, name=None):
"""Shows the directory name where evaluation metrics are dumped.
Args:
name: Name of the evaluation if user needs to run multiple evaluations on
different data sets, such as on training data vs test data. Metrics for
different evaluations are saved in separate folders, and appear
separately in tensorboard.
Returns:
A string which is the path of directory contains evaluation metrics.
"""
return os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
def evaluate(self, input_fn, steps=None, hooks=None, checkpoint_path=None,
name=None):
"""Evaluates the model given evaluation data `input_fn`.
For each step, calls `input_fn`, which returns one batch of data.
Evaluates until:
- `steps` batches are processed, or
- `input_fn` raises an end-of-input exception (`tf.errors.OutOfRangeError`
or
`StopIteration`).
Args:
input_fn: A function that constructs the input data for evaluation. See
[Premade Estimators](
https://tensorflow.org/guide/premade#create_input_functions)
        for more information. The function should construct and return one of
        the following:
        * A `tf.data.Dataset` object: Outputs of `Dataset` object must be a
          tuple `(features, labels)` with same constraints as below.
        * A tuple `(features, labels)`: Where `features` is a `tf.Tensor` or a
          dictionary of string feature name to `Tensor` and `labels` is a
          `Tensor` or a dictionary of string label name to `Tensor`. Both
          `features` and `labels` are consumed by `model_fn`. They should
          satisfy the expectation of `model_fn` from inputs.
steps: Number of steps for which to evaluate model. If `None`, evaluates
until `input_fn` raises an end-of-input exception.
hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
callbacks inside the evaluation call.
checkpoint_path: Path of a specific checkpoint to evaluate. If `None`, the
latest checkpoint in `model_dir` is used. If there are no checkpoints
in `model_dir`, evaluation is run with newly initialized `Variables`
instead of ones restored from checkpoint.
name: Name of the evaluation if user needs to run multiple evaluations on
different data sets, such as on training data vs test data. Metrics for
different evaluations are saved in separate folders, and appear
separately in tensorboard.
Returns:
A dict containing the evaluation metrics specified in `model_fn` keyed by
name, as well as an entry `global_step` which contains the value of the
global step for which this evaluation was performed. For canned
estimators, the dict contains the `loss` (mean loss per mini-batch) and
the `average_loss` (mean loss per sample). Canned classifiers also return
the `accuracy`. Canned regressors also return the `label/mean` and the
`prediction/mean`.
Raises:
ValueError: If `steps <= 0`.
ValueError: If no model has been trained, namely `model_dir`, or the
given `checkpoint_path` is empty.
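    Example (an illustrative sketch; `my_eval_input_fn` is a hypothetical
    function, not part of this module):
    ```python
    def my_eval_input_fn():
      dataset = tf.data.Dataset.from_tensor_slices(
          ({'x': [[1.0], [2.0]]}, [0, 1]))
      return dataset.batch(2)
    metrics = estimator.evaluate(input_fn=my_eval_input_fn)
    print(metrics['global_step'])
    ```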
"""
with context.graph_mode():
hooks = _check_hooks_type(hooks)
hooks.extend(self._convert_eval_steps_to_hooks(steps))
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = checkpoint_management.latest_checkpoint(self._model_dir)
if not latest_path:
logging.info('Could not find trained model in model_dir: {}, running '
'initialization to evaluate.'.format(self._model_dir))
checkpoint_path = latest_path
def _evaluate():
(scaffold, update_op, eval_dict, all_hooks) = (
self._evaluate_build_graph(input_fn, hooks, checkpoint_path))
return self._evaluate_run(
checkpoint_path=checkpoint_path,
scaffold=scaffold,
update_op=update_op,
eval_dict=eval_dict,
all_hooks=all_hooks,
output_dir=self.eval_dir(name))
with ops.Graph().as_default():
if self._eval_distribution:
with self._eval_distribution.scope():
return _evaluate()
else:
return _evaluate()
def _convert_eval_steps_to_hooks(self, steps):
if steps is None:
return []
if steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
return [evaluation._StopAfterNEvalsHook(num_evals=steps)] # pylint: disable=protected-access
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
"""Yields predictions for given features.
Please note that interleaving two predict outputs does not work. See:
[issue/20506](
https://github.com/tensorflow/tensorflow/issues/20506#issuecomment-422208517)
Args:
input_fn: A function that constructs the features. Prediction continues
until `input_fn` raises an end-of-input exception
(`tf.errors.OutOfRangeError` or `StopIteration`).
See [Premade Estimators](
https://tensorflow.org/guide/premade_estimators#create_input_functions)
for more information. The function should construct and return one of
the following:
* A `tf.data.Dataset` object: Outputs of `Dataset` object must have
same constraints as below.
* features: A `tf.Tensor` or a dictionary of string feature name to
`Tensor`. features are consumed by `model_fn`. They should satisfy
the expectation of `model_fn` from inputs.
* A tuple, in which case the first item is extracted as features.
predict_keys: list of `str`, name of the keys to predict. It is used if
the `tf.estimator.EstimatorSpec.predictions` is a `dict`. If
`predict_keys` is used then rest of the predictions will be filtered
from the dictionary. If `None`, returns all.
hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
callbacks inside the prediction call.
checkpoint_path: Path of a specific checkpoint to predict. If `None`, the
latest checkpoint in `model_dir` is used. If there are no checkpoints
in `model_dir`, prediction is run with newly initialized `Variables`
instead of ones restored from checkpoint.
yield_single_examples: If `False`, yields the whole batch as returned by
the `model_fn` instead of decomposing the batch into individual
elements. This is useful if `model_fn` returns some tensors whose first
dimension is not equal to the batch size.
Yields:
Evaluated values of `predictions` tensors.
Raises:
ValueError: Could not find a trained model in `model_dir`.
ValueError: If batch length of predictions is not the same and
`yield_single_examples` is `True`.
ValueError: If there is a conflict between `predict_keys` and
`predictions`. For example if `predict_keys` is not `None` but
`tf.estimator.EstimatorSpec.predictions` is not a `dict`.
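    Example (an illustrative sketch; `my_predict_input_fn` is a hypothetical
    function, not part of this module):
    ```python
    def my_predict_input_fn():
      dataset = tf.data.Dataset.from_tensor_slices({'x': [[4.0], [5.0]]})
      return dataset.batch(1)
    for prediction in estimator.predict(input_fn=my_predict_input_fn):
      print(prediction)
    ```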
"""
with context.graph_mode():
hooks = _check_hooks_type(hooks)
# Check that model has been trained.
if not checkpoint_path:
checkpoint_path = checkpoint_management.latest_checkpoint(
self._model_dir)
if not checkpoint_path:
logging.info('Could not find trained model in model_dir: {}, running '
'initialization to predict.'.format(self._model_dir))
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
self._create_and_assert_global_step(g)
features, input_hooks = self._get_features_from_input_fn(
input_fn, model_fn_lib.ModeKeys.PREDICT)
estimator_spec = self._call_model_fn(
features, None, model_fn_lib.ModeKeys.PREDICT, self.config)
# Call to warm_start has to be after model_fn is called.
self._maybe_warm_start(checkpoint_path)
predictions = self._extract_keys(
estimator_spec.predictions, predict_keys)
all_hooks = list(input_hooks)
all_hooks.extend(hooks)
all_hooks.extend(list(estimator_spec.prediction_hooks or []))
with training.MonitoredSession(
session_creator=training.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
master=self._config.master,
scaffold=estimator_spec.scaffold,
config=self._session_config),
hooks=all_hooks) as mon_sess:
while not mon_sess.should_stop():
preds_evaluated = mon_sess.run(predictions)
if not yield_single_examples:
yield preds_evaluated
elif not isinstance(predictions, dict):
for pred in preds_evaluated:
yield pred
else:
for i in range(self._extract_batch_length(preds_evaluated)):
yield {
key: value[i]
for key, value in six.iteritems(preds_evaluated)
}
def _assert_members_are_not_overridden(self):
"""Asserts members of `Estimator` are not overridden."""
# TPUEstimator is special cased (owned by TF).
if self.__class__.__name__ == 'TPUEstimator':
return
allowed_overrides = set([
'_create_and_assert_global_step',
'_tf_api_names', '_tf_api_names_v1', '_estimator_api_names',
'_estimator_api_names_v1', '_estimator_api_constants',
'_estimator_api_constants_v1',
])
estimator_members = set([m for m in Estimator.__dict__.keys()
if not m.startswith('__')])
subclass_members = set(self.__class__.__dict__.keys())
common_members = estimator_members & subclass_members - allowed_overrides
overridden_members = [
m for m in common_members
if Estimator.__dict__[m] != self.__class__.__dict__[m]]
if overridden_members:
raise ValueError(
'Subclasses of Estimator cannot override members of Estimator. '
'{} does override {}'.format(self.__class__, overridden_members))
def export_savedmodel(
self, export_dir_base, serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=False):
# pylint: disable=line-too-long,g-doc-args,g-doc-return-or-yield
"""Exports inference graph as a `SavedModel` into the given dir.
    Note that `export_savedmodel` will be renamed to `export_saved_model`
    in TensorFlow 2.0. At that time, `export_savedmodel` without the
    additional underscore will be available only through tf.compat.v1.
Please see `tf.estimator.Estimator.export_saved_model` for more information.
There is one additional arg versus the new method:
strip_default_attrs: This parameter is going away in TF 2.0, and
the new behavior will automatically strip all default attributes.
Boolean. If `True`, default-valued attributes will be
removed from the `NodeDef`s. For a detailed guide, see [Stripping
Default-Valued Attributes](
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
"""
# pylint: enable=line-too-long,g-doc-args,g-doc-return-or-yield
return self._export_saved_model_for_mode(
export_dir_base,
serving_input_receiver_fn,
assets_extra=assets_extra,
as_text=as_text,
checkpoint_path=checkpoint_path,
strip_default_attrs=strip_default_attrs,
mode=model_fn_lib.ModeKeys.PREDICT)
def export_saved_model(
self, export_dir_base, serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None):
# pylint: disable=line-too-long
"""Exports inference graph as a `SavedModel` into the given dir.
For a detailed guide, see
[Using SavedModel with Estimators](https://tensorflow.org/guide/saved_model#using_savedmodel_with_estimators).
This method builds a new graph by first calling the
`serving_input_receiver_fn` to obtain feature `Tensor`s, and then calling
this `Estimator`'s `model_fn` to generate the model graph based on those
features. It restores the given checkpoint (or, lacking that, the most
recent checkpoint) into this graph in a fresh session. Finally it creates
a timestamped export directory below the given `export_dir_base`, and writes
a `SavedModel` into it containing a single `tf.MetaGraphDef` saved from this
session.
The exported `MetaGraphDef` will provide one `SignatureDef` for each
element of the `export_outputs` dict returned from the `model_fn`, named
using
the same keys. One of these keys is always
`tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY`,
indicating which
signature will be served when a serving request does not specify one.
For each signature, the outputs are provided by the corresponding
`tf.estimator.export.ExportOutput`s, and the inputs are always the input
receivers provided by
the `serving_input_receiver_fn`.
Extra assets may be written into the `SavedModel` via the `assets_extra`
argument. This should be a dict, where each key gives a destination path
(including the filename) relative to the assets.extra directory. The
corresponding value gives the full path of the source file to be copied.
For example, the simple case of copying a single file without renaming it
is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
Args:
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported `SavedModel`s.
serving_input_receiver_fn: A function that takes no argument and returns a
`tf.estimator.export.ServingInputReceiver` or
`tf.estimator.export.TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported `SavedModel`, or `None` if no extra assets are
needed.
as_text: whether to write the `SavedModel` proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
Raises:
ValueError: if no `serving_input_receiver_fn` is provided, no
`export_outputs` are provided, or no checkpoint can be found.
"""
# pylint: enable=line-too-long
# TODO(b/111442174): `export_to_savedmodel` will be renamed to
# `export_to_saved_model` in TensorFlow 2.0. This function is a wrapper
# while staging the new version; do not add any logic here.
return self.export_savedmodel(
export_dir_base,
serving_input_receiver_fn,
assets_extra=assets_extra,
as_text=as_text,
checkpoint_path=checkpoint_path,
strip_default_attrs=True)
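# Illustrative sketch (assumptions: the feature name 'x', the tensor shape,
# the asset path and the `estimator` instance are hypothetical). A raw
# serving receiver is one way to build the required
# `serving_input_receiver_fn`:
#
#   serving_input_fn = (
#       tf.estimator.export.build_raw_serving_input_receiver_fn(
#           {'x': tf.placeholder(dtype=tf.float32, shape=[None, 4])}))
#   export_dir = estimator.export_saved_model(
#       '/tmp/exports', serving_input_fn,
#       assets_extra={'vocab.txt': '/path/to/vocab.txt'})
#
# The returned `export_dir` is a timestamped subdirectory of '/tmp/exports'
# containing the PREDICT-mode SavedModel.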
def _export_saved_model_for_mode(
self, export_dir_base, input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=False,
mode=model_fn_lib.ModeKeys.PREDICT):
# pylint: disable=line-too-long
"""Exports a single train/eval/predict graph as a `SavedModel`.
This method is a wrapper for `_export_all_saved_models`, and wraps a raw
`input_receiver_fn` in a dictionary to pass in to that function.
See `_export_all_saved_models` for full docs.
See `tf.contrib.estimator.export_saved_model_for_mode` for the currently
exposed version of this function.
Args:
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported `SavedModel`s.
input_receiver_fn: a function that takes no argument and returns the
appropriate subclass of `InputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported `SavedModel`, or `None` if no extra assets are
needed.
as_text: whether to write the `SavedModel` proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the `NodeDef`s. For a detailed guide, see [Stripping
Default-Valued
Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
mode: `tf.estimator.ModeKeys` value indicating which mode will be exported.
Returns:
The string path to the exported directory.
Raises:
ValueError: if `input_receiver_fn` is `None`, no `export_outputs`
are provided, or no checkpoint can be found.
"""
# pylint: enable=line-too-long
if not input_receiver_fn:
raise ValueError('An input_receiver_fn must be defined.')
input_receiver_fn_map = {mode: input_receiver_fn}
return self._export_all_saved_models(
export_dir_base,
input_receiver_fn_map,
assets_extra=assets_extra,
as_text=as_text,
checkpoint_path=checkpoint_path,
strip_default_attrs=strip_default_attrs)
def _export_all_saved_models(
self, export_dir_base, input_receiver_fn_map,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=False):
# pylint: disable=line-too-long
"""Exports a `SavedModel` containing `tf.MetaGraphDefs` for each requested mode.
See `tf.contrib.estimator.export_all_saved_models` for the currently
exposed version of this function.
For each mode passed in via the `input_receiver_fn_map`,
this method builds a new graph by calling the `input_receiver_fn` to obtain
feature and label `Tensor`s. Next, this method calls the `Estimator`'s
`model_fn` in the passed mode to generate the model graph based on
those features and labels, and restores the given checkpoint
(or, lacking that, the most recent checkpoint) into the graph.
Only one of the modes is used for saving variables to the `SavedModel`
(order of preference: @{tf.estimator.ModeKeys#TRAIN$TRAIN},
@{tf.estimator.ModeKeys#EVAL$EVAL}, then
@{tf.estimator.ModeKeys#PREDICT$PREDICT}), such that up to three
`tf.MetaGraphDefs` are saved with a single set of variables in a single
`SavedModel` directory.
For the variables and `tf.MetaGraphDefs`, this method creates a timestamped
export directory below `export_dir_base` and writes a `SavedModel` into it
containing the `tf.MetaGraphDef` for each exported mode and its associated
signatures.
For prediction, the exported `MetaGraphDef` will provide one `SignatureDef`
for each element of the `export_outputs` dict returned from the `model_fn`,
named using the same keys. One of these keys is always
`tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY`,
indicating which
signature will be served when a serving request does not specify one.
For each signature, the outputs are provided by the corresponding
`tf.estimator.export.ExportOutput`s, and the inputs are always the input
receivers provided by
the `serving_input_receiver_fn`.
For training and evaluation, the `train_op` is stored in an extra
collection,
and loss, metrics, and predictions are included in a `SignatureDef` for the
mode in question.
Extra assets may be written into the `SavedModel` via the `assets_extra`
argument. This should be a dict, where each key gives a destination path
(including the filename) relative to the assets.extra directory. The
corresponding value gives the full path of the source file to be copied.
For example, the simple case of copying a single file without renaming it
is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
Args:
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported `SavedModel`s.
input_receiver_fn_map: dict of `tf.estimator.ModeKeys` to
`input_receiver_fn` mappings, where the `input_receiver_fn` is a
function that takes no arguments and returns the appropriate subclass of
`InputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported `SavedModel`, or `None` if no extra assets are
needed.
as_text: whether to write the `SavedModel` proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the `NodeDef`s. For a detailed guide, see [Stripping
Default-Valued
Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
Returns:
A dict of `tf.estimator.ModeKeys` value to string path for each exported
directory.
Raises:
ValueError: if any `input_receiver_fn` is `None`, no `export_outputs`
are provided, or no checkpoint can be found.
"""
# pylint: enable=line-too-long
# TODO(b/65561022): Consider allowing multiple input_receiver_fns per mode.
with context.graph_mode():
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = checkpoint_management.latest_checkpoint(
self._model_dir)
if not checkpoint_path:
raise ValueError("Couldn't find trained model at %s." % self._model_dir)
export_dir = export_helpers.get_timestamped_export_dir(export_dir_base)
temp_export_dir = export_helpers.get_temp_export_dir(export_dir)
builder = saved_model_builder.SavedModelBuilder(temp_export_dir)
save_variables = True
# Note that the order in which we run here matters, as the first
# mode we pass through will be used to save the variables. We run TRAIN
# first, as that is also the mode used for checkpoints, and therefore
# we are not likely to have vars in PREDICT that are not in the checkpoint
# created by TRAIN.
if input_receiver_fn_map.get(model_fn_lib.ModeKeys.TRAIN):
self._add_meta_graph_for_mode(
builder, input_receiver_fn_map, checkpoint_path,
strip_default_attrs, save_variables,
mode=model_fn_lib.ModeKeys.TRAIN)
save_variables = False
if input_receiver_fn_map.get(model_fn_lib.ModeKeys.EVAL):
self._add_meta_graph_for_mode(
builder, input_receiver_fn_map, checkpoint_path,
strip_default_attrs, save_variables,
mode=model_fn_lib.ModeKeys.EVAL)
save_variables = False
if input_receiver_fn_map.get(model_fn_lib.ModeKeys.PREDICT):
self._add_meta_graph_for_mode(
builder, input_receiver_fn_map, checkpoint_path,
strip_default_attrs, save_variables,
mode=model_fn_lib.ModeKeys.PREDICT)
save_variables = False
if save_variables:
raise ValueError('No valid modes for exporting found. Got {}.'.format(
input_receiver_fn_map.keys()))
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(temp_export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
gfile.Rename(temp_export_dir, export_dir)
return export_dir
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None,
check_variables=True):
# pylint: disable=line-too-long
"""Loads variables and adds them along with a `tf.MetaGraphDef` for saving.
Args:
builder: instance of `tf.saved_model.builder.SavedModelBuilder` that will
be used for saving.
input_receiver_fn_map: dict of `tf.estimator.ModeKeys` to
`input_receiver_fn` mappings, where the `input_receiver_fn` is a
function that takes no argument and returns the appropriate subclass of
`InputReceiver`.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the `NodeDef`s. For a detailed guide, see [Stripping
Default-Valued
Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
save_variables: bool, whether variables should be saved. If `False`, just
the `tf.MetaGraphDef` will be saved. Note that `save_variables` should
only be `True` for the first call to this function, and the
`SavedModelBuilder` will raise an error if that is not the case.
mode: `tf.estimator.ModeKeys` value indicating which mode will be
exported.
export_tags: The set of tags with which to save `tf.MetaGraphDef`. If
`None`, a default set will be selected to match the passed mode.
check_variables: bool, whether to check the checkpoint has all variables.
Raises:
ValueError: if `save_variables` is `True` and `check_variable` is `False`.
"""
# pylint: enable=line-too-long
if export_tags is None:
export_tags = model_fn_lib.EXPORT_TAG_MAP[mode]
input_receiver_fn = input_receiver_fn_map[mode]
with ops.Graph().as_default() as g:
self._create_and_assert_global_step(g)
random_seed.set_random_seed(self._config.tf_random_seed)
input_receiver = input_receiver_fn()
# Call the model_fn and collect the export_outputs.
estimator_spec = self._call_model_fn(
features=input_receiver.features,
labels=getattr(input_receiver, 'labels', None),
mode=mode,
config=self.config)
export_outputs = model_fn_lib.export_outputs_for_mode(
mode=estimator_spec.mode,
serving_export_outputs=estimator_spec.export_outputs,
predictions=estimator_spec.predictions,
loss=estimator_spec.loss,
metrics=estimator_spec.eval_metric_ops)
# Build the SignatureDefs from receivers and all outputs
signature_def_map = export_helpers.build_all_signature_defs(
input_receiver.receiver_tensors,
export_outputs,
getattr(input_receiver, 'receiver_tensors_alternatives', None),
serving_only=(mode == model_fn_lib.ModeKeys.PREDICT))
with tf_session.Session(config=self._session_config) as session:
if estimator_spec.scaffold.local_init_op is not None:
local_init_op = estimator_spec.scaffold.local_init_op
else:
local_init_op = monitored_session.Scaffold.default_local_init_op()
# This saver will be used both for restoring variables now,
# and in saving out the metagraph below. This ensures that any
# Custom Savers stored with the Scaffold are passed through to the
# SavedModel for restore later.
graph_saver = estimator_spec.scaffold.saver or saver.Saver(sharded=True)
if save_variables and not check_variables:
raise ValueError('If `save_variables` is `True`, `check_variables` '
'must not be `False`.')
if check_variables:
try:
graph_saver.restore(session, checkpoint_path)
except errors.NotFoundError as e:
msg = ('Could not load all requested variables from checkpoint. '
'Please make sure your model_fn does not expect variables '
'that were not saved in the checkpoint.\n\n'
'Encountered error with mode `{}` while restoring '
'checkpoint from: `{}`. Full Traceback:\n\n{}').format(
mode, checkpoint_path, e)
raise ValueError(msg)
# We add the train op explicitly for now, so that we don't have to
# change the Builder public interface. Note that this is a no-op
# for prediction, where train_op is None.
builder._add_train_op(estimator_spec.train_op) # pylint: disable=protected-access
meta_graph_kwargs = dict(
tags=export_tags,
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
strip_default_attrs=strip_default_attrs,
legacy_init_op=local_init_op,
saver=graph_saver)
if save_variables:
builder.add_meta_graph_and_variables(
session, **meta_graph_kwargs)
else:
builder.add_meta_graph(**meta_graph_kwargs)
def _get_features_from_input_fn(self, input_fn, mode):
"""Extracts the `features` from return values of `input_fn`."""
result = self._call_input_fn(input_fn, mode)
result, _, hooks = estimator_util.parse_input_fn_result(result)
self._validate_features_in_predict_input(result)
return result, hooks
def _validate_features_in_predict_input(self, result):
if not _has_dataset_or_queue_runner(result):
logging.warning('Input graph does not use tf.data.Dataset or contain a '
'QueueRunner. That means predict yields forever. '
'This is probably a mistake.')
def _get_iterator_from_input_fn(self, input_fn, mode, distribution=None):
if distribution is not None:
result = distribution.distribute_dataset(
lambda: self._call_input_fn(input_fn, mode))
else:
result = self._call_input_fn(input_fn, mode)
iterator = result.make_initializable_iterator()
input_hooks = [estimator_util._DatasetInitializerHook(iterator)] # pylint: disable=protected-access
return iterator, input_hooks
def _get_features_and_labels_from_input_fn(self, input_fn, mode):
"""Extracts the `features` and labels from return values of `input_fn`."""
return estimator_util.parse_input_fn_result(
self._call_input_fn(input_fn, mode))
def _extract_batch_length(self, preds_evaluated):
"""Extracts batch length of predictions."""
batch_length = None
for key, value in six.iteritems(preds_evaluated):
batch_length = batch_length or value.shape[0]
if value.shape[0] != batch_length:
raise ValueError('Batch length of predictions should be the same. %s has '
'different batch length than others.' % key)
return batch_length
def _extract_keys(self, predictions, predict_keys):
"""Extracts `predict_keys` from `predictions`."""
if not predict_keys:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'predict_keys argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in predict_keys
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, predict_keys))
return predictions
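# For illustration (hypothetical tensors `c` and `p`):
#
#   self._extract_keys({'classes': c, 'probabilities': p}, ['classes'])
#   # -> {'classes': c}
#
# With `predict_keys=None` the predictions are returned unchanged.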
def _create_global_step(self, graph):
"""Creates the global step tensor in graph.
The global step tensor must be an integer type with name 'global_step' and
be added to the collection @{tf.GraphKeys#GLOBAL_STEP$GLOBAL_STEP}.
Args:
graph: The graph in which to create the global step tensor.
Returns:
The global step `tf.Tensor`.
"""
return training.create_global_step(graph)
def _create_and_assert_global_step(self, graph):
"""Creates and asserts properties of the global step.
Args:
graph: The graph in which to create the global step tensor.
Returns:
The global step `tf.Tensor`.
"""
step = self._create_global_step(graph)
assert step == training.get_global_step()
assert step.dtype.is_integer
return step
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: `tf.estimator.ModeKeys`
Returns:
The return value of the passed `input_fn`, which should be one of:
* A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a
tuple `(features, labels)` with same constraints as below.
* A tuple `(features, labels)`: Where `features` is a `Tensor` or a
dictionary of string feature name to `Tensor` and `labels` is a
`Tensor` or a dictionary of string label name to `Tensor`. Both
`features` and `labels` are consumed by `model_fn`. They should
satisfy the expectation of `model_fn` from inputs.
Raises:
ValueError: if `input_fn` takes invalid arguments.
"""
input_fn_args = function_utils.fn_args(input_fn)
kwargs = {}
if 'mode' in input_fn_args:
kwargs['mode'] = mode
if 'params' in input_fn_args:
kwargs['params'] = self.params
if 'config' in input_fn_args:
kwargs['config'] = self.config
with ops.device('/cpu:0'):
return input_fn(**kwargs)
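# Sketch of an `input_fn` compatible with the optional arguments handled
# above (the feature name, values and batch size are assumptions for
# illustration):
#
#   def train_input_fn(mode, params, config):
#     dataset = tf.data.Dataset.from_tensor_slices(
#         ({'x': [[1.0], [2.0]]}, [0, 1]))
#     return dataset.batch(params.get('batch_size', 2))
#
# Any of `mode`, `params` and `config` may be omitted from the signature;
# only the arguments actually declared are passed in.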
def _call_model_fn(self, features, labels, mode, config):
"""Calls model function.
Args:
features: features dict.
labels: labels dict.
mode: `tf.estimator.ModeKeys`
config: `tf.estimator.RunConfig`
Returns:
An `tf.estimator.EstimatorSpec` object.
Raises:
ValueError: if `model_fn` returns invalid objects.
"""
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
if 'labels' in model_fn_args:
kwargs['labels'] = labels
else:
if labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = config
logging.info('Calling model_fn.')
model_fn_results = self._model_fn(features=features, **kwargs)
logging.info('Done calling model_fn.')
if not isinstance(model_fn_results, model_fn_lib.EstimatorSpec):
raise ValueError('model_fn should return an EstimatorSpec.')
return model_fn_results
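# Sketch of a minimal `model_fn` accepted by the call above (the layer size,
# loss and optimizer are illustrative assumptions, not the library's
# prescribed choices):
#
#   def model_fn(features, labels, mode, params, config):
#     logits = tf.layers.dense(features['x'], 2)
#     loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
#     train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
#         loss, global_step=tf.train.get_global_step())
#     return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op,
#                                       predictions={'logits': logits})
#
# Only `features` is mandatory; `labels`, `mode`, `params` and `config` are
# passed when present in the signature, as handled above.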
def _train_model(self, input_fn, hooks, saving_listeners):
if self._train_distribution:
return self._train_model_distributed(input_fn, hooks, saving_listeners)
else:
return self._train_model_default(input_fn, hooks, saving_listeners)
def _train_model_default(self, input_fn, hooks, saving_listeners):
"""Initiate training with `input_fn`, without `DistributionStrategies`.
Args:
input_fn: A function that provides input data for training as minibatches.
hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
callbacks inside the training loop.
saving_listeners: list of `tf.train.CheckpointSaverListener` objects. Used
for callbacks that run immediately before or after checkpoint savings.
Returns:
Loss from training
"""
worker_hooks = []
with ops.Graph().as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step_tensor = self._create_and_assert_global_step(g)
# Skip creating a read variable if _create_and_assert_global_step
# returns None (e.g. tf.contrib.estimator.SavedModelEstimator).
if global_step_tensor is not None:
training_util._get_or_create_global_step_read(g) # pylint: disable=protected-access
features, labels, input_hooks = (
self._get_features_and_labels_from_input_fn(
input_fn, model_fn_lib.ModeKeys.TRAIN))
worker_hooks.extend(input_hooks)
estimator_spec = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
global_step_tensor = training_util.get_global_step(g)
return self._train_with_estimator_spec(estimator_spec, worker_hooks,
hooks, global_step_tensor,
saving_listeners)
def _train_model_distributed(self, input_fn, hooks, saving_listeners):
"""Initiate training with `input_fn`, using `DistributionStrategies`.
Args:
input_fn: A function that provides input data for training as minibatches.
hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
callbacks inside the training loop.
saving_listeners: list of `tf.train.CheckpointSaverListener` objects. Used
for callbacks that run immediately before or after checkpoint savings.
Returns:
Loss from training
"""
self._train_distribution.configure(self._session_config)
# TODO(sourabhbajaj): Remove this hack once we migrate the other strategies
# to use the new API
is_tpu_strategy = (
self._train_distribution.__class__.__name__ == 'TPUStrategy')
worker_hooks = []
with ops.Graph().as_default() as g:
# We want to create the iterations variable outside the distribution scope
# as that is just stored on the host and mainly used to drive the loop
# and doesn't need to be a Mirrored/Device variable.
if is_tpu_strategy:
steps_per_run_variable = training.get_or_create_steps_per_run_variable()
with self._train_distribution.scope():
random_seed.set_random_seed(self._config.tf_random_seed)
iterator, input_hooks = self._get_iterator_from_input_fn(
input_fn, model_fn_lib.ModeKeys.TRAIN, self._train_distribution)
worker_hooks.extend(input_hooks)
global_step_tensor = self._create_and_assert_global_step(g)
# we want to add to the global collection in the main thread not the
# tower threads.
ops.add_to_collection(
training_util.GLOBAL_STEP_READ_KEY,
self._train_distribution.read_var(global_step_tensor))
if is_tpu_strategy:
# Create a step_fn from the train_op of grouped_estimator_spec
def step_fn(ctx, features, labels=None):
"""A single step that is passed to run_on_dataset."""
estimator_spec = self._train_distribution.call_for_each_tower(
self._call_model_fn,
features,
labels,
model_fn_lib.ModeKeys.TRAIN,
self.config)
ctx.set_last_step_output(
name='loss',
output=estimator_spec.loss,
aggregation=distribute_lib.get_loss_reduction())
ctx.set_non_tensor_output(
name='estimator_spec', output=estimator_spec)
return estimator_spec.train_op
# Create new train_op post graph rewrites
initial_training_loss = constant_op.constant(1e7)
ctx = self._train_distribution.run_steps_on_dataset(
step_fn, iterator, iterations=steps_per_run_variable,
initial_loop_values={'loss': initial_training_loss})
distributed_train_op = ctx.run_op
loss = ctx.last_step_outputs['loss']
grouped_estimator_spec = ctx.non_tensor_outputs['estimator_spec']
else:
features, labels = estimator_util.parse_iterator_result(
iterator.get_next())
grouped_estimator_spec = self._train_distribution.call_for_each_tower(
self._call_model_fn,
features,
labels, # although this will be None it seems
model_fn_lib.ModeKeys.TRAIN,
self.config)
loss = self._train_distribution.unwrap(
self._train_distribution.reduce(
distribute_lib.get_loss_reduction(),
grouped_estimator_spec.loss,
destinations='/device:CPU:0'))[0]
distributed_train_op = grouped_estimator_spec.train_op
scaffold = _combine_distributed_scaffold(
grouped_estimator_spec.scaffold, self._train_distribution)
# TODO(yuefengz): add a test for unwrapping per_device_hooks.
def get_hooks_from_the_first_device(per_device_hooks):
return [
self._train_distribution.unwrap(per_device_hook)[0]
for per_device_hook in per_device_hooks
]
training_hooks = get_hooks_from_the_first_device(
grouped_estimator_spec.training_hooks)
training_chief_hooks = get_hooks_from_the_first_device(
grouped_estimator_spec.training_chief_hooks)
worker_hooks.append(
estimator_util.StrategyInitFinalizeHook(
self._train_distribution.initialize,
self._train_distribution.finalize))
estimator_spec = model_fn_lib.EstimatorSpec(
mode=grouped_estimator_spec.mode,
loss=loss,
train_op=self._train_distribution.group(distributed_train_op),
training_hooks=training_hooks,
training_chief_hooks=training_chief_hooks,
scaffold=scaffold)
return self._train_with_estimator_spec(estimator_spec, worker_hooks,
hooks, global_step_tensor,
saving_listeners)
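# Illustrative sketch (assumption): this distributed path is reached when a
# strategy is supplied through `RunConfig`, e.g.
#
#   config = tf.estimator.RunConfig(
#       train_distribute=tf.contrib.distribute.MirroredStrategy())
#   estimator = tf.estimator.Estimator(model_fn, config=config)
#
# which makes `self._train_distribution` non-None.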
def _train_with_estimator_spec(self, estimator_spec, worker_hooks, hooks,
global_step_tensor, saving_listeners):
"""Train a model with the given Estimator Spec."""
if self._warm_start_settings:
logging.info('Warm-starting with WarmStartSettings: %s' %
(self._warm_start_settings,))
warm_starting_util.warm_start(*self._warm_start_settings)
# Check if the user created a loss summary, and add one if they didn't.
# We assume here that the summary is called 'loss'. If it is not, we will
# make another one with the name 'loss' to ensure it shows up in the right
# graph in TensorBoard.
if not any([x.op.name == 'loss'
for x in ops.get_collection(ops.GraphKeys.SUMMARIES)]):
summary.scalar('loss', estimator_spec.loss)
ops.add_to_collection(ops.GraphKeys.LOSSES, estimator_spec.loss)
worker_hooks.extend(hooks)
worker_hooks.append(
training.NanTensorHook(estimator_spec.loss)
)
if self._config.log_step_count_steps is not None:
worker_hooks.append(
training.LoggingTensorHook(
{
'loss': estimator_spec.loss,
'step': global_step_tensor
},
every_n_iter=self._config.log_step_count_steps)
)
worker_hooks.extend(estimator_spec.training_hooks)
if not (estimator_spec.scaffold.saver or
ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
training.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=(
self._config.keep_checkpoint_every_n_hours),
defer_build=True,
save_relative_paths=True))
chief_hooks = []
all_hooks = worker_hooks + list(estimator_spec.training_chief_hooks)
saver_hooks = [
h for h in all_hooks if isinstance(h, training.CheckpointSaverHook)]
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
if not saver_hooks:
chief_hooks = [
training.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=estimator_spec.scaffold)
]
saver_hooks = [chief_hooks[0]]
if saving_listeners:
if not saver_hooks:
raise ValueError(
'There should be a CheckpointSaverHook to use saving_listeners. '
'Please set one of the RunConfig.save_checkpoints_steps or '
'RunConfig.save_checkpoints_secs.')
else:
# It is expected to have one CheckpointSaverHook. If multiple, we pick
# up the first one to add listener.
saver_hooks[0]._listeners.extend(saving_listeners) # pylint: disable=protected-access
with training.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=estimator_spec.scaffold,
hooks=worker_hooks,
chief_only_hooks=(
tuple(chief_hooks) + tuple(estimator_spec.training_chief_hooks)),
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config,
log_step_count_steps=self._config.log_step_count_steps) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
return loss
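# Illustrative sketch of a `saving_listeners` entry (the listener class name
# and `estimator` are hypothetical); as checked above, it requires
# checkpointing via `save_checkpoints_steps` or `save_checkpoints_secs`:
#
#   class _LogListener(tf.train.CheckpointSaverListener):
#     def after_save(self, session, global_step_value):
#       print('checkpoint written at step', global_step_value)
#
#   estimator.train(train_input_fn, steps=100,
#                   saving_listeners=[_LogListener()])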
def _evaluate_build_graph(self, input_fn, hooks=None, checkpoint_path=None):
"""Builds the graph and related hooks to run evaluation."""
random_seed.set_random_seed(self._config.tf_random_seed)
self._create_and_assert_global_step(ops.get_default_graph())
if self._eval_distribution:
(scaffold, evaluation_hooks, input_hooks, update_op, eval_dict) = (
self._call_model_fn_eval_distributed(input_fn, self.config))
else:
(scaffold, evaluation_hooks, input_hooks, update_op, eval_dict) = (
self._call_model_fn_eval(input_fn, self.config))
global_step_tensor = training_util.get_global_step(ops.get_default_graph())
# Call to warm_start has to be after model_fn is called.
self._maybe_warm_start(checkpoint_path)
if ops.GraphKeys.GLOBAL_STEP in eval_dict:
raise ValueError(
'Metric with name `global_step` is not allowed, because Estimator '
'already defines a default metric with the same name.')
eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor
all_hooks = list(input_hooks)
all_hooks.extend(hooks)
all_hooks.extend(list(evaluation_hooks or []))
# New local variables have been added, so update the estimator spec's
# local init op if it was defined.
if scaffold and scaffold.local_init_op:
# Ensure that eval step has been created before updating local init op.
evaluation._get_or_create_eval_step() # pylint: disable=protected-access
scaffold = monitored_session.Scaffold(
local_init_op=control_flow_ops.group(
scaffold.local_init_op,
monitored_session.Scaffold.default_local_init_op()),
copy_from_scaffold=scaffold
)
return scaffold, update_op, eval_dict, all_hooks
def _call_model_fn_eval(self, input_fn, config):
"""Call model_fn for evaluation and handle return values."""
features, labels, input_hooks = self._get_features_and_labels_from_input_fn(
input_fn, model_fn_lib.ModeKeys.EVAL)
estimator_spec = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL, config)
eval_metric_ops = _verify_and_create_loss_metric(
estimator_spec.eval_metric_ops, estimator_spec.loss)
update_op, eval_dict = _extract_metric_update_ops(eval_metric_ops)
return (estimator_spec.scaffold, estimator_spec.evaluation_hooks,
input_hooks, update_op, eval_dict)
def _call_model_fn_eval_distributed(self, input_fn, config):
"""Call model_fn in distribution mode and handle return values."""
iterator, input_hooks = self._get_iterator_from_input_fn(
input_fn, model_fn_lib.ModeKeys.EVAL, self._eval_distribution)
is_tpu_strategy = (
self._eval_distribution.__class__.__name__ == 'TPUStrategy')
if is_tpu_strategy:
def step_fn(ctx, features, labels=None):
"""Runs one step of the eval computation and captures outputs."""
estimator_spec = self._eval_distribution.call_for_each_tower(
self._call_model_fn, features, labels, model_fn_lib.ModeKeys.EVAL,
config)
eval_metric_ops = _verify_and_create_loss_metric(
estimator_spec.eval_metric_ops, estimator_spec.loss,
self._eval_distribution)
update_op, eval_dict = _extract_metric_update_ops(
eval_metric_ops, self._eval_distribution)
ctx.set_non_tensor_output(name='estimator_spec', output=estimator_spec)
ctx.set_non_tensor_output(name='eval_dict', output=eval_dict)
return update_op
# TODO(priyag): Fix eval step hook to account for steps_per_run.
ctx = self._eval_distribution.run_steps_on_dataset(
step_fn, iterator, iterations=self._eval_distribution.steps_per_run)
update_op = ctx.run_op
eval_dict = ctx.non_tensor_outputs['eval_dict']
grouped_estimator_spec = ctx.non_tensor_outputs['estimator_spec']
else:
features, labels = estimator_util.parse_iterator_result(
iterator.get_next())
grouped_estimator_spec = self._eval_distribution.call_for_each_tower(
self._call_model_fn, features, labels,
model_fn_lib.ModeKeys.EVAL, config)
eval_metric_ops = _verify_and_create_loss_metric(
grouped_estimator_spec.eval_metric_ops, grouped_estimator_spec.loss,
self._eval_distribution)
update_op, eval_dict = _extract_metric_update_ops(
eval_metric_ops, self._eval_distribution)
scaffold = _combine_distributed_scaffold(
grouped_estimator_spec.scaffold, self._eval_distribution)
evaluation_hooks = self._eval_distribution.unwrap(
grouped_estimator_spec.evaluation_hooks)[0]
evaluation_hooks = evaluation_hooks + (
estimator_util.StrategyInitFinalizeHook(
self._eval_distribution.initialize,
self._eval_distribution.finalize),)
return (scaffold, evaluation_hooks, input_hooks, update_op, eval_dict)
def _evaluate_run(self, checkpoint_path, scaffold, update_op, eval_dict,
all_hooks, output_dir):
"""Run evaluation."""
eval_results = evaluation._evaluate_once( # pylint: disable=protected-access
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=all_hooks,
config=self._session_config)
current_global_step = eval_results[ops.GraphKeys.GLOBAL_STEP]
_write_dict_to_summary(
output_dir=output_dir,
dictionary=eval_results,
current_global_step=current_global_step)
if checkpoint_path:
_write_checkpoint_path_to_summary(
output_dir=output_dir,
checkpoint_path=checkpoint_path,
current_global_step=current_global_step)
return eval_results
def _maybe_warm_start(self, checkpoint_path):
if not checkpoint_path and self._warm_start_settings:
logging.info('Warm-starting with WarmStartSettings: %s' %
(self._warm_start_settings,))
warm_starting_util.warm_start(*self._warm_start_settings)
def _verify_and_create_loss_metric(eval_metric_ops, loss, distribution=None):
"""Creates a metric for loss and throws an error if one already exists."""
if model_fn_lib.LOSS_METRIC_KEY in eval_metric_ops:
raise ValueError(
'Metric with name "%s" is not allowed, because Estimator ' %
(model_fn_lib.LOSS_METRIC_KEY) +
'already defines a default metric with the same name.')
if distribution is None:
loss_metric = metrics_lib.mean(loss)
else:
loss_metric = distribution.call_for_each_tower(
metrics_lib.mean, loss)
eval_metric_ops[model_fn_lib.LOSS_METRIC_KEY] = loss_metric
return eval_metric_ops
def maybe_overwrite_model_dir_and_session_config(config, model_dir):
"""Overwrite estimator config by `model_dir` and `session_config` if needed.
Args:
config: Original estimator config.
model_dir: Estimator model checkpoint directory.
Returns:
Overwritten estimator config.
Raises:
ValueError: Model directory inconsistent between `model_dir` and `config`.
"""
if config is None:
config = run_config.RunConfig()
logging.info('Using default config.')
if not isinstance(config, run_config.RunConfig):
raise ValueError(
'config must be an instance of `RunConfig`, but provided %s.' % config)
if config.session_config is None:
session_config = run_config.get_default_session_config()
config = run_config.RunConfig.replace(config, session_config=session_config)
model_dir = compat_internal.path_to_str(model_dir)
if model_dir is not None:
if (getattr(config, 'model_dir', None) is not None and
config.model_dir != model_dir):
raise ValueError(
"`model_dir` is set both in the constructor and `RunConfig`, but with "
"different values. In constructor: '{}', in `RunConfig`: "
"'{}' ".format(model_dir, config.model_dir))
if model_dir:
config = run_config.RunConfig.replace(config, model_dir=model_dir)
elif getattr(config, 'model_dir', None) is None:
model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s', model_dir)
config = run_config.RunConfig.replace(config, model_dir=model_dir)
return config
def create_per_tower_ready_for_local_init_op(scaffold):
"""Create a `tf.train.Scaffold.ready_for_local_init_op` inside a tower."""
if scaffold.ready_for_local_init_op:
return scaffold.ready_for_local_init_op
def default_ready_for_local_init_op():
return variables.report_uninitialized_variables(
variables.global_variables())
return monitored_session.Scaffold.get_or_default(
'ready_for_local_init_op', ops.GraphKeys.READY_FOR_LOCAL_INIT_OP,
default_ready_for_local_init_op)
def _combine_distributed_scaffold(grouped_scaffold, distribution):
"""Combines scaffold(s) returned from `distribution.call_for_each_tower`."""
# TODO(anjalisridhar): Figure out how to resolve the following scaffold
# parameters: init_feed_dict, init_fn.
scaffold_list = distribution.unwrap(grouped_scaffold)
init_feed_dict = [
s.init_feed_dict
for s in scaffold_list
if s.init_feed_dict is not None
]
if init_feed_dict:
init_feed_dict = distribution.group(init_feed_dict)
else:
init_feed_dict = None
init_fn = [s.init_fn for s in scaffold_list if s.init_fn is not None]
if init_fn:
init_fn = distribution.group(init_fn)
else:
init_fn = None
init_op = [s.init_op for s in scaffold_list if s.init_op is not None]
if init_op:
init_op = distribution.group(init_op)
else:
init_op = None
def _unwrap_and_concat(value):
value = nest.flatten(distribution.unwrap(value))
if len(value) != 1:
return array_ops.concat(value, 0)
return value[0]
ready_op = distribution.call_for_each_tower(
lambda scaffold: scaffold.ready_op, grouped_scaffold)
if ready_op is not None:
ready_op = _unwrap_and_concat(ready_op)
ready_for_local_init_op = distribution.call_for_each_tower(
create_per_tower_ready_for_local_init_op, grouped_scaffold)
if ready_for_local_init_op is not None:
ready_for_local_init_op = _unwrap_and_concat(ready_for_local_init_op)
else:
ready_for_local_init_op = None
local_init_op = [
s.local_init_op
for s in scaffold_list
if s.local_init_op is not None
]
if local_init_op:
local_init_op = distribution.group(local_init_op)
else:
local_init_op = None
summary_op = [
s.summary_op for s in scaffold_list if s.summary_op is not None
]
if summary_op:
summary_op = distribution.group(summary_op)
else:
summary_op = None
scaffold = monitored_session.Scaffold(
init_op=init_op,
ready_op=ready_op,
ready_for_local_init_op=ready_for_local_init_op,
local_init_op=local_init_op,
summary_op=summary_op,
init_feed_dict=init_feed_dict,
init_fn=init_fn)
return scaffold
def _check_checkpoint_available(model_dir):
latest_path = checkpoint_management.latest_checkpoint(model_dir)
if not latest_path:
raise ValueError(
'Could not find trained model in model_dir: {}.'.format(model_dir))
def _check_hooks_type(hooks):
"""Returns hooks if all are `SessionRunHook`, raises TypeError otherwise."""
hooks = list(hooks or [])
for h in hooks:
if not isinstance(h, training.SessionRunHook):
raise TypeError('Hooks must be a SessionRunHook, given: {}'.format(h))
return hooks
def _check_listeners_type(saving_listeners):
"""Check listeners type."""
listeners = list(saving_listeners or [])
for l in listeners:
if not isinstance(l, training.CheckpointSaverListener):
raise TypeError(
'saving_listeners must be a list of CheckpointSaverListener, '
'given: {}'.format(l))
return listeners
def _get_replica_device_setter(config):
"""Creates a replica device setter if required as a default `device_fn`.
`Estimator` uses `tf.train.replica_device_setter` as a default device placer.
It sets the distribution-related arguments, such as the number of
`ps_replicas`, based on the given `config`.
Args:
config: A `tf.estimator.RunConfig` instance.
Returns:
A replica device setter, or `None`.
"""
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return training.replica_device_setter(
ps_tasks=config.num_ps_replicas,
worker_device=worker_device,
merge_devices=True,
ps_ops=list(device_setter.STANDARD_PS_OPS),
cluster=config.cluster_spec)
else:
return None
def _verify_model_fn_args(model_fn, params):
"""Verifies `model_fn` arguments."""
args = set(function_utils.fn_args(model_fn))
if 'features' not in args:
raise ValueError('model_fn (%s) must include features argument.' % model_fn)
if params is not None and 'params' not in args:
raise ValueError('model_fn (%s) does not include params argument, '
'but params (%s) is passed to Estimator.' % (model_fn,
params))
if params is None and 'params' in args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
non_valid_args = list(args - _VALID_MODEL_FN_ARGS)
if non_valid_args:
raise ValueError('model_fn (%s) has the following unexpected args: %s' %
(model_fn, non_valid_args))
def _load_global_step_from_checkpoint_dir(checkpoint_dir):
try:
checkpoint_reader = training.NewCheckpointReader(
training.latest_checkpoint(checkpoint_dir))
return checkpoint_reader.get_tensor(ops.GraphKeys.GLOBAL_STEP)
except: # pylint: disable=bare-except
return 0
def _extract_metric_update_ops(eval_dict, distribution=None):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
# Sort metrics lexicographically so graph is identical every time.
for name, value in sorted(six.iteritems(eval_dict)):
value_ops[name] = value[0]
update_ops.append(
distribution.group(value[1]) if distribution else value[1])
update_op = control_flow_ops.group(*update_ops) if update_ops else None
return update_op, value_ops
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v)
for k, v in sorted(six.iteritems(dictionary))
if not isinstance(v, six.binary_type))
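# For illustration: _dict_to_str({'loss': 0.25, 'accuracy': 0.9}) returns
# 'accuracy = 0.9, loss = 0.25' (keys sorted, binary-string values skipped).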
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = writer_cache.FileWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == 'global_step':
continue
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
summary_proto.value.add(tag=key, simple_value=float(dictionary[key]))
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
summary_proto.value.add(tag=key, simple_value=int(dictionary[key]))
elif isinstance(dictionary[key], six.binary_type):
try:
summ = summary_pb2.Summary.FromString(dictionary[key])
for i, _ in enumerate(summ.value):
summ.value[i].tag = '%s/%d' % (key, i)
summary_proto.value.extend(summ.value)
except message.DecodeError:
logging.warn('Skipping summary for %s, cannot parse string to Summary.',
key)
continue
elif isinstance(dictionary[key], np.ndarray):
value = summary_proto.value.add()
value.tag = key
value.node_name = key
tensor_proto = tensor_util.make_tensor_proto(dictionary[key])
value.tensor.CopyFrom(tensor_proto)
# pylint: disable=line-too-long
logging.info(
'Summary for np.ndarray is not visible in Tensorboard by default. '
'Consider using a Tensorboard plugin for visualization (see '
'https://github.com/tensorflow/tensorboard-plugin-example/blob/master/README.md'
' for more information).')
# pylint: enable=line-too-long
else:
logging.warn(
'Skipping summary for %s, must be a float, np.float32, np.int64, '
'np.int32 or int or np.ndarray or a serialized string of Summary.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
def _write_checkpoint_path_to_summary(output_dir, checkpoint_path,
current_global_step):
"""Writes `checkpoint_path` into summary file in the given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
checkpoint_path: `str`, checkpoint file path to be written to summary file.
current_global_step: `int`, the current global step.
"""
checkpoint_path_tag = 'checkpoint_path'
logging.info('Saving \'%s\' summary for global step %d: %s',
checkpoint_path_tag, current_global_step, checkpoint_path)
summary_proto = summary_pb2.Summary()
summary_proto.value.add(
tag=checkpoint_path_tag,
tensor=tensor_util.make_tensor_proto(
checkpoint_path, dtype=dtypes.string))
summary_writer = writer_cache.FileWriterCache.get(output_dir)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
def _has_dataset_or_queue_runner(maybe_tensor):
"""Returns `True` if `Dataset` or `QueueRunner` has been used."""
# Check TF dataset first. Here, we use a simple algorithm to check the top
# level Tensors only, which should be sufficient for most users.
tensors = [x for x in nest.flatten(maybe_tensor) if isinstance(x, ops.Tensor)]
if any([t.op.type == 'IteratorGetNext' for t in tensors]):
return True
# Now, check queue.
return ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS)
VocabInfo = warm_starting_util.VocabInfo # pylint: disable=invalid-name
estimator_export('estimator.VocabInfo')(VocabInfo)
@estimator_export('estimator.WarmStartSettings')
class WarmStartSettings(
collections.namedtuple('WarmStartSettings', [
'ckpt_to_initialize_from',
'vars_to_warm_start',
'var_name_to_vocab_info',
'var_name_to_prev_var_name',
])):
"""Settings for warm-starting in `tf.estimator.Estimators`.
Example use with a canned `tf.estimator.DNNClassifier`:
```
emb_vocab_file = tf.feature_column.embedding_column(
tf.feature_column.categorical_column_with_vocabulary_file(
"sc_vocab_file", "new_vocab.txt", vocab_size=100),
dimension=8)
emb_vocab_list = tf.feature_column.embedding_column(
tf.feature_column.categorical_column_with_vocabulary_list(
"sc_vocab_list", vocabulary_list=["a", "b"]),
dimension=8)
estimator = tf.estimator.DNNClassifier(
hidden_units=[128, 64], feature_columns=[emb_vocab_file, emb_vocab_list],
warm_start_from=ws)
```
where `ws` could be defined as:
Warm-start all weights in the model (input layer and hidden weights).
Either the directory or a specific checkpoint can be provided (in the case
of the former, the latest checkpoint will be used):
```
ws = WarmStartSettings(ckpt_to_initialize_from="/tmp")
ws = WarmStartSettings(ckpt_to_initialize_from="/tmp/model-1000")
```
Warm-start only the embeddings (input layer):
```
ws = WarmStartSettings(ckpt_to_initialize_from="/tmp",
vars_to_warm_start=".*input_layer.*")
```
Warm-start all weights but the embedding parameters corresponding to
`sc_vocab_file` have a different vocab from the one used in the current
model:
```
vocab_info = tf.estimator.VocabInfo(
new_vocab=sc_vocab_file.vocabulary_file,
new_vocab_size=sc_vocab_file.vocabulary_size,
num_oov_buckets=sc_vocab_file.num_oov_buckets,
old_vocab="old_vocab.txt"
)
ws = WarmStartSettings(
ckpt_to_initialize_from="/tmp",
var_name_to_vocab_info={
"input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
})
```
Warm-start only `sc_vocab_file` embeddings (and no other variables), which
have a different vocab from the one used in the current model:
```
vocab_info = tf.estimator.VocabInfo(
new_vocab=sc_vocab_file.vocabulary_file,
new_vocab_size=sc_vocab_file.vocabulary_size,
num_oov_buckets=sc_vocab_file.num_oov_buckets,
old_vocab="old_vocab.txt"
)
ws = WarmStartSettings(
ckpt_to_initialize_from="/tmp",
vars_to_warm_start=None,
var_name_to_vocab_info={
"input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
})
```
Warm-start all weights but the parameters corresponding to `sc_vocab_file`
have a different vocab from the one used in current checkpoint, and only
100 of those entries were used:
```
vocab_info = tf.estimator.VocabInfo(
new_vocab=sc_vocab_file.vocabulary_file,
new_vocab_size=sc_vocab_file.vocabulary_size,
num_oov_buckets=sc_vocab_file.num_oov_buckets,
old_vocab="old_vocab.txt",
old_vocab_size=100
)
ws = WarmStartSettings(
ckpt_to_initialize_from="/tmp",
var_name_to_vocab_info={
"input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
})
```
Warm-start all weights but the parameters corresponding to `sc_vocab_file`
have a different vocab from the one used in current checkpoint and the
parameters corresponding to `sc_vocab_list` have a different name from the
current checkpoint:
```
vocab_info = tf.estimator.VocabInfo(
new_vocab=sc_vocab_file.vocabulary_file,
new_vocab_size=sc_vocab_file.vocabulary_size,
num_oov_buckets=sc_vocab_file.num_oov_buckets,
old_vocab="old_vocab.txt",
old_vocab_size=100
)
ws = WarmStartSettings(
ckpt_to_initialize_from="/tmp",
var_name_to_vocab_info={
"input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
},
var_name_to_prev_var_name={
"input_layer/sc_vocab_list_embedding/embedding_weights":
"old_tensor_name"
})
```
Attributes:
ckpt_to_initialize_from: [Required] A string specifying the directory with
checkpoint file(s) or path to checkpoint from which to warm-start the
model parameters.
vars_to_warm_start: [Optional] One of the following:
- A regular expression (string) that captures which variables to
warm-start (see `tf.get_collection`). This expression will only consider
variables in the `TRAINABLE_VARIABLES` collection.
- A list of Variables to warm-start.
- A list of strings, each representing a full variable name to warm-start.
- `None`, in which case only variables specified in
`var_name_to_vocab_info` will be warm-started.
Defaults to `'.*'`, which warm-starts all variables in the
`TRAINABLE_VARIABLES` collection. Note that this excludes variables such
as accumulators and moving statistics from batch norm.
var_name_to_vocab_info: [Optional] Dict of variable names (strings) to
`tf.estimator.VocabInfo`. The variable names should be "full" variables,
not the names of the partitions. If not explicitly provided, the variable
is assumed to have no (changes to) vocabulary.
var_name_to_prev_var_name: [Optional] Dict of variable names (strings) to
name of the previously-trained variable in `ckpt_to_initialize_from`. If
not explicitly provided, the name of the variable is assumed to be same
between previous checkpoint and current model.
"""
def __new__(cls,
ckpt_to_initialize_from,
vars_to_warm_start='.*',
var_name_to_vocab_info=None,
var_name_to_prev_var_name=None):
if not ckpt_to_initialize_from:
raise ValueError(
'`ckpt_to_initialize_from` MUST be set in WarmStartSettings')
return super(WarmStartSettings, cls).__new__(
cls,
ckpt_to_initialize_from,
vars_to_warm_start,
var_name_to_vocab_info or {},
var_name_to_prev_var_name or {},
)
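# Illustrative sketch (assumption): warm-starting can also be requested with
# a plain checkpoint path, which `_get_default_warm_start_settings` below
# converts into a `WarmStartSettings`:
#
#   estimator = tf.estimator.Estimator(model_fn=model_fn,
#                                      model_dir='/tmp/new_model',
#                                      warm_start_from='/tmp/prev_model')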
def _get_saved_model_ckpt(saved_model_dir):
"""Return path to variables checkpoint in a `SavedModel` directory."""
if not gfile.Exists(
os.path.join(saved_model_utils.get_variables_dir(saved_model_dir),
compat.as_text('variables.index'))):
raise ValueError('Directory provided has an invalid SavedModel format: %s'
% saved_model_dir)
return saved_model_utils.get_variables_path(saved_model_dir)
def _get_default_warm_start_settings(warm_start_from):
"""Returns default `tf.estimator.WarmStartSettings`.
Args:
warm_start_from: Either a string representing the filepath of a checkpoint
or `SavedModel` to initialize from, or an instance of
`tf.estimator.WarmStartSettings`.
Returns:
Either None or an instance of `WarmStartSettings`.
Raises:
ValueError: If `warm_start_from` is not `None` but is neither a string nor
an
instance of `WarmStartSettings`.
"""
if warm_start_from is None:
return None
if isinstance(warm_start_from, (six.string_types, six.binary_type)):
# Infer that this is a SavedModel if export_path +
# 'variables/variables.index' exists, and if so, construct the
# WarmStartSettings pointing to the variables path
# (export_path + 'variables/variables').
if gfile.Exists(os.path.join(
saved_model_utils.get_variables_dir(warm_start_from),
compat.as_text('variables.index'))):
logging.info('Warm-starting from a SavedModel')
return WarmStartSettings(
ckpt_to_initialize_from=saved_model_utils.get_variables_path(
warm_start_from))
return WarmStartSettings(ckpt_to_initialize_from=warm_start_from)
elif isinstance(warm_start_from, WarmStartSettings):
return warm_start_from
else:
raise ValueError('warm_start_from must be a string or a WarmStartSettings, '
'instead got {}'.format(type(warm_start_from)))
<|file_name|>session_handler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: UTF-8
"""
This file is part of Commix Project (http://commixproject.com).
Copyright (c) 2014-2017 Anastasios Stasinopoulos (@ancst).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
For more see the file 'readme/COPYING' for copying permission.
"""
import os
import sys
import time
import base64
import sqlite3
import urllib2
from src.utils import menu
from src.utils import settings
from src.thirdparty.colorama import Fore, Back, Style, init
"""
Session handler via SQLite3 db.
"""
no_such_table = False
"""
Generate table name for SQLite3 db.
"""
def table_name(url):
host = url.split('//', 1)[1].split('/', 1)[0]
table_name = "session_" + host.replace(".","_").replace(":","_").replace("-","_")
return table_name
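# Example (for illustration only):
#   table_name("http://example.com:8080/index.php?id=1")
#   # -> "session_example_com_8080"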
"""
Flush session.
"""
def flush(url):
info_msg = "Flushing the stored session from the session file... "
sys.stdout.write(settings.print_info_msg(info_msg))
sys.stdout.flush()
try:
conn = sqlite3.connect(settings.SESSION_FILE)
tables = list(conn.execute("SELECT name FROM sqlite_master WHERE type is 'table'"))
conn.executescript(';'.join(["DROP TABLE IF EXISTS %s" %i for i in tables]))
conn.commit()
conn.close()
print "[ " + Fore.GREEN + "SUCCEED" + Style.RESET_ALL + " ]"
except sqlite3.OperationalError, err_msg:
print "[ " + Fore.RED + "FAILED" + Style.RESET_ALL + " ]"
err_msg = "Unable to flush the session file. " + str(err_msg).title()
print settings.print_critical_msg(err_msg)
"""
Clear injection point records
except latest for every technique.
"""
def clear(url):
try:
if no_such_table:
conn = sqlite3.connect(settings.SESSION_FILE)
conn.execute("DELETE FROM " + table_name(url) + "_ip WHERE "\
"id NOT IN (SELECT MAX(id) FROM " + \
table_name(url) + "_ip GROUP BY technique);")
conn.commit()
conn.close()
except sqlite3.OperationalError, err_msg:
print settings.print_critical_msg(err_msg)
except:
settings.LOAD_SESSION = False
return False
"""
Import successful injection points to session file.
"""
def injection_point_importation(url, technique, injection_type, separator, shell, vuln_parameter, prefix, suffix, TAG, alter_shell, payload, http_request_method, url_time_response, timesec, how_long, output_length, is_vulnerable):
try:
conn = sqlite3.connect(settings.SESSION_FILE)
conn.execute("CREATE TABLE IF NOT EXISTS " + table_name(url) + "_ip" + \
"(id INTEGER PRIMARY KEY, url VARCHAR, technique VARCHAR, injection_type VARCHAR, separator VARCHAR," \
"shell VARCHAR, vuln_parameter VARCHAR, prefix VARCHAR, suffix VARCHAR, "\
"TAG VARCHAR, alter_shell VARCHAR, payload VARCHAR, http_header VARCHAR, http_request_method VARCHAR, url_time_response INTEGER, "\
"timesec INTEGER, how_long INTEGER, output_length INTEGER, is_vulnerable VARCHAR);")
conn.execute("INSERT INTO " + table_name(url) + "_ip(url, technique, injection_type, separator, "\
"shell, vuln_parameter, prefix, suffix, TAG, alter_shell, payload, http_header, http_request_method, "\
"url_time_response, timesec, how_long, output_length, is_vulnerable) "\
"VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", \
(str(url), str(technique), str(injection_type), \
str(separator), str(shell), str(vuln_parameter), str(prefix), str(suffix), \
str(TAG), str(alter_shell), str(payload), str(settings.HTTP_HEADER), str(http_request_method), \
int(url_time_response), int(timesec), int(how_long), \
int(output_length), str(is_vulnerable)))
conn.commit()
conn.close()
if settings.INJECTION_CHECKER == False:
settings.INJECTION_CHECKER = True
except sqlite3.OperationalError, err_msg:
err_msg = str(err_msg)[:1].upper() + str(err_msg)[1:] + "."
err_msg += " You are advised to rerun with switch '--flush-session'."
print settings.print_critical_msg(err_msg)
sys.exit(0)
except sqlite3.DatabaseError, err_msg:
err_msg = "An error occurred while accessing session file ('"
err_msg += settings.SESSION_FILE + "'). "
err_msg += "If the problem persists use the '--flush-session' option."
print "\n" + settings.print_critical_msg(err_msg)
sys.exit(0)
"""
Export successful applied techniques from session file.
"""
def applied_techniques(url, http_request_method):
try:
conn = sqlite3.connect(settings.SESSION_FILE)
if settings.TESTABLE_PARAMETER:
applied_techniques = conn.execute("SELECT technique FROM " + table_name(url) + "_ip WHERE "\
"url = '" + url + "' AND "\
"vuln_parameter = '" + settings.TESTABLE_PARAMETER + "' AND "\
"http_request_method = '" + http_request_method + "' "\
"ORDER BY id DESC ;")
else:
applied_techniques = conn.execute("SELECT technique FROM " + table_name(url) + "_ip WHERE "\
"url = '" + url + "' AND "\
"vuln_parameter = '" + settings.INJECT_TAG + "' AND "\
"http_request_method = '" + http_request_method + "' "\
"ORDER BY id DESC ;")
values = []
for session in applied_techniques:
if "tempfile" in session[0][:8]:
settings.TEMPFILE_BASED_STATE = True
session = session[0][4:]
elif "dynamic" in session[0][:7]:
settings.EVAL_BASED_STATE = True
session = session[0][13:]
values += session[0][:1]
applied_techniques = ''.join(list(set(values)))
return applied_techniques
except sqlite3.OperationalError, err_msg:
#print settings.print_critical_msg(err_msg)
settings.LOAD_SESSION = False
return False
except:
settings.LOAD_SESSION = False
return False
"""
Export successful applied techniques from session file.
"""
def applied_levels(url, http_request_method):
try:
conn = sqlite3.connect(settings.SESSION_FILE)
if settings.TESTABLE_PARAMETER:
applied_level = conn.execute("SELECT is_vulnerable FROM " + table_name(url) + "_ip WHERE "\
"url = '" + url + "' AND "\
"vuln_parameter = '" + settings.TESTABLE_PARAMETER + "' AND "\
"http_request_method = '" + http_request_method + "' "\
"ORDER BY id DESC;")
else:
applied_level = conn.execute("SELECT is_vulnerable FROM " + table_name(url) + "_ip WHERE "\
"url = '" + url + "' AND "\
"vuln_parameter = '" + settings.INJECT_TAG + "' AND "\
"http_request_method = '" + http_request_method + "' "\
"ORDER BY id DESC;")
for session in applied_level:
return session[0]
except sqlite3.OperationalError, err_msg:
#print settings.print_critical_msg(err_msg)
settings.LOAD_SESSION = False
return False
except:
settings.LOAD_SESSION = False
return False
"""
Export successful injection points from session file.
"""
def injection_point_exportation(url, http_request_method):
try:
if not menu.options.flush_session:
conn = sqlite3.connect(settings.SESSION_FILE)
result = conn.execute("SELECT * FROM sqlite_master WHERE name = '" + \
table_name(url) + "_ip' AND type = 'table';")
if result:
if menu.options.tech[:1] == "c":
select_injection_type = "R"
elif menu.options.tech[:1] == "e":
settings.EVAL_BASED_STATE = True
select_injection_type = "R"
elif menu.options.tech[:1] == "t":
select_injection_type = "B"
else:
select_injection_type = "S"
if settings.TEMPFILE_BASED_STATE and select_injection_type == "S":
check_injection_technique = "t"
elif settings.EVAL_BASED_STATE and select_injection_type == "R":
check_injection_technique = "d"
else:
check_injection_technique = menu.options.tech[:1]
if settings.TESTABLE_PARAMETER:
cursor = conn.execute("SELECT * FROM " + table_name(url) + "_ip WHERE "\
"url = '" + url + "' AND "\
"injection_type like '" + select_injection_type + "%' AND "\
"technique like '" + check_injection_technique + "%' AND "\
"vuln_parameter = '" + settings.TESTABLE_PARAMETER + "' AND "\
"http_request_method = '" + http_request_method + "' "\
"ORDER BY id DESC limit 1;")
else:
cursor = conn.execute("SELECT * FROM " + table_name(url) + "_ip WHERE "\
"url = '" + url + "' AND "\
"injection_type like '" + select_injection_type + "%' AND "\
"technique like '" + check_injection_technique + "%' AND "\
"http_header = '" + settings.HTTP_HEADER + "' AND "\
"http_request_method = '" + http_request_method + "' "\
"ORDER BY id DESC limit 1;")
for session in cursor:
url = session[1]
technique = session[2]
injection_type = session[3]
separator = session[4]
shell = session[5]
vuln_parameter = session[6]
prefix = session[7]
suffix = session[8]
TAG = session[9]
alter_shell = session[10]
payload = session[11]
http_request_method = session[13]
url_time_response = session[14]
timesec = session[15]
how_long = session[16]
output_length = session[17]
is_vulnerable = session[18]
return url, technique, injection_type, separator, shell, vuln_parameter, prefix, suffix, TAG, alter_shell, payload, http_request_method, url_time_response, timesec, how_long, output_length, is_vulnerable
else:
no_such_table = True
pass
except sqlite3.OperationalError, err_msg:
#print settings.print_critical_msg(err_msg)
settings.LOAD_SESSION = False
return False
except:
settings.LOAD_SESSION = False
return False
"""
Notification about session.
"""
def notification(url, technique, injection_type):
try:
if settings.LOAD_SESSION == True:
success_msg = "A previously stored session has been held against that host."
print settings.print_success_msg(success_msg)
while True:
if not menu.options.batch:
question_msg = "Do you want to resume to the "
question_msg += "(" + injection_type.split(" ")[0] + ") "
question_msg += technique.rsplit(' ', 2)[0]
question_msg += " injection point? [Y/n] > "
sys.stdout.write(settings.print_question_msg(question_msg))
settings.LOAD_SESSION = sys.stdin.readline().replace("\n","").lower()
else:
settings.LOAD_SESSION = ""
if len(settings.LOAD_SESSION) == 0:
settings.LOAD_SESSION = "y"
if settings.LOAD_SESSION in settings.CHOICE_YES:
return True
elif settings.LOAD_SESSION in settings.CHOICE_NO:
settings.LOAD_SESSION = False
if technique[:1] != "c":
while True:
question_msg = "Which technique do you want to re-evaluate? [(C)urrent/(a)ll/(n)one] > "
sys.stdout.write(settings.print_question_msg(question_msg))
proceed_option = sys.stdin.readline().replace("\n","").lower()
if len(proceed_option) == 0:
proceed_option = "c"
if proceed_option.lower() in settings.CHOICE_PROCEED :
if proceed_option.lower() == "a":
settings.RETEST = True
break
elif proceed_option.lower() == "c" :
settings.RETEST = False
break
elif proceed_option.lower() == "n":
raise SystemExit()
else:
pass
else:
err_msg = "'" + proceed_option + "' is not a valid answer."
print settings.print_error_msg(err_msg)
pass
if settings.SESSION_APPLIED_TECHNIQUES:
menu.options.tech = ''.join(settings.AVAILABLE_TECHNIQUES)
return False
elif settings.LOAD_SESSION in settings.CHOICE_QUIT:
raise SystemExit()
else:
err_msg = "'" + settings.LOAD_SESSION + "' is not a valid answer."
print settings.print_error_msg(err_msg)
pass
except sqlite3.OperationalError, err_msg:
print settings.print_critical_msg(err_msg)
"""
Check for specific stored parameter.
"""
def check_stored_parameter(url, http_request_method):
if injection_point_exportation(url, http_request_method):
if injection_point_exportation(url, http_request_method)[16] == str(menu.options.level):
# Check for stored alternative shell
if injection_point_exportation(url, http_request_method)[9] != "":
menu.options.alter_shell = injection_point_exportation(url, http_request_method)[9]
return True
else:
return False
else:
return False
"""
Import successful command execution outputs to session file.
"""
def store_cmd(url, cmd, shell, vuln_parameter):
try:
conn = sqlite3.connect(settings.SESSION_FILE)
conn.execute("CREATE TABLE IF NOT EXISTS " + table_name(url) + "_ir" + \
"(cmd VARCHAR, output VARCHAR, vuln_parameter VARCHAR);")
if settings.TESTABLE_PARAMETER:
conn.execute("INSERT INTO " + table_name(url) + "_ir(cmd, output, vuln_parameter) "\
"VALUES(?,?,?)", \
(str(base64.b64encode(cmd)), str(base64.b64encode(shell)), str(vuln_parameter)))
else:
conn.execute("INSERT INTO " + table_name(url) + "_ir(cmd, output, vuln_parameter) "\
"VALUES(?,?,?)", \
(str(base64.b64encode(cmd)), str(base64.b64encode(shell)), str(settings.HTTP_HEADER)))
conn.commit()
conn.close()
except sqlite3.OperationalError, err_msg:
print settings.print_critical_msg(err_msg)
except TypeError, err_msg:
pass
"""
Export successful command execution outputs from session file.
"""
def export_stored_cmd(url, cmd, vuln_parameter):
try:
if not menu.options.flush_session:
conn = sqlite3.connect(settings.SESSION_FILE)<|fim▁hole|> if settings.TESTABLE_PARAMETER:
cursor = conn.execute("SELECT output FROM " + table_name(url) + \
"_ir WHERE cmd='" + base64.b64encode(cmd) + "' AND "\
"vuln_parameter= '" + vuln_parameter + "';").fetchall()
else:
cursor = conn.execute("SELECT output FROM " + table_name(url) + \
"_ir WHERE cmd='" + base64.b64encode(cmd) + "' AND "\
"vuln_parameter= '" + settings.HTTP_HEADER + "';").fetchall()
conn.commit()
conn.close()
for session in cursor:
output = base64.b64decode(session[0])
return output
else:
no_such_table = True
pass
except sqlite3.OperationalError, err_msg:
pass
"""
Import valid credentials to session file.
"""
def import_valid_credentials(url, authentication_type, admin_panel, username, password):
try:
conn = sqlite3.connect(settings.SESSION_FILE)
conn.execute("CREATE TABLE IF NOT EXISTS " + table_name(url) + "_creds" + \
"(id INTEGER PRIMARY KEY, url VARCHAR, authentication_type VARCHAR, admin_panel VARCHAR, "\
"username VARCHAR, password VARCHAR);")
conn.execute("INSERT INTO " + table_name(url) + "_creds(url, authentication_type, "\
"admin_panel, username, password) VALUES(?,?,?,?,?)", \
(str(url), str(authentication_type), str(admin_panel), \
str(username), str(password)))
conn.commit()
conn.close()
except sqlite3.OperationalError, err_msg:
print settings.print_critical_msg(err_msg)
except sqlite3.DatabaseError, err_msg:
err_msg = "An error occurred while accessing session file ('"
err_msg += settings.SESSION_FILE + "'). "
err_msg += "If the problem persists use the '--flush-session' option."
print "\n" + settings.print_critical_msg(err_msg)
sys.exit(0)
"""
Export valid credentials from session file.
"""
def export_valid_credentials(url, authentication_type):
try:
if not menu.options.flush_session:
conn = sqlite3.connect(settings.SESSION_FILE)
output = None
conn = sqlite3.connect(settings.SESSION_FILE)
cursor = conn.execute("SELECT username, password FROM " + table_name(url) + \
"_creds WHERE url='" + url + "' AND "\
"authentication_type= '" + authentication_type + "';").fetchall()
cursor = ":".join(cursor[0])
return cursor
else:
no_such_table = True
pass
except sqlite3.OperationalError, err_msg:
pass
# eof<|fim▁end|> | output = None
conn = sqlite3.connect(settings.SESSION_FILE) |
<|file_name|>action.py<|end_file_name|><|fim▁begin|># Copyright (C) 2014 Linaro Limited
#
# Author: Neil Williams <[email protected]>
#
# This file is part of LAVA Dispatcher.
#
# LAVA Dispatcher is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# LAVA Dispatcher is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along
# with this program; if not, see <http://www.gnu.org/licenses>.
import os
import sys
import time
import types
import yaml
import logging
import subprocess
from collections import OrderedDict
from contextlib import contextmanager
from lava_dispatcher.config import get_device_config
class InfrastructureError(Exception):
"""
Exceptions based on an error raised by a component of the
test which is neither the LAVA dispatcher code nor the
code being executed on the device under test. This includes
errors arising from the device (like the arndale SD controller
issue) and errors arising from the hardware to which the device
is connected (serial console connection, ethernet switches or
internet connection beyond the control of the device under test).
Use the existing RuntimeError exception for errors arising
from bugs in LAVA code.
"""
pass
class JobError(Exception):
"""
An Error arising from the information supplied as part of the TestJob
e.g. HTTP404 on a file to be downloaded as part of the preparation of
the TestJob or a download which results in a file which tar or gzip
does not recognise.
"""
pass
class TestError(Exception):
"""
An error in the operation of the test definition.
"""
pass
class YamlFilter(logging.Filter):
"""
filters standard logs into structured logs
"""
def filter(self, record):
record.msg = yaml.dump(record.msg)
return True
class Pipeline(object):
"""
Pipelines ensure that actions are run in the correct sequence whilst
allowing for retries and other requirements.
When an action is added to a pipeline, the level of that action within
the overall job is set along with the formatter and output filename
of the per-action log handler.
"""
def __init__(self, parent=None, job=None):
self.children = {}
self.actions = []
self.summary = "pipeline"
self.parent = None
self.job = None
self.branch_level = 1 # the level of the last added child
if job: # do not unset if set by outer pipeline
self.job = job
if not parent:
self.children = {self: self.actions}
elif not parent.level:
raise RuntimeError("Tried to create a pipeline using a parent action with no level set.")
else:
# parent must be an Action
if not isinstance(parent, Action):
raise RuntimeError("Internal pipelines need an Action as a parent")
self.parent = parent
self.branch_level = parent.level
if parent.job:
self.job = parent.job
def _check_action(self, action):
if not action or not issubclass(type(action), Action):
raise RuntimeError("Only actions can be added to a pipeline: %s" % action)
if not action:
raise RuntimeError("Unable to add empty action to pipeline")
if not action.name:
raise RuntimeError("Unnamed action!")
if ' ' in action.name:
raise RuntimeError("Whitespace must not be used in action names, only descriptions or summaries")
def add_action(self, action):
self._check_action(action)
self.actions.append(action)
action.level = "%s.%s" % (self.branch_level, len(self.actions))
if self.job: # should only be None inside the unit tests
action.job = self.job
if self.parent: # action
self.children.update({self: self.actions})
self.parent.pipeline = self
else:
action.level = "%s" % (len(self.actions))
# create a log handler just for this action.
if self.job and self.job.parameters['output_dir']:
yaml_filename = os.path.join(
self.job.parameters['output_dir'],
"%s-%s.log" % (action.level, action.name)
)
action.log_handler = logging.FileHandler(yaml_filename, mode='a', encoding="utf8")
# per action loggers always operate in DEBUG mode - the frontend does the parsing later.
action.log_handler.setLevel(logging.DEBUG)
# yaml wrapper inside the log handler
action.log_handler.setFormatter(logging.Formatter('id: "<LAVA_DISPATCHER>%(asctime)s"\n%(message)s'))
# if the action has an internal pipeline, initialise that here.
action.populate()
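    # A rough illustration of the numbering done in add_action() above: the
    # first action added to the outer pipeline gets level "1"; the first
    # action added to an internal pipeline whose parent action sits at level
    # "2" gets level "2.1", and (when an output_dir is given) its per-action
    # log is written to "<output_dir>/2.1-<action.name>.log".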
def _describe(self, structure):
# TODO: make the amount of output conditional on a parameter passed to describe
for action in self.actions:
structure[action.level] = {
'description': action.description,
'summary': action.summary,
'content': action.explode()
}
if not action.pipeline:
continue
action.pipeline._describe(structure)
def describe(self):
"""
Describe the current pipeline, recursing through any
internal pipelines.
:return: JSON string of the structure<|fim▁hole|> self._describe(structure)
return structure
@property
def errors(self):
sub_action_errors = [a.errors for a in self.actions]
return reduce(lambda a, b: a + b, sub_action_errors)
def validate_actions(self):
for action in self.actions:
action.validate()
def run_actions(self, connection, args=None):
for action in self.actions:
yaml_log = None
std_log = logging.getLogger("ASCII")
if not action.log_handler:
# FIXME: unit test needed
# if no output dir specified in the job
std_log.debug("no output-dir, logging %s:%s to stdout", action.level, action.name)
else:
yaml_log = logging.getLogger("YAML") # allows per-action logs in yaml
yaml_log.setLevel(logging.DEBUG) # yaml log is always in debug
# enable the log handler created in this action when it was added to this pipeline
yaml_log.addHandler(action.log_handler)
yaml_log.debug({'start': {action.level: action.name}})
try:
new_connection = action.run(connection, args)
if new_connection:
connection = new_connection
except KeyboardInterrupt:
action.cleanup()
self.err = "\rCancel" # Set a useful message.
if self.parent:
raise KeyboardInterrupt
break
except (JobError, InfrastructureError) as exc:
action.errors = exc.message
action.results = {"fail": exc}
# set results including retries
if action.log_handler:
# remove per-action log handler
yaml_log.removeHandler(action.log_handler)
return connection
def prepare_actions(self):
for action in self.actions:
action.prepare()
def post_process_actions(self):
for action in self.actions:
action.post_process()
class Action(object):
def __init__(self):
"""
Actions get added to pipelines by calling the
Pipeline.add_action function. Other Action
data comes from the parameters. Actions with
internal pipelines push parameters to actions
within those pipelines. Parameters are to be
        treated as immutable.
Logs written to the per action log must use the YAML logger.
Output for stdout (which is redirected to the oob_file by the
scheduler) should use the ASCII logger.
yaml_log = logging.getLogger("YAML")
std_log = logging.getLogger("ASCII")
"""
# FIXME: too many?
self.__summary__ = None
self.__description__ = None
self.__level__ = None
self.err = None
self.pipeline = None
self.internal_pipeline = None
self.__parameters__ = {}
self.yaml_line = None # FIXME: should always be in parameters
self.__errors__ = []
self.elapsed_time = None # FIXME: pipeline_data?
self.log_handler = None
self.job = None
self.results = None
self.env = None # FIXME make this a parameter which gets default value when first called
# public actions (i.e. those who can be referenced from a job file) must
# declare a 'class-type' name so they can be looked up.
# summary and description are used to identify instances.
name = None
@property
def description(self):
"""
The description of the command, set by each instance of
each class inheriting from Action.
Used in the pipeline to explain what the commands will
attempt to do.
:return: a string created by the instance.
"""
return self.__description__
@description.setter
def description(self, description):
self.__set_desc__(description)
def __set_desc__(self, desc):
self.__description__ = desc
@property
def summary(self):
"""
A short summary of this instance of a class inheriting
from Action. May be None.
Can be used in the pipeline to summarise what the commands
will attempt to do.
:return: a string or None.
"""
return self.__summary__
@summary.setter
def summary(self, summary):
self.__set_summary__(summary)
def __set_summary__(self, summary):
self.__summary__ = summary
@property
def data(self):
"""
Shortcut to the job.context.pipeline_data
"""
if not self.job:
return None
return self.job.context.pipeline_data
@data.setter
def data(self, value):
"""
Accepts a dict to be updated in the job.context.pipeline_data
"""
self.job.context.pipeline_data.update(value)
@classmethod
def find(cls, name):
for subclass in cls.__subclasses__():
if subclass.name == name:
return subclass
raise KeyError("Cannot find action named \"%s\"" % name)
@property
def errors(self):
return self.__errors__
@errors.setter
def errors(self, error):
self._log(error)
self.__errors__.append(error)
@property
def valid(self):
return len([x for x in self.errors if x]) == 0
@property
def level(self):
"""
The level of this action within the pipeline. Levels
        start at one and each pipeline within a command uses
a level within the level of the parent pipeline.
First command in Outer pipeline: 1
First command in pipeline within outer pipeline: 1.1
level is set during pipeline creation and must not
        be changed subsequently except by RetryCommand.
:return: a string
"""
return self.__level__
@level.setter
def level(self, value):
self.__set_level__(value)
def __set_level__(self, value):
self.__level__ = value
@property
def parameters(self):
"""
All data which this action needs to have available for
the prepare, run or post_process functions needs to be
set as a parameter. The parameters will be validated
during pipeline creation.
This allows all pipelines to be fully described, including
the parameters supplied to each action, as well as supporting
tests on each parameter (like 404 or bad formatting) during
validation of each action within a pipeline.
Parameters are static, internal data within each action
copied directly from the YAML. Dynamic data is held in
the context available via the parent Pipeline()
"""
return self.__parameters__
def __set_parameters__(self, data):
self.__parameters__.update(data)
@parameters.setter
def parameters(self, data):
self.__set_parameters__(data)
if self.pipeline:
for action in self.pipeline.actions:
action.parameters = self.parameters
def validate(self):
"""
This method needs to validate the parameters to the action. For each
validation that is found, an item should be added to self.errors.
Validation includes parsing the parameters for this action for
values not set or values which conflict.
"""
if self.errors:
self._log("Validation failed")
raise JobError("Invalid job data: %s\n" % '\n'.join(self.errors))
def populate(self):
"""
This method allows an action to add an internal pipeline
"""
pass
def prepare(self):
"""
This method will be called before deploying an image to the target,
being passed a local mount point with the target root filesystem. This
method will then have a chance to modify the root filesystem, including
editing existing files (which should be used with caution) and adding
new ones. Any modifications done will be reflected in the final image
which is deployed to the target.
        In this class this method does nothing. It must be implemented by
subclasses
"""
pass
def __call__(self, connection):
try:
new_connection = self.run(connection)
return new_connection
finally:
self.cleanup()
def _log(self, message):
if not message:
return
yaml_log = logging.getLogger("YAML")
std_log = logging.getLogger("ASCII")
yaml_log.debug({"output": message.split('\n')})
std_log.info(message)
def _run_command(self, command_list, env=None):
"""
Single location for all external command operations on the
dispatcher, without using a shell and with full structured logging.
Ensure that output for the YAML logger is a serialisable object
and strip embedded newlines / whitespace where practical.
Returns the output of the command (after logging the output)
Includes default support for proxy settings in the environment.
"""
if type(command_list) != list:
raise RuntimeError("commands to _run_command need to be a list")
yaml_log = logging.getLogger("YAML")
log = None
if not self.env:
self.env = {'http_proxy': self.job.context.config.lava_proxy,
'https_proxy': self.job.context.config.lava_proxy}
if env:
self.env.update(env)
# FIXME: distinguish between host and target commands and add 'nice' to host
try:
log = subprocess.check_output(command_list, stderr=subprocess.STDOUT, env=self.env)
except KeyboardInterrupt:
self.cleanup()
self.err = "\rCancel" # Set a useful message.
except OSError as exc:
yaml_log.debug({exc.strerror: exc.child_traceback.split('\n')})
except subprocess.CalledProcessError as exc:
self.errors = exc.message
yaml_log.debug({
'command': [i.strip() for i in exc.cmd],
'message': [i.strip() for i in exc.message],
'output': exc.output.split('\n')})
self._log("%s\n%s" % (' '.join(command_list), log))
return log
def run(self, connection, args=None):
"""
This method is responsible for performing the operations that an action
is supposed to do.
This method usually returns nothing. If it returns anything, that MUST
be an instance of Connection. That connection will be the one passed on
to the next action in the pipeline.
        In this class this method does nothing. It must be implemented by
subclasses
:param args: Command and arguments to run
:raise: Classes inheriting from BaseAction must handle
all exceptions possible from the command and re-raise
KeyboardInterrupt to allow for Cancel operations. e.g.:
try:
# call the command here
except KeyboardInterrupt:
self.cleanup()
self.err = "\rCancel" # Set a useful message.
sys.exit(1) # Only in the top level pipeline
except Exception as e:
raise e
finally:
self.cleanup()
if self.err:
print self.err
"""
pass
def cleanup(self):
"""
This method *will* be called after perform(), no matter whether
perform() raises an exception or not. It should cleanup any resources
that may be left open by perform, such as, but not limited to:
- open file descriptors
- mount points
- error codes
- etc
"""
try:
raise
except:
sys.exc_clear()
def post_process(self):
"""
After tests finish running, the test results directory will be
extracted, and passed to this method so that the action can
inspect/extract its results.
        In this class this method does nothing. It must be implemented by
subclasses
"""
pass
def explode(self):
"""
serialisation support
"""
data = {}
members = [attr for attr in dir(self) if not callable(attr) and not attr.startswith("__")]
members.sort()
for name in members:
if name == "pipeline":
continue
content = getattr(self, name)
if name == "job" or name == "log_handler" or name == "internal_pipeline":
continue
if name == 'parameters':
# FIXME: implement the handling of parameters to be serialisable
if 'deployment_data' in content:
del content['deployment_data']
import json
content = json.dumps(content)
if isinstance(content, types.MethodType):
continue
if content:
data[name] = content
return data
class RetryAction(Action):
def __init__(self):
super(RetryAction, self).__init__()
self.retries = 0
self.max_retries = 5
self.sleep = 1
def run(self, connection, args=None):
while self.retries <= self.max_retries:
try:
new_connection = self.run(connection)
return new_connection
except KeyboardInterrupt:
self.cleanup()
self.err = "\rCancel" # Set a useful message.
except (JobError, InfrastructureError):
self._log("%s failed, trying again" % self.name)
self.retries += 1
time.sleep(self.sleep)
finally:
self.cleanup()
raise JobError("%s retries failed for %s" % (self.retries, self.name))
def __call__(self, connection):
self.run(connection)
class Deployment(object):
"""
Deployment is a strategy class which aggregates Actions
until the request from the YAML can be validated or rejected.
Translates the parsed pipeline into Actions and populates
each Action with parameters.
"""
priority = 0
def __init__(self, parent):
self.__parameters__ = {}
self.pipeline = parent
self.job = parent.job
@contextmanager
def deploy(self):
"""
This method first mounts the image locally, exposing its root
filesystem in a local directory which will be yielded to the
caller, which has the chance to modify the contents of the root
filesystem.
Then, the root filesystem will be unmounted and the image will
be deployed to the device.
This method must be implemented by subclasses.
"""
raise NotImplementedError("deploy")
@contextmanager
def extract_results(self):
"""
This method will extract the results directory from the root filesystem
in the device. After copying that directory locally, the local copy
will be yielded to the caller, who can read data from it.
Must be implemented by subclasses.
"""
raise NotImplementedError("extract_results")
@property
def parameters(self):
"""
All data which this action needs to have available for
the prepare, run or post_process functions needs to be
set as a parameter. The parameters will be validated
during pipeline creation.
This allows all pipelines to be fully described, including
the parameters supplied to each action, as well as supporting
tests on each parameter (like 404 or bad formatting) during
validation of each action within a pipeline.
Parameters are static, internal data within each action
copied directly from the YAML or Device configuration.
Dynamic data is held in the context available via the parent Pipeline()
"""
return self.__parameters__
def __set_parameters__(self, data):
self.__parameters__.update(data)
@parameters.setter
def parameters(self, data):
self.__set_parameters__(data)
@classmethod
def accepts(cls, device, parameters):
"""
        Returns True if this deployment strategy can be used with the
given device and details of an image in the parameters.
Must be implemented by subclasses.
"""
        raise NotImplementedError("accepts")
@classmethod
def select(cls, device, parameters):
candidates = cls.__subclasses__()
willing = [c for c in candidates if c.accepts(device, parameters)]
if len(willing) == 0:
raise NotImplementedError(
"No deployment strategy available for the given "
"device '%s'." % device.config.hostname)
# higher priority first
compare = lambda x, y: cmp(y.priority, x.priority)
prioritized = sorted(willing, compare)
return prioritized[0]
class Image(object):
"""
Create subclasses for each type of image: prebuilt, hwpack+rootfs,
kernel+rootfs+dtb+..., dummy, ...
TBD: this might not be needed.
"""
@contextmanager
def mount_rootfs(self):
"""
Subclasses must implement this method
"""
raise NotImplementedError("mount_rootfs")
class Connection(object):
def __init__(self, device, raw_connection):
self.device = device
self.raw_connection = raw_connection
class Device(object):
"""
Holds all data about the device for this TestJob including
    all database parameters and device configuration.
In the dumb dispatcher model, an instance of Device would
be populated directly from the master scheduler.
"""
def __init__(self, hostname):
self.config = get_device_config(hostname)<|fim▁end|> | """
structure = OrderedDict() |
<|file_name|>bleachchar.py<|end_file_name|><|fim▁begin|>from django.db import models<|fim▁hole|>
class BleachCharField(BleachField, models.CharField):
def pre_save(self, model_instance, add):
new_value = getattr(model_instance, self.attname)
clean_value = self.clean_text(new_value)
setattr(model_instance, self.attname, clean_value)
return super(BleachCharField, self).pre_save(model_instance, add)<|fim▁end|> |
from .bleachfield import BleachField
|
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.template import loader
from django.views import generic
from .scripts.vagrant_boxes import _box_list, _global_status, _deps_versions
class IndexView(generic.ListView):
template_name = 'manager/index.html'
def get(self, request):
versions = _deps_versions()
vboxes = _box_list()
venvs = _global_status()
<|fim▁hole|><|fim▁end|> | return render(request, self.template_name, {'all_boxes': vboxes, 'all_envs': venvs, 'versions': versions, }) |
<|file_name|>lower_array_len.rs<|end_file_name|><|fim▁begin|>// compile-flags: -Z mir-opt-level=4
// EMIT_MIR lower_array_len.array_bound.NormalizeArrayLen.diff
// EMIT_MIR lower_array_len.array_bound.SimplifyLocals.diff
// EMIT_MIR lower_array_len.array_bound.InstCombine.diff
pub fn array_bound<const N: usize>(index: usize, slice: &[u8; N]) -> u8 {
if index < slice.len() {
slice[index]<|fim▁hole|>}
// EMIT_MIR lower_array_len.array_bound_mut.NormalizeArrayLen.diff
// EMIT_MIR lower_array_len.array_bound_mut.SimplifyLocals.diff
// EMIT_MIR lower_array_len.array_bound_mut.InstCombine.diff
pub fn array_bound_mut<const N: usize>(index: usize, slice: &mut [u8; N]) -> u8 {
if index < slice.len() {
slice[index]
} else {
slice[0] = 42;
42
}
}
// EMIT_MIR lower_array_len.array_len.NormalizeArrayLen.diff
// EMIT_MIR lower_array_len.array_len.SimplifyLocals.diff
// EMIT_MIR lower_array_len.array_len.InstCombine.diff
pub fn array_len<const N: usize>(arr: &[u8; N]) -> usize {
arr.len()
}
// EMIT_MIR lower_array_len.array_len_by_value.NormalizeArrayLen.diff
// EMIT_MIR lower_array_len.array_len_by_value.SimplifyLocals.diff
// EMIT_MIR lower_array_len.array_len_by_value.InstCombine.diff
pub fn array_len_by_value<const N: usize>(arr: [u8; N]) -> usize {
arr.len()
}
fn main() {
let _ = array_bound(3, &[0, 1, 2, 3]);
let mut tmp = [0, 1, 2, 3, 4];
let _ = array_bound_mut(3, &mut [0, 1, 2, 3]);
let _ = array_len(&[0]);
let _ = array_len_by_value([0, 2]);
}<|fim▁end|> | } else {
42
} |
<|file_name|>STMcmc.py<|end_file_name|><|fim▁begin|># This is STMcmc, for super tree mcmc.
# Started 18 March 2011, first commit 22 March 2011.
import pf,func
from Var import var
import math,random,string,sys,time,copy,os,cPickle,types,glob<|fim▁hole|>from Tree import Tree
import datetime
import itertools
try:
import bitarray
except ImportError:
pass
def choose(n, k):
"""
A fast way to calculate binomial coefficients
by Andrew Dalke (contrib).
"""
if 0 <= k <= n:
ntok = 1
ktok = 1
for t in xrange(1, min(k, n - k) + 1):
ntok *= n
ktok *= t
n -= 1
return ntok // ktok
else:
return 0
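# A small sanity check for choose(), in the style of the other "if 0:" blocks
# in this file; the expected values are the usual binomial coefficients.
if 0: # test choose
    print choose(5, 2) # expect 10
    print choose(6, 3) # expect 20
    print choose(4, 7) # expect 0, since k is outside 0..n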
# def nSplits(n):
# mySum = 0
# for k in range(2, n-1):
# mySum += choose(n-1, k)
# return mySum
def bForN(n):
# This is the log version of this function. The max diff (in
# log(result)) between this and the non-log function seems to be
# about 2.5e-10 for n up to 10000.
prodLog = 0.0
if n > 3:
for k in range(4, n + 1):
prodLog += math.log((2 * k) - 5)
return prodLog
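# bForN(n) sums log(2k - 5) for k = 4..n, ie the log of (2n - 5)!!, which
# (if that reading is right) is the number of distinct fully-resolved
# unrooted trees on n taxa.
if 0: # test bForN
    print math.exp(bForN(4)) # expect about 3
    print math.exp(bForN(5)) # expect about 15
    print math.exp(bForN(6)) # expect about 105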
def BS2009_Eqn30_ZTApprox(n, beta, cT):
# This log version of this function differs from from the non-log
# version (in log(result)) by at most 6.82e-13 for n up to 150,
# over a wide range of beta (0.001 -- 1000) and cT (2 -- n/2)
myLambda = cT/(2.0*n)
tester = 0.5 * math.log((n - 3.)/myLambda)
epsilon = math.exp(-2. * beta)
bigANEpsilon = 1 + (((2. * n) - 3.) * epsilon) + (2. * ((n * n) - (4. * n) - 6.) * epsilon * epsilon)
termA = math.log(bigANEpsilon + 6 * cT * epsilon * epsilon)
if beta < tester:
termB = -(2. * beta) * (n - 3.) + (myLambda * (math.exp(2. * beta) - 1.))
termB += bForN(n)
if termA > termB:
return termA
else:
return termB
else:
return termA
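# BS2009_Eqn30_ZTApprox() returns a log value; the callers below subtract it
# from the log likelihood together with beta * distance, eg
# log_approxZT = BS2009_Eqn30_ZTApprox(t.nTax, self.propTree.beta, nCherries)
# as in getTreeLogLike_ppy1() and getTreeLogLike_bitarray().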
def popcountA(k, nBits):
count = 0
for i in range(nBits):
tester = 1L << i
if tester > k:
return count
if tester & k:
count += 1
return count
def bitReduce(bk, txBits, lLen, sLen, allOnes):
#print "bitReduce: bk %i, txBits %i, lLen %i, sLen %i, allOnes %i" % (bk, txBits, lLen, sLen, allOnes)
newBk = 0L
counter = 0
pops = 0
for pos in range(lLen):
tester = 1L << pos
#print "pos %2i, tester: %3i" % (pos, tester)
if tester & txBits:
#print " tester & txBits -- True"
if tester & bk:
adder = 1L << counter
#print " adding:", adder
newBk += adder
pops += 1
else:
#print " not adding"
pass
counter += 1
if (1 & newBk):
#print "flipping"
newBk = allOnes ^ newBk
pops = sLen - pops
#print "returning newBk %i, pops %i" % (newBk, pops)
return newBk, pops
if 0: # test bitReduce
sk = 6 # always at least 2 bits, even
txBits = 30
lLen = 5
sLen = 4
allOnes = 15
print " sk: %3i %s" % (sk, func.getSplitStringFromKey(sk, lLen))
print "taxBits: %3i %s" % (txBits, func.getSplitStringFromKey(txBits, lLen))
rsk, popcount = bitReduce(sk, txBits, lLen, sLen, allOnes)
print " rsk: %3i %s" % (rsk, func.getSplitStringFromKey(rsk, sLen))
print " popcount %i" % popcount
# sk: 6 .**..
# taxBits: 30 .****
# rsk: 12 ..**
# popcount 2
def maskedSymmetricDifference(skk, skSet, taxBits, longLen, shortLen, allOnes):
if 0:
print "-" * 50
print "skk (skk_ppy1 from the current supertree)"
for sk in skk:
print func.getSplitStringFromKey(sk, longLen)
print "skSet (from input tree)"
for sk in skSet:
print func.getSplitStringFromKey(sk, shortLen)
print "taxBits:", taxBits, func.getSplitStringFromKey(taxBits, longLen)
newSkk = []
for sk in skk:
reducedSk, popcount = bitReduce(sk, taxBits, longLen, shortLen, allOnes)
if 0:
print "taxBits: %s " % func.getSplitStringFromKey(taxBits, longLen),
print "%4i %s " % (sk, func.getSplitStringFromKey(sk, longLen)),
print "%4i %s %i" % (reducedSk, func.getSplitStringFromKey(reducedSk, shortLen), popcount)
if popcount <= 1 or popcount >= (shortLen - 1):
pass
else:
newSkk.append(reducedSk)
newSkkSet = set(newSkk)
#print newSkkSet, skSet
#print "reduced supertree splits = newSkkSet = %s" % newSkkSet
ret = len(newSkkSet.symmetric_difference(skSet))
#print "symmetric difference %i" % ret
nCherries = 0
for sk in newSkkSet:
popcount = popcountA(sk, shortLen)
if popcount == 2:
nCherries += 1
if popcount == (shortLen - 2): # not "elif", because they might both be True
nCherries += 1
#print "nCherries %i" % nCherries
return ret, nCherries
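# maskedSymmetricDifference() reduces the supertree splits (skk) to the taxon
# set of one input tree and returns the symmetric difference with that tree's
# splits (skSet), together with the number of cherries in the reduced
# supertree; the cherry count feeds BS2009_Eqn30_ZTApprox() above.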
def slowQuartetDistance(st, inputTree):
dst = st.dupe()
toRemove = []
for n in dst.iterLeavesNoRoot():
if n.name not in inputTree.taxNames:
toRemove.append(n)
for n in toRemove:
dst.removeNode(n)
qd = dst.topologyDistance(inputTree, metric='scqdist')
return qd
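# slowQuartetDistance() prunes a copy of the supertree down to the input
# tree's taxa and then uses topologyDistance(metric='scqdist') to get the
# quartet distance.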
class STChain(object):
def __init__(self, aSTMcmc):
gm = ['STChain.__init__()']
self.stMcmc = aSTMcmc
self.tempNum = -1 # 'temp'erature, not 'temp'orary
self.curTree = aSTMcmc.tree.dupe()
self.propTree = aSTMcmc.tree.dupe()
self.logProposalRatio = 0.0
self.logPriorRatio = 0.0
self.frrf = None
self.nInTreeSplits = 0
if self.stMcmc.modelName.startswith('SR2008_rf'):
self.curTree.beta = self.stMcmc.beta
self.propTree.beta = self.stMcmc.beta
if self.stMcmc.stRFCalc == 'purePython1':
self.getTreeLogLike_ppy1()
elif self.stMcmc.stRFCalc == 'fastReducedRF':
self.startFrrf()
self.getTreeLogLike_fastReducedRF()
elif self.stMcmc.stRFCalc == 'bitarray':
self.setupBitarrayCalcs()
self.getTreeLogLike_bitarray()
self.curTree.logLike = self.propTree.logLike
elif self.stMcmc.modelName.startswith('SPA'):
self.curTree.spaQ= self.stMcmc.spaQ
self.propTree.spaQ = self.stMcmc.spaQ
for t in self.stMcmc.trees:
self.nInTreeSplits += len(t.splSet)
#print "Got nInTreeSplits %s" % self.nInTreeSplits
self.setupBitarrayCalcs()
self.getTreeLogLike_spa_bitarray()
self.curTree.logLike = self.propTree.logLike
elif self.stMcmc.modelName.startswith('QPA'):
self.curTree.spaQ= self.stMcmc.spaQ
self.propTree.spaQ = self.stMcmc.spaQ
self.nPossibleQuartets = choose(self.stMcmc.tree.nTax, 4) * 3
self.getTreeLogLike_qpa_slow()
self.curTree.logLike = self.propTree.logLike
else:
gm.append('Unknown modelName %s' % self.stMcmc.modelName)
raise Glitch, gm
if 0:
print "STChain init()"
self.curTree.draw()
print "logLike is %f" % self.curTree.logLike
def getTreeLogLike_qpa_slow(self):
gm = ["STChain.getTreeLogLike_qpa_slow()"]
if self.propTree.spaQ > 1. or self.propTree.spaQ <= 0.0:
gm.append("bad propTree.spaQ value %f" % self.propTree.spaQ)
raise Glitch, gm
for n in self.propTree.iterInternalsPostOrder():
if n == self.propTree.root:
break
n.stSplitKey = n.leftChild.stSplitKey
p = n.leftChild.sibling
while p:
n.stSplitKey |= p.stSplitKey # "or", in-place
p = p.sibling
self.propTree.skk = [n.stSplitKey for n in self.propTree.iterInternalsNoRoot()]
self.propTree.qSet = set()
for sk in self.propTree.skk:
ups = [txBit for txBit in self.propTree.taxBits if (sk & txBit)]
downs = [txBit for txBit in self.propTree.taxBits if not (sk & txBit)]
for down in itertools.combinations(downs, 2):
if down[0] > down[1]:
down = (down[1], down[0])
for up in itertools.combinations(ups, 2):
if up[0] > up[1]:
up = (up[1], up[0])
if down[0] < up[0]:
self.propTree.qSet.add(down+up)
else:
self.propTree.qSet.add(up+down)
#print self.propTree.qSet
self.propTree.nQuartets = len(self.propTree.qSet)
if self.propTree.nQuartets:
q = self.propTree.spaQ / self.propTree.nQuartets
R = 1. - self.propTree.spaQ
r = R / (self.nPossibleQuartets - self.propTree.nQuartets)
logq = math.log(q)
else:
R = 1.
r = R / self.nPossibleQuartets
logr = math.log(r)
self.propTree.logLike = 0.0
for it in self.stMcmc.trees:
for qu in it.qSet:
if qu in self.propTree.qSet:
self.propTree.logLike += logq
else:
self.propTree.logLike += logr
def getTreeLogLike_spa_bitarray(self):
gm = ["STChain.getTreeLogLike_spa_bitarray"]
if self.propTree.spaQ > 1. or self.propTree.spaQ <= 0.0:
gm.append("bad propTree.spaQ value %f" % self.propTree.spaQ)
raise Glitch, gm
slowCheck = False
if slowCheck:
slowCheckLogLike = 0.0
for it in self.stMcmc.trees:
it.makeSplitKeys()
it.skk = [n.br.splitKey for n in it.iterInternalsNoRoot()]
self.propTree.logLike = 0.0
for it in self.stMcmc.trees:
if 0:
print "-" * 50
it.draw()
print "baTaxBits %s" % it.baTaxBits
print "firstTax at %i" % it.firstTax
if slowCheck:
stDupe = self.propTree.dupe()
toRemove = []
for n in stDupe.iterLeavesNoRoot():
if n.name not in it.taxNames:
toRemove.append(n)
for n in toRemove:
stDupe.removeNode(n)
stDupe.taxNames = it.taxNames
stDupe.makeSplitKeys(makeNodeForSplitKeyDict=True)
# No need to consider (masked) splits with less than two
# 1s or more than it.nTax - 2 1s.
upperGood = it.nTax - 2
relevantStSplits = []
for n in self.propTree.iterInternalsNoRoot():
# Choose which spl (spl or spl2) based on it.firstTax)
if n.ss.spl[it.firstTax]:
n.ss.theSpl = n.ss.spl
else:
n.ss.theSpl = n.ss.spl2
n.ss.maskedSplitWithTheFirstTaxOne = n.ss.theSpl & it.baTaxBits
n.ss.onesCount = n.ss.maskedSplitWithTheFirstTaxOne.count()
if 0:
print "bigT node %i" % n.nodeNum
print " theSpl is %s" % n.ss.theSpl
print " maskedSplitWithTheFirstTaxOne %s" % n.ss.maskedSplitWithTheFirstTaxOne
print " onesCount %i" % n.ss.onesCount
if n.ss.onesCount >= 2 and n.ss.onesCount <= upperGood:
print " -> relevant"
else:
print " -> not relevant"
if n.ss.onesCount >= 2 and n.ss.onesCount <= upperGood:
relevantStSplits.append(n.ss)
nonRedundantStSplits = []
for ss in relevantStSplits:
alreadyIn = False
for ssB in nonRedundantStSplits:
if ss.maskedSplitWithTheFirstTaxOne == ssB.maskedSplitWithTheFirstTaxOne:
alreadyIn = True
break
if alreadyIn == False:
nonRedundantStSplits.append(ss)
if 0:
for ss in relevantStSplits:
ss.dump()
print "There are %i relevant splits in the st for this it." % len(relevantStSplits)
for ss in nonRedundantStSplits:
ss.dump()
print "There are %i non-redundant splits in the st for this it." % len(nonRedundantStSplits)
S_st = len(nonRedundantStSplits) # S_st is the number of splits in the reduced supertree
if slowCheck:
#stDupe.draw()
#print "the drawing above is stDupe"
slowCheckS_st = len([n for n in stDupe.iterInternalsNoRoot()])
assert S_st == slowCheckS_st
S = 2**(it.nTax - 1) - (it.nTax + 1) # S is the number of possible splits in an it-sized tree
#print "S=%i, S_st=%i" % (S, S_st)
if S_st:
q = self.propTree.spaQ / S_st
R = 1. - self.propTree.spaQ
r = R/(S - S_st)
#print "q=%f" % q
logq = math.log(q)
else:
R = 1.
r = R/S
#print "r=%f" % r
logr = math.log(r)
# for ss in nonRedundantStSplits:
# ss.bytes = ss.maskedSplitWithTheFirstTaxOne.tobytes()
# ret = ss.bytes in it.splSet
# if ret:
# print " iT has reduced split %s" % ss.bytes
# self.propTree.logLike += logq
# else:
# print " iT does not have reduced split %s" % ss.bytes
# self.propTree.logLike += logr
mySSForBytesDict = {}
for ss in nonRedundantStSplits:
ss.bytes = ss.maskedSplitWithTheFirstTaxOne.tobytes()
mySSForBytesDict[ss.bytes] = ss
for spl in it.splSet:
ret = mySSForBytesDict.get(spl)
if ret:
#print " st has reduced split %s" % spl
self.propTree.logLike += logq
else:
#print " st does not have reduced split %s" % spl
self.propTree.logLike += logr
if slowCheck:
for sk in it.skk:
ret = stDupe.nodeForSplitKeyDict.get(sk)
if ret:
slowCheckLogLike += logq
else:
slowCheckLogLike += logr
myDiff = self.propTree.logLike - slowCheckLogLike
if math.fabs(myDiff) > 1.e-12:
gm.append("Bad like calc. slowCheck %f, bitarray %f, diff %g" % (
slowCheckLogLike, self.propTree.logLike, myDiff))
raise Glitch, gm
def setupBitarrayCalcs(self):
# Prepare self.propTree (ie bigT). First make n.stSplitKeys. These are temporary.
for n in self.propTree.iterPostOrder():
if n == self.propTree.root:
break
if n.isLeaf:
spot = self.stMcmc.taxNames.index(n.name)
self.stMcmc.tBits[spot] = True
n.stSplitKey = bitarray.bitarray(self.stMcmc.tBits)
self.stMcmc.tBits[spot] = False
else:
n.stSplitKey = n.leftChild.stSplitKey.copy()
p = n.leftChild.sibling
while p:
n.stSplitKey |= p.stSplitKey # "or", in-place
p = p.sibling
# Next transfer the internal node split keys to BigTSplitStuff objects
for n in self.propTree.iterInternalsNoRoot():
n.ss = BigTSplitStuff()
n.ss.spl = n.stSplitKey
n.ss.spl2 = n.ss.spl.copy()
n.ss.spl2.invert()
# This next one will be empty, not used immediately, but will
# be used after supertree rearrangements.
self.propTree.root.ss = BigTSplitStuff()
def refreshBitarrayPropTree(self):
# Refresh self.propTree (ie bigT) after a topology change.
for n in self.propTree.iterPostOrder():
if n == self.propTree.root:
break
if n.isLeaf:
pass
else:
n.stSplitKey = n.leftChild.stSplitKey.copy()
p = n.leftChild.sibling
while p:
n.stSplitKey |= p.stSplitKey # "or", in-place
p = p.sibling
# Next transfer the internal node split keys to BigTSplitStuff objects
for n in self.propTree.iterInternalsNoRoot():
n.ss.spl = n.stSplitKey
n.ss.spl2 = n.ss.spl.copy()
n.ss.spl2.invert()
def startFrrf(self):
# if using self.stMcmc.stRFCalc= 'fastReducedRF'
self.frrf = self.stMcmc.Frrf(len(self.stMcmc.taxNames))
self.bigTr = self.frrf.setBigT(len(self.propTree.nodes), self.propTree.nTax, self.propTree.postOrder)
for n in self.propTree.nodes:
if n.parent:
self.bigTr.setParent(n.nodeNum, n.parent.nodeNum)
if n.leftChild:
self.bigTr.setLeftChild(n.nodeNum, n.leftChild.nodeNum)
else:
self.bigTr.setNodeTaxNum(n.nodeNum, self.stMcmc.taxNames.index(n.name))
if n.sibling:
self.bigTr.setSibling(n.nodeNum, n.sibling.nodeNum)
if 1:
for t in self.stMcmc.trees:
tr = self.frrf.appendInTree(len(t.nodes), t.nTax, t.postOrder)
for n in t.nodes:
if n.parent:
tr.setParent(n.nodeNum, n.parent.nodeNum)
if n.leftChild:
tr.setLeftChild(n.nodeNum, n.leftChild.nodeNum)
else:
tr.setNodeTaxNum(n.nodeNum, self.stMcmc.taxNames.index(n.name))
if n.sibling:
tr.setSibling(n.nodeNum, n.sibling.nodeNum)
self.frrf.setInTreeTaxBits()
self.frrf.setInTreeInternalBits()
self.frrf.maybeFlipInTreeBits()
self.frrf.setBigTInternalBits()
#self.frrf.dump()
def getTreeLogLike_ppy1(self):
gm = ['STChain.getTreeLogLike_pp1']
self.propTree.makeSplitKeys()
self.propTree.skk = [n.br.splitKey for n in self.propTree.iterInternalsNoRoot()]
self.propTree.logLike = 0.0
for t in self.stMcmc.trees:
# Get the distance
thisDist = None
if self.stMcmc.modelName.startswith('SR2008_rf'):
thisDist, nCherries = maskedSymmetricDifference(self.propTree.skk, t.skSet,
t.taxBits, self.stMcmc.nTax, t.nTax, t.allOnes)
else:
raise Glitch, "STChain.getTreeLogLike_ppy1() unknown model '%s'" % self.stMcmc.modelName
# Now multiply by beta, and do approximate Z_T
assert thisDist != None
beta_distance = self.propTree.beta * thisDist
if self.stMcmc.modelName == 'SR2008_rf_ia':
self.propTree.logLike -= beta_distance
elif self.stMcmc.modelName.startswith('SR2008_rf_aZ'):
log_approxZT = BS2009_Eqn30_ZTApprox(t.nTax, self.propTree.beta, nCherries)
if 0:
# Testing, testing ...
assert self.propTree.beta == 0.1
assert t.nTax == 6
if nCherries == 2:
log_approxZT = 4.13695897651 # exact
elif nCherries == 3:
log_approxZT = 4.14853562562
self.propTree.logLike -= log_approxZT
self.propTree.logLike -= beta_distance
else:
gm.append("Unknown modelName %s" % self.stMcmc.modelName)
raise Glitch, gm
def getTreeLogLike_fastReducedRF(self):
slowCheck = False
if slowCheck:
self.getTreeLogLike_ppy1()
savedLogLike = self.propTree.logLike
self.frrf.wipeBigTPointers()
for n in self.propTree.nodes:
if n.parent:
self.bigTr.setParent(n.nodeNum, n.parent.nodeNum)
if n.leftChild:
self.bigTr.setLeftChild(n.nodeNum, n.leftChild.nodeNum)
#else:
# bigTr.setNodeTaxNum(n.nodeNum, tNames.index(n.name))
if n.sibling:
self.bigTr.setSibling(n.nodeNum, n.sibling.nodeNum)
self.frrf.setBigTInternalBits()
if self.stMcmc.modelName == 'SR2008_rf_ia':
sd = self.frrf.getSymmDiff()
self.propTree.logLike = -sd * self.propTree.beta
elif self.stMcmc.modelName.startswith('SR2008_rf_aZ'):
self.propTree.logLike = self.frrf.getLogLike(self.propTree.beta)
if slowCheck:
if self.propTree.logLike != savedLogLike:
gm = ['STChain.getTreeLogLike_fastReducedRF()']
gm.append("Slow likelihood %f" % savedLogLike)
gm.append("Fast likelihood %f" % self.propTree.logLike)
raise Glitch, gm
def getTreeLogLike_bitarray(self):
self.propTree.logLike = 0.0
slowCheck = False
if slowCheck:
self.propTree.makeSplitKeys()
self.propTree.skk = [n.br.splitKey for n in self.propTree.iterInternalsNoRoot()]
for t in self.stMcmc.trees:
if 0:
print "-" * 50
t.draw()
print "baTaxBits %s" % t.baTaxBits
print "firstTax at %i" % t.firstTax
usables = [] # splitStuff objects with onesCount >= 2 and <= t.nTax = 2
# No need to consider (masked) splits with less than two
# 1s or more than nTax - 2 1s. The nTax depends on the
# input tree.
upperGood = t.nTax - 2
for n in self.propTree.iterInternalsNoRoot():
# Choose which spl (spl or spl2) based on t.firstTax)
if n.ss.spl[t.firstTax]:
n.ss.theSpl = n.ss.spl
else:
n.ss.theSpl = n.ss.spl2
n.ss.maskedSplitWithTheFirstTaxOne = n.ss.theSpl & t.baTaxBits
n.ss.onesCount = n.ss.maskedSplitWithTheFirstTaxOne.count()
if 0:
print "bigT node %i" % n.nodeNum
print " theSpl is %s" % n.ss.theSpl
print " maskedSplitWithTheFirstTaxOne %s" % n.ss.maskedSplitWithTheFirstTaxOne
print " onesCount %i" % n.ss.onesCount
if n.ss.onesCount >= 2 and n.ss.onesCount <= upperGood:
print " -> used"
else:
print " -> not used"
if n.ss.onesCount >= 2 and n.ss.onesCount <= upperGood:
usables.append(n.ss)
usablesDict = {}
for usable in usables:
usable.bytes = usable.maskedSplitWithTheFirstTaxOne.tobytes()
usablesDict[usable.bytes] = usable
splSet = set() # bytes, for RF calculation
for usable in usables:
# splSet.add(n.ss.maskedSplitWithTheFirstTaxOne.tobytes())
splSet.add(usable.bytes)
thisBaRF = len(splSet.symmetric_difference(t.splSet))
if slowCheck: # with purePython1
thisPPyRF, thisPPyNCherries = maskedSymmetricDifference(self.propTree.skk, t.skSet,
t.taxBits, self.stMcmc.nTax, t.nTax, t.allOnes)
if thisBaRF != thisPPyRF:
raise Glitch, "bitarray and purePython1 RF calcs differ."
beta_distance = self.propTree.beta * thisBaRF
if self.stMcmc.modelName == 'SR2008_rf_ia':
self.propTree.logLike -= beta_distance
elif self.stMcmc.modelName.startswith('SR2008_rf_aZ'):
nCherries = 0
for ba in splSet:
theSS = usablesDict[ba]
#theSS.dump()
if theSS.onesCount == 2:
nCherries += 1
if theSS.onesCount == upperGood:
nCherries += 1
if slowCheck:
if nCherries != thisPPyNCherries:
raise Glitch, "bitarray and purePython1 nCherries calcs differ."
log_approxZT = BS2009_Eqn30_ZTApprox(t.nTax, self.propTree.beta, nCherries)
self.propTree.logLike -= log_approxZT
self.propTree.logLike -= beta_distance
else:
gm.append("Unknown model %s" % self.stMcmc.modelName)
raise Glitch, gm
def proposePolytomy(self, theProposal):
theProposal.doAbort = False
dbug = False
if dbug:
#print "proposePolytomy() starting with this tree ..."
#self.propTree.draw(width=80, addToBrLen=0.2)
print "j There are %i internal nodes." % self.propTree.nInternalNodes
if self.propTree.nInternalNodes == 1:
print "-> so its a star tree -> proposeDeleteEdge is not possible."
elif self.propTree.nInternalNodes == self.propTree.nTax - 2:
print "-> so its a fully-resolved tree, so proposeAddEdge is not possible."
if self.propTree.nInternalNodes == 1: # a star tree
self.proposeAddEdge(theProposal)
elif self.propTree.nInternalNodes == self.propTree.nTax - 2:
candidateNodes = self._getCandidateNodesForDeleteEdge()
if candidateNodes:
self.proposeDeleteEdge(theProposal, candidateNodes)
else:
#gm = ["proposePolytomy()"]
#gm.append("The tree is fully resolved, so I can't proposeAddEdge()")
#gm.append("But there are no suitable nodes to remove.")
#raise Glitch, gm
theProposal.doAbort = True
self.curTree._nInternalNodes = self.propTree._nInternalNodes
return
else:
r = random.random()
#r = 0.4
if r < 0.5:
self.proposeAddEdge(theProposal)
else:
candidateNodes = self._getCandidateNodesForDeleteEdge()
if candidateNodes:
self.proposeDeleteEdge(theProposal, candidateNodes)
else:
self.proposeAddEdge(theProposal)
#if self.mcmc.constraints:
# print "checkSplitKeys() at the end of polytomy"
# self.propTree.checkSplitKeys()
def proposeAddEdge(self, theProposal):
gm = ["STChain.proposeAddEdge()"]
#print "proposeAddEdge() here"
dbug = False
pTree = self.propTree
if 0:
print "proposeAddEdge(), starting with this tree ..."
pTree.draw()
print "k There are %i internal nodes." % pTree.nInternalNodes
print "root is node %i" % pTree.root.nodeNum
allPolytomies = []
for n in pTree.iterInternalsNoRoot():
if n.getNChildren() > 2:
allPolytomies.append(n)
if pTree.root.getNChildren() > 3:
allPolytomies.append(pTree.root)
theChosenPolytomy = random.choice(allPolytomies)
# We want to choose one of the possible ways to add a node. See
# Lewis et al page 246, left top. "The number of distinct ways of
# dividing k edges into two groups, making sure that at least 3
# edges are attached to each node afterwards, is 2^{k-1} - k - 1".
# For non-root polytomies (with 3 or more children), it is
# straightforward, but for root polytomies (ie with 4 or more
# children) it is different. I think in the case of root
# polytomies that they will be equivalent to non-root polytomies
# if I arbitrarily consider one randomly chosen child node to
# take the role that the parent takes in the non-root-polytomies.
# So a 4-child root will be considered to have a parent-like node
# and 3 children.
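        # For example, a non-root polytomy with 3 children has k = 4 edges
        # meeting at it, giving 2^3 - 4 - 1 = 3 distinct ways; with 4 children,
        # k = 5 and there are 2^4 - 5 - 1 = 10 ways.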
if theChosenPolytomy != pTree.root:
nChildren = theChosenPolytomy.getNChildren()
k = nChildren + 1
childrenNodeNums = pTree.getChildrenNums(theChosenPolytomy)
else:
# Its the root. So we say that a random child takes the role
# of the "parent", for purposes of these calculations.
nChildren = theChosenPolytomy.getNChildren() - 1 # n - 1 children
k = nChildren + 1
childrenNodeNums = pTree.getChildrenNums(theChosenPolytomy) # Yes, all children.
nPossibleWays = math.pow(2, k-1) - k - 1
if dbug:
print "These nodes are polytomies: %s" % [n.nodeNum for n in allPolytomies]
print "We randomly choose to do node %i" % theChosenPolytomy.nodeNum
print "It has %i children, so k=%i, so there are %i possible ways to add a node." % (
nChildren, k, nPossibleWays)
# We want to choose one of the possible ways to add a node, but we
# want to choose it randomly. I'll describe it for the case with
# nChildren=5, so k is 6. We know already that there are
# nPossibleWays=25 different ways to add a node. The complication
# is that we could make a new group of 2, 3, or 4 nInNewGroup, and it will be
# different numbers of possible ways in each. The numbers of each are given by
# func.nChoosek(), so there are 10 ways to make a group of 2 from 5
# children, 10 ways to make a group of 3 from 5 children, and 5
        # ways to make a group of 4 from 5 children. So that's [10, 10,
# 5], which sums to 25 (nPossibleWays). So we can make a
# cumulative sum list ie [10, 20, 25], and use it to choose one
# group randomly.
nChooseKs = []
for i in range(2, nChildren):
nChooseKs.append(func.nChooseK(nChildren, i))
cumSum = [nChooseKs[0]]
for i in range(len(nChooseKs))[1:]:
cumSum.append(nChooseKs[i] + cumSum[i-1])
ran = random.randrange(nPossibleWays)
for i in range(len(cumSum)):
if ran < cumSum[i]:
break
nInNewGroup = i + 2
# Ok, so we have decided that of the nChildren of
# theChosenPolytomy, we will make a new node with a group of
# nInNewGroup of them. For that, we can use random.sample().
newChildrenNodeNums = random.sample(childrenNodeNums, nInNewGroup)
if dbug:
print "The nChooseKs are %s" % nChooseKs
print "The cumSum is %s" % cumSum
print "Since there are nPossibleWays=%i, we choose a random number from 0-%i" % (
nPossibleWays, nPossibleWays-1)
print "->We chose a random number: %i" % ran
print "So we choose the group at index %i, which means nInNewGroup=%i" % (i, nInNewGroup)
print "So we make a new node with newChildrenNodeNums %s" % newChildrenNodeNums
#sys.exit()
# Choose to add a node between theChosenPolytomy and the first in
# the list of newChildrenNodeNums. The node that we add will be
# chosen from pTree.nodes for the first node where both the parent
# and the leftChild are None.
firstNode = pTree.nodes[newChildrenNodeNums[0]]
for newNode in pTree.nodes:
if not newNode.parent and not newNode.leftChild:
break
#print "Got newNode = %i" % newNode.nodeNum
# Add the newNode between theChosenPolytomy and firstNode
newNode.parent = theChosenPolytomy
newNode.leftChild = firstNode
firstNode.parent = newNode
if theChosenPolytomy.leftChild == firstNode:
theChosenPolytomy.leftChild = newNode
else:
oldCh = theChosenPolytomy.leftChild
while oldCh.sibling != firstNode:
oldCh = oldCh.sibling
oldCh.sibling = newNode
if firstNode.sibling:
newNode.sibling = firstNode.sibling
firstNode.sibling = None
pTree.setPreAndPostOrder()
pTree._nInternalNodes += 1
if 0:
#pTree.setPreAndPostOrder()
pTree.draw()
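# Move the rest of the chosen children onto the new node: prune each
# remaining subtree and re-attach it below newNode.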
for nodeNum in newChildrenNodeNums[1:]:
n = pTree.pruneSubTreeWithoutParent(nodeNum)
pTree.reconnectSubTreeWithoutParent(n, newNode)
# Calculate the rawSplitKey and splitKey.
# if self.mcmc.constraints:
# children = [n for n in newNode.iterChildren()]
# x = children[0].br.rawSplitKey
# for n in children[1:]:
# y = n.br.rawSplitKey
# x = x | y # '|' is bitwise "OR".
# newNode.br.rawSplitKey = x
# if 1 & newNode.br.rawSplitKey: # Ie "Does rawSplitKey contain a 1?" or "Is rawSplitKey odd?"
# if self.mcmc.constraints:
# newNode.br.splitKey = self.mcmc.constraints.allOnes ^ newNode.br.rawSplitKey # "^" is xor, a bit-flipper.
# else:
# allOnes = 2L**(self.propTree.nTax) - 1
# newNode.br.splitKey = allOnes ^ newNode.br.rawSplitKey
# else:
# newNode.br.splitKey = newNode.br.rawSplitKey
# It's a newly-added node, possibly in a new context. We need to
# deal with model stuff if it isHet. The model isHet if any part
# isHet.
if dbug:
pTree.setPreAndPostOrder()
pTree.draw()
# Now the Hastings ratio. First calculate gamma_B. If the
# current tree is a star tree (nInternalNodes == 1) and the
# proposed tree is not fully resolved (ie its nInternalNodes is less
# than len(self.propTree.nodes) - 2), then gamma_B is 0.5.
if (self.curTree.nInternalNodes == 1) and (pTree.nInternalNodes < (len(pTree.nodes) - 2)):
gamma_B = 0.5
# If the proposed tree is fully resolved and the current tree is not the star tree
elif (pTree.nInternalNodes == (len(pTree.nodes) - 2)) and (self.curTree.nInternalNodes > 1):
gamma_B = 2.0
else:
gamma_B = 1.0
# n_e is number of internal edges present before the Add-edge move. That would be self.curTree.nInternalNodes - 1
n_e = float(self.curTree.nInternalNodes - 1)
# n_p is the number of polytomies present before the move, len(allPolytomies)
n_p = float(len(allPolytomies))
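# The Hastings ratio is q(reverse)/q(forward): the reverse delete-edge
# move picks the new edge out of (n_e + 1) internal edges, while this
# move picks one of n_p polytomies and then one of nPossibleWays
# groupings; gamma_B accounts for the star-tree and fully-resolved
# boundary cases.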
hastingsRatio = (gamma_B * n_p * float(nPossibleWays)) / (1.0 + n_e)
if dbug:
print "The new node is given a random branch length of %f" % newNode.br.len
print "For the Hastings ratio ..."
print "gamma_B is %.1f" % gamma_B
print "n_e is %.0f" % n_e
print "k is (still) %i, and (2^{k-1} - k - 1) = nPossibleWays is still %i" % (k, nPossibleWays)
print "n_p = %.0f is the number of polytomies present before the move." % n_p
print "So the hastings ratio is %f" % hastingsRatio
self.logProposalRatio = math.log(hastingsRatio)
if 0:
priorRatio = self.mcmc.tunings.brLenPriorLambda * math.exp(- self.mcmc.tunings.brLenPriorLambda * newNode.br.len)
if dbug:
print "The self.mcmc.tunings.brLenPriorLambda is %f" % self.mcmc.tunings.brLenPriorLambda
print "So the prior ratio is %f" % priorRatio
self.logPriorRatio = math.log(priorRatio)
# The Jacobian
jacobian = 1.0 / (self.mcmc.tunings.brLenPriorLambda * math.exp(- self.mcmc.tunings.brLenPriorLambda * newNode.br.len))
self.logJacobian = math.log(jacobian)
print "logPriorRatio = %f, logJacobian = %f" % (self.logPriorRatio, self.logJacobian)
# Here I pull a fast one, as explained in Lewis et al. The
# priorRatio and the Jacobian terms cancel out. So the logs might
# as well be zeros.
self.logPriorRatio = 0.0
#self.logJacobian = 0.0
# That was easy, wasn't it?
if self.stMcmc.tunings.doPolytomyResolutionClassPrior:
# We are gaining a node, so the prior ratio is T_{n,m} /
# (T_{n,m+1} * C), where m is the current number of internal
# nodes. We have the logs, and the result is the log.
if 0:
print "-" * 30
print 'curTree.nInternalNodes', self.curTree.nInternalNodes
print 'pTree.nInternalNodes', pTree.nInternalNodes
print 'logBigT[curTree.nInternalNodes]', theProposal.logBigT[self.curTree.nInternalNodes]
#print math.exp(theProposal.logBigT[self.curTree.nInternalNodes])
print 'C ', self.stMcmc.tunings.polytomyPriorLogBigC
print 'logBigT[pTree.nInternalNodes]', theProposal.logBigT[pTree.nInternalNodes]
#print math.exp(theProposal.logBigT[pTree.nInternalNodes])
print "-" * 30
self.logPriorRatio = (theProposal.logBigT[self.curTree.nInternalNodes] -
(self.stMcmc.tunings.polytomyPriorLogBigC +
theProposal.logBigT[pTree.nInternalNodes]))
else:
if self.stMcmc.tunings.polytomyPriorLogBigC:
self.logPriorRatio = -self.stMcmc.tunings.polytomyPriorLogBigC
else:
self.logPriorRatio = 0.0
#print "gaining a node, m %2i->%2i. logPriorRatio is %f" % (self.curTree.nInternalNodes,
# pTree.nInternalNodes, self.logPriorRatio)
def _getCandidateNodesForDeleteEdge(self):
pTree = self.propTree
nodesWithInternalEdges = [n for n in pTree.iterInternalsNoRoot()]
# Remove any that might violate constraints.
# if self.mcmc.constraints:
# nodesToRemove = []
# for n in nodesWithInternalEdges:
# if n.br.splitKey in self.mcmc.constraints.constraints:
# nodesToRemove.append(n)
# for n in nodesToRemove:
# nodesWithInternalEdges.remove(n)
return nodesWithInternalEdges
def proposeDeleteEdge(self, theProposal, candidateNodes):
dbug = False
pTree = self.propTree
#print "doing proposeDeleteEdge()"
if 0:
print "proposeDeleteEdge(), starting with this tree ..."
pTree.draw()
print "m There are %i internal nodes (before deleting the edge)." % pTree.nInternalNodes
if not candidateNodes:
raise Glitch, "proposeDeleteEdge() could not find a good node to attempt to delete."
theChosenNode = random.choice(candidateNodes)
if dbug:
print "There are %i candidateNodes." % len(candidateNodes)
print "node nums %s" % [n.nodeNum for n in candidateNodes]
print "Randomly choose node %s" % theChosenNode.nodeNum
theNewParent = theChosenNode.parent
theRightmostChild = theChosenNode.rightmostChild()
theLeftSib = theChosenNode.leftSibling()
if theLeftSib:
theLeftSib.sibling = theChosenNode.leftChild
else:
theNewParent.leftChild = theChosenNode.leftChild
for n in theChosenNode.iterChildren():
n.parent = theNewParent
theRightmostChild.sibling = theChosenNode.sibling
theChosenNode.wipe()
pTree.setPreAndPostOrder()
pTree._nInternalNodes -= 1
#print pTree.preOrder
#if dbug:
# pTree.draw()
# Hastings ratio. First calculate the gamma_D. If the current
# tree is fully resolved and the proposed tree is not the star
# tree, then gamma_D is 0.5
if (self.curTree.nInternalNodes == len(pTree.nodes) - 2) and pTree.nInternalNodes != 1:
gamma_D = 0.5
# If the proposed tree is the star tree and the current tree is not fully resolved
elif (self.curTree.nInternalNodes < len(pTree.nodes) - 2) and pTree.nInternalNodes == 1:
gamma_D = 2.
else:
gamma_D = 1.
# n_e is the number of internal edges in existence before the move, which would be nInternalNodes - 1
n_e = float(self.curTree.nInternalNodes - 1)
# nStar_p is the number of polytomies in the tree after the move.
nStar_p = 0
for n in pTree.iterInternalsNoRoot():
if n.getNChildren() > 2:
nStar_p += 1
if pTree.root.getNChildren() > 3:
nStar_p += 1
nStar_p = float(nStar_p)
# kStar is the number of edges emanating from the polytomy created (or enlarged) by the move.
kStar = theNewParent.getNChildren()
if theNewParent.parent:
kStar += 1
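# Hastings ratio, the mirror image of the add-edge move: the reverse
# add-edge move must pick the enlarged polytomy out of nStar_p
# polytomies and then this grouping out of (2^(kStar-1) - kStar - 1)
# ways, while this move just picks one of the n_e internal edges;
# gamma_D handles the boundary cases.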
hastingsRatio = (gamma_D * n_e) / (nStar_p * (2**(kStar - 1) - kStar - 1))
self.logProposalRatio = math.log(hastingsRatio)
if 0:
# Now the prior ratio. The prior probability density f(nu) for a
# branch length is lambda * exp(-lambda * nu). To a first
# approximation, with equal priors on topologies, the prior ratio
# is 1/f(nu)
priorRatio = 1.0/(self.mcmc.tunings.brLenPriorLambda * math.exp(- self.mcmc.tunings.brLenPriorLambda * theChosenNode.br.len))
if dbug:
print "The self.mcmc.tunings.brLenPriorLambda is %f" % self.mcmc.tunings.brLenPriorLambda
print "So the prior ratio is %f" % priorRatio
self.logPriorRatio = math.log(priorRatio)
# The Jacobian
jacobian = self.mcmc.tunings.brLenPriorLambda * math.exp(- self.mcmc.tunings.brLenPriorLambda * theChosenNode.br.len)
self.logJacobian = math.log(jacobian)
print "logPriorRatio = %f, logJacobian = %f" % (self.logPriorRatio, self.logJacobian)
# Here I pull a fast one, as explained in Lewis et al. The
# priorRatio and the Jacobian terms cancel out. So the logs might
# as well be zeros.
self.logPriorRatio = 0.0
#self.logJacobian = 0.0
# That was easy, wasn't it?
if self.stMcmc.tunings.doPolytomyResolutionClassPrior:
# We are losing a node. So the prior ratio is (T_{n,m} * C) /
# T_{n,m - 1}. We have the logs, and the result is the log.
if 0:
print "-" * 30
print 'curTree.nInternalNodes', self.curTree.nInternalNodes
print 'pTree.nInternalNodes', pTree.nInternalNodes
print 'logBigT[curTree.nInternalNodes]', theProposal.logBigT[self.curTree.nInternalNodes]
#print math.exp(theProposal.logBigT[self.curTree.nInternalNodes])
print 'C ', self.stMcmc.tunings.polytomyPriorLogBigC
print 'logBigT[pTree.nInternalNodes]', theProposal.logBigT[pTree.nInternalNodes]
#print math.exp(theProposal.logBigT[pTree.nInternalNodes])
print "-" * 30
self.logPriorRatio = ((theProposal.logBigT[self.curTree.nInternalNodes] +
self.stMcmc.tunings.polytomyPriorLogBigC) -
theProposal.logBigT[pTree.nInternalNodes])
else:
if self.stMcmc.tunings.polytomyPriorLogBigC:
self.logPriorRatio = self.stMcmc.tunings.polytomyPriorLogBigC
else:
self.logPriorRatio = 0.0
#print " losing a node, m %2i->%2i. logPriorRatio is %f" % (self.curTree.nInternalNodes,
# pTree.nInternalNodes, self.logPriorRatio)
def propose(self, theProposal):
gm = ['STChain.propose()']
#print "propose() About to propose %s" % theProposal.name
if theProposal.name == 'nni':
#self.proposeNni(theProposal)
self.propTree.nni() # this does setPreAndPostOrder()
if theProposal.doAbort:
pass
#else:
# if not self.propTree.preAndPostOrderAreValid: # not needed
# self.propTree.setPreAndPostOrder()
elif theProposal.name == 'spr':
self.propTree.randomSpr()
if theProposal.doAbort:
pass
else:
if not self.propTree.preAndPostOrderAreValid:
self.propTree.setPreAndPostOrder()
elif theProposal.name == 'SR2008beta_uniform':
mt = self.propTree.beta
# Slider proposal
mt += (random.random() - 0.5) * theProposal.tuning
# Linear reflect
isGood = False
myMIN = 1.e-10
myMAX = 1.e+10
while not isGood:
if mt < myMIN:
mt = (myMIN - mt) + myMIN
elif mt > myMAX:
mt = myMAX - (mt - myMAX)
else:
isGood = True
self.propTree.beta = mt
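# The reflected sliding-window proposal is symmetric, so the proposal
# ratio is 1 (log 0); the prior ratio is likewise 1 for a uniform
# prior on beta.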
self.logProposalRatio = 0.0
self.logPriorRatio = 0.0
elif theProposal.name == 'spaQ_uniform':
mt = self.propTree.spaQ
#originally = mt
# Slider proposal
mt += (random.random() - 0.5) * theProposal.tuning
# Linear reflect
isGood = False
myMIN = 1.e-10
myMAX = 1.
while not isGood:
if mt < myMIN:
mt = (myMIN - mt) + myMIN
elif mt > myMAX:
mt = myMAX - (mt - myMAX)
else:
isGood = True
self.propTree.spaQ = mt
self.logProposalRatio = 0.0
self.logPriorRatio = 0.0
#print "proposing mt from %.3f to %.3f, diff=%g" % (originally, mt, mt-originally)
elif theProposal.name == 'polytomy':
self.proposePolytomy(theProposal)
if not self.propTree.preAndPostOrderAreValid:
self.propTree.setPreAndPostOrder()
#self.propTree.draw()
else:
gm.append('Unlisted proposal.name=%s Fix me.' % theProposal.name)
raise Glitch, gm
#return 0.0
if theProposal.doAbort:
return 0.0
else:
#print "...about to calculate the likelihood of the propTree. Model %s" % self.stMcmc.modelName
if self.stMcmc.modelName.startswith('SR2008_rf'):
if self.stMcmc.stRFCalc == 'fastReducedRF':
self.getTreeLogLike_fastReducedRF()
elif self.stMcmc.stRFCalc == 'purePython1':
self.getTreeLogLike_ppy1()
elif self.stMcmc.stRFCalc == 'bitarray':
self.refreshBitarrayPropTree()
self.getTreeLogLike_bitarray()
elif self.stMcmc.modelName == 'SPA':
self.refreshBitarrayPropTree()
self.getTreeLogLike_spa_bitarray()
elif self.stMcmc.modelName == 'QPA':
self.getTreeLogLike_qpa_slow()
else:
gm.append('Unknown model %s' % self.stMcmc.modelName)
raise Glitch, gm
#if theProposal.name == 'polytomy':
#print "propTree logLike is %f, curTree logLike is %f" % (
# self.propTree.logLike, self.curTree.logLike)
#myDist = self.propTree.topologyDistance(self.curTree)
#print "myDist %2i, propTree.logLike %.3f curTree.logLike %.3f " % (myDist, self.propTree.logLike, self.curTree.logLike)
logLikeRatio = self.propTree.logLike - self.curTree.logLike
#print logLikeRatio
#logLikeRatio = 0.0
theSum = logLikeRatio + self.logProposalRatio + self.logPriorRatio
#theSum = self.logProposalRatio + self.logPriorRatio
#if theProposal.name == 'polytomy':
# print "%f %f %f %f" % (theSum, logLikeRatio, self.logProposalRatio, self.logPriorRatio)
return theSum
def gen(self, aProposal):
gm = ['STChain.gen()']
# doAborts means that it was not a valid generation,
# neither accepted nor rejected. Give up, by returning True.
acceptMove = False
#print "Doing %s" % aProposal.name
pRet = self.propose(aProposal)
#print "pRet = %.6f" % pRet,
if not aProposal.doAbort:
if pRet < -100.0: # math.exp(-100.) is 3.7200759760208361e-44
r = 0.0
elif pRet >= 0.0:
r = 1.0
else:
r = math.exp(pRet)
if r == 1.0:
acceptMove = True
elif random.random() < r:
acceptMove = True
#if aProposal.name == 'polytomy':
#print "acceptMove = %s" % acceptMove
#print "------------"
#print " %6.0f" % pRet
if 0 and acceptMove:
d1 = self.propTree.topologyDistance(self.curTree, metric='scqdist')
d2 = self.stMcmc.tree.topologyDistance(self.propTree, metric='scqdist')
print " %6.0f %5i %5i %5s" % (pRet, d1, d2, acceptMove)
aProposal.nProposals[self.tempNum] += 1
if acceptMove:
aProposal.accepted = True
aProposal.nAcceptances[self.tempNum] += 1
#if not aProposal.doAbort:
if acceptMove:
a = self.propTree
b = self.curTree
else:
a = self.curTree
b = self.propTree
if aProposal.name in ['nni', 'spr', 'polytomy']:
b.logLike = a.logLike
a.copyToTree(b)
elif aProposal.name in ['SR2008beta_uniform']:
b.logLike = a.logLike
b.beta = a.beta
elif aProposal.name in ['spaQ_uniform']:
b.logLike = a.logLike
b.spaQ = a.spaQ
else:
gm.append('Unlisted proposal.name = %s Fix me.' % aProposal.name)
raise Glitch, gm
# for proposal probs
fudgeFactor = {}
fudgeFactor['local'] = 1.5
class STMcmcTunings(object):
def __init__(self):
object.__setattr__(self, 'chainTemp', 0.15) # was 0.2
object.__setattr__(self, 'nni', None)
object.__setattr__(self, 'spr', None)
object.__setattr__(self, 'SR2008beta_uniform', 0.2)
object.__setattr__(self, 'spaQ_uniform', 0.1)
object.__setattr__(self, 'doPolytomyResolutionClassPrior', False)
object.__setattr__(self, 'polytomyPriorLogBigC', 0.0)
def __setattr__(self, item, val):
#print "Got request to set %s to %s" % (item, val)
if item in self.__dict__.keys():
# Here is where I should do the sanity checking of the new vals. Some day.
#print " Setting tuning '%s' to %s" % (item, val)
object.__setattr__(self, item, val)
else:
self.dump()
gm = ["\nSTMcmcTunings.__setattr__()"]
gm.append("Can't set tuning '%s'-- no such tuning." % item)
raise Glitch, gm
def reprString(self, advice=True):
lst = ["\nSTMcmc.tunings:"]
spacer = ' ' * 4
lst.append("%s%20s: %s" % (spacer, 'chainTemp', self.chainTemp))
lst.append("%s%20s: %s" % (spacer, 'nni', self.nni))
lst.append("%s%20s: %s" % (spacer, 'spr', self.spr))
lst.append("%s%20s: %s" % (spacer, 'SR2008beta_uniform', self.SR2008beta_uniform))
lst.append("%s%20s: %s" % (spacer, 'spaQ_uniform', self.spaQ_uniform))
return string.join(lst, '\n')
def dump(self):
print self.reprString()
def __repr__(self):
return self.reprString()
class STMcmcProposalProbs(dict):
"""User-settable relative proposal probabilities.
An instance of this class is made as STMcmc.prob, where you can
do, for example,
yourSTMcmc.prob.nni = 2.0
These are relative proposal probs; they do not sum to 1.0, but they
affect the calculation of the final proposal probabilities (ie the
kind that do sum to 1). It is a relative setting, and the default
is 1.0. Setting it to 0 turns it off. For small
probabilities, setting it to 2.0 doubles it. For bigger
probabilities, setting it to 2.0 makes it somewhat bigger.
Check the effect that it has by doing a
yourSTMcmc.writeProposalIntendedProbs()
which prints out the final calculated probabilities.
"""
def __init__(self):
object.__setattr__(self, 'nni', 1.0)
object.__setattr__(self, 'spr', 1.0)
object.__setattr__(self, 'SR2008beta_uniform', 1.0)
object.__setattr__(self, 'spaQ_uniform', 1.0)
object.__setattr__(self, 'polytomy', 0.0)
def __setattr__(self, item, val):
# complaintHead = "\nSTMcmcProposalProbs.__setattr__()"
gm = ["\nSTMcmcProposalProbs(). (set %s to %s)" % (item, val)]
theKeys = self.__dict__.keys()
if item in theKeys:
try:
val = float(val)
if val < 1e-9:
val = 0
object.__setattr__(self, item, val)
except:
gm.append("Should be a float. Got '%s'" % val)
raise Glitch, gm
else:
self.dump()
gm.append(" Can't set '%s'-- no such proposal." % item)
raise Glitch, gm
def reprString(self):
stuff = ["\nUser-settable relative proposal probabilities, from yourMcmc.prob"]
stuff.append(" To change it, do eg ")
stuff.append(" yourMcmc.prob.comp = 0.0 # turns comp proposals off")
stuff.append(" Current settings:")
theKeys = self.__dict__.keys()
theKeys.sort()
for k in theKeys:
stuff.append(" %20s: %s" % (k, getattr(self, k)))
return string.join(stuff, '\n')
def dump(self):
print self.reprString()
def __repr__(self):
return self.reprString()
class STProposal(object):
def __init__(self, theSTMcmc=None):
self.name = None
self.stMcmc = theSTMcmc # reference loop!
self.nChains = theSTMcmc.nChains
self.pNum = -1
self.mtNum = -1
self.weight = 1.0
self.nProposals = [0] * self.nChains
self.nAcceptances = [0] * self.nChains
self.accepted = 0
self.doAbort = False
self.nAborts = [0] * self.nChains
def dump(self):
print "proposal name=%-10s pNum=%2i, mtNum=%2i, weight=%5.1f, tuning=%7.2f" % (
'%s,' % self.name, self.pNum, self.mtNum, self.weight, self.tuning)
print " nProposals by temperature: %s" % self.nProposals
print " nAcceptances by temperature: %s" % self.nAcceptances
def _getTuning(self):
if self.name in ['nni', 'spr', 'SR2008beta_uniform', 'spaQ_uniform']:
#print "getting tuning for %s, returning %f" % (self.name, getattr(self.mcmc.tunings, self.name))
#print self.stMcmc.tunings
return getattr(self.stMcmc.tunings, self.name)
else:
return None
def _setTuning(self, whatever):
raise Glitch, "Can't set tuning this way."
def _delTuning(self):
raise Glitch, "Can't del tuning."
tuning = property(_getTuning, _setTuning, _delTuning)
class BigTSplitStuff(object):
# An organizer for splits on STMcmc.tree (ie bigT) internal nodes, only for use with bitarray
def __init__(self):
self.spl = None
self.spl2 = None
self.theSpl = None
self.maskedSplitWithFirstTaxOne = None
self.onesCount = None
self.bytes = None
def dump(self):
print "ss: spl=%s, spl2=%s, masked=%s, onesCount=%s" % (
self.spl, self.spl2, self.maskedSplitWithFirstTaxOne, self.onesCount)
class STMcmc(object):
"""An MCMC for making supertrees from a set of input trees.
This week, it implements the Steel and Rodrigo 2008 model, with the
alpha calculation using the approximation in Bryant and Steel 2009.
**Arguments**
inTrees
A list of p4 tree objects. You could just use ``var.trees``.
modelName
The SR2008 models implemented here are based on the Steel and
Rodrigo 2008 description of a likelihood model, "Maximum
likelihood supertrees" Syst. Biol. 57(2):243--250, 2008. At
the moment, they are all SR2008_rf, meaning that they use
Robinson-Foulds distances.
SR2008_rf_ia
Here 'ia' means 'ignore alpha'. The alpha values are not
calculated at all, as they are presumed (erroneously, but
not too badly) to cancel out.
SR2008_rf_aZ
This uses the approximation for Z_T = alpha^{-1} as described
in Equation 30 in the Bryant and Steel paper "Computing the
distribution of a tree metric" in IEEE/ACM Transactions on
computational biology and bioinformatics, VOL. 6, 2009.
SR2008_rf_aZ_fb
This is as SR2008_rf_aZ above, but additionally it allows
beta to be a free parameter, and it is sampled. Samples
are written to mcmc_prams* files.
beta
This only applies to SR2008. The beta is the weight as
given in Steel and Rodrigo 2008. By default it is 1.0.
stRFCalc
There are three ways to calculate the RF distances and
likelihood, for these SR2008_rf models above --- all giving
the same answer.
1. purePython1. Slow.
2. bitarray, using the bitarray module. About twice as fast
as purePython1
3. fastReducedRF, written in C++ using boost and ublas.
About 10 times faster than purePython1, but perhaps a bit
of a bother to get going. It needs the fastReducedRF
module, included in the p4 source code.
It is under control of the argument stRFCalc, which can be one
of 'purePython1', 'bitarray', and 'fastReducedRF'. By default
it is purePython1, so you may want to at least install
bitarray.
runNum
You may want to do more than one 'run' in the same directory,
to facilitate convergence testing. The first runNum would be
0, and samples, likelihoods, and checkPoints are written to
files with that number.
sampleInterval
Interval at which the chain is sampled, including writing a tree,
and the logLike. Plan to get perhaps 1000 samples; so if you are
planning to make a run of 10000 generations then you might set
sampleInterval=10.
checkPointInterval
Interval at which checkpoints are made. If set to None (the
default) it means don't make checkpoints. My taste is to aim to
make perhaps 2 to 4 per run. So if you are planning to start out
with a run of 10000 generations, you could set
checkPointInterval=5000, which will give you 2 checkpoints. See
more about checkpointing below.
To prepare for a run, instantiate an Mcmc object, for example::
m = STMcmc(treeList, modelName='SR2008_rf_aZ_fb', stRFCalc='fastReducedRF', sampleInterval=10)
To start it running, do this::
# Tell it the number of generations to do
m.run(10000)
As it runs, it saves trees and likelihoods at sampleInterval
intervals (actually whenever the current generation number is
evenly divisible by the sampleInterval).
**CheckPoints**
Whenever the current generation number is evenly divisible by the
checkPointInterval it will write a checkPoint file. A checkPoint
file is the whole MCMC, pickled. Using a checkPoint, you can
re-start an STMcmc from the point you left off. Or, in the event
of a crash, you can restart from the latest checkPoint. But the
most useful thing about them is that you can query checkPoints to
get information about how the chain has been running, and about
convergence diagnostics.
In order to restart the MCMC from the end of a previous run::
# read the last checkPoint file
m = func.unPickleStMcmc(0) # runNum 0
m.run(20000)
It's that easy if your previous run finished properly. However, if
your previous run has crashed and you want to restart it from a
checkPoint, then you will need to repair the sample output files
to remove samples that were taken after the last checkPoint, but
before the crash. Fix the trees, likelihoods, prams, and sims.
(You probably do not need to worry about confusing gen (eg 9999) and
gen+1 (eg 10000) issues.) When you remove trees from the tree
files be sure to leave the 'end;' at the end-- p4 needs it, and
will deal with it.
The checkPoints can help with convergence testing. To help with
that, you can use the STMcmcCheckPointReader class. It will print
out a table of average standard deviations of split supports
between 2 runs, or between 2 checkPoints from the same run. It
will print out tables of proposal acceptances to show whether they
change over the course of the MCMC.
**Making a consensus tree**
See :class:`TreePartitions`.
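For example (the file name and the skip value here are only
placeholders -- use whatever suits your own run), something like this
should work::
    tp = TreePartitions('mcmc_trees_0.nex', skip=500)
    t = tp.consensus()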
"""
def __init__(self, inTrees, bigT=None, modelName='SR2008_rf_aZ', beta=1.0, spaQ=0.5, stRFCalc='purePython1', runNum=0, sampleInterval=100, checkPointInterval=None):
gm = ['STMcmc.__init__()']
assert inTrees
for t in inTrees:
assert isinstance(t, Tree)
if bigT:
assert isinstance(bigT, Tree)
assert bigT.taxNames
bigT.stripBrLens()
for n in bigT.iterInternalsNoRoot():
n.name = None
goodModelNames = ['SR2008_rf_ia', 'SR2008_rf_aZ', 'SR2008_rf_aZ_fb', 'SPA', 'QPA']
if modelName not in goodModelNames:
gm.append("Arg modelName '%s' is not recognized. " % modelName)
gm.append("Good modelNames are %s" % goodModelNames)
raise Glitch, gm
self.modelName = modelName
self.stRFCalc = None
if modelName.startswith("SR2008"):
try:
fBeta = float(beta)
except ValueError:
gm.append("Arg beta (%s) should be a float" % beta)
raise Glitch, gm
self.beta = fBeta
for t in inTrees:
if t.isFullyBifurcating():
pass
else:
gm.append("At the moment STMcmc wants trees that are fully bifurcating.")
raise Glitch, gm
goodSTRFCalcNames = ['purePython1', 'bitarray', 'fastReducedRF']
if stRFCalc not in goodSTRFCalcNames:
gm.append("Arg stRFCalc '%s' is not recognized. " % modelName)
gm.append("Good stRFCalc names are %s" % goodSTRFCalcNames)
raise Glitch, gm
self.stRFCalc = stRFCalc
if modelName in ['SPA', 'QPA']:
try:
fspaQ = float(spaQ)
except ValueError:
gm.append("Arg spaQ (%s) should be a float" % spaQ)
raise Glitch, gm
self.spaQ = fspaQ
nChains = 1 # mcmcmc is off, temporarily
try:
nChains = int(nChains)
except (ValueError,TypeError):
gm.append("nChains should be an int, 1 or more. Got %s" % nChains)
raise Glitch, gm
if nChains < 1:
gm.append("nChains should be an int, 1 or more. Got %s" % nChains)
raise Glitch, gm
self.nChains = nChains
self.chains = []
self.gen = -1
self.startMinusOne = -1
self.constraints = None
self.simulate = None
try:
runNum = int(runNum)
except (ValueError, TypeError):
gm.append("runNum should be an int, 0 or more. Got %s" % runNum)
raise Glitch, gm
if runNum < 0:
gm.append("runNum should be an int, 0 or more. Got %s" % runNum)
raise Glitch, gm
self.runNum = runNum
# Check that we are not going to over-write good stuff
ff = os.listdir(os.getcwd())
hasPickle = False
for fName in ff:
if fName.startswith("mcmc_checkPoint_%i." % self.runNum):
hasPickle = True
break
if hasPickle:
gm.append("runNum is set to %i" % self.runNum)
gm.append("There is at least one mcmc_checkPoint_%i.xxx file in this directory." % self.runNum)
gm.append("This is a new STMcmc, and I am refusing to over-write exisiting files.")
gm.append("Maybe you want to re-start from the latest mcmc_checkPoint_%i file?" % self.runNum)
gm.append("Otherwise, get rid of the existing mcmc_xxx_%i.xxx files and start again." % self.runNum)
raise Glitch, gm
if var.strictRunNumberChecking:
# We want to start runs with number 0, so if runNum is more than that, check that there are other runs.
if self.runNum > 0:
for runNum2 in range(self.runNum):
hasTrees = False
for fName in ff:
if fName.startswith("mcmc_trees_%i" % runNum2):
hasTrees = True
break
if not hasTrees:
gm.append("runNum is set to %i" % self.runNum)
gm.append("runNums should go from zero up.")
gm.append("There are no mcmc_trees_%i.nex files to show that run %i has been done." % (runNum2, runNum2))
gm.append("Set the runNum to that, first.")
raise Glitch, gm
self.sampleInterval = sampleInterval
self.checkPointInterval = checkPointInterval
self.proposals = []
self.proposalsHash = {}
self.propWeights = []
self.cumPropWeights = []
self.totalPropWeights = 0.0
self.treePartitions = None
self.likesFileName = "mcmc_likes_%i" % runNum
self.treeFileName = "mcmc_trees_%i.nex" % runNum
self.pramsFileName = "mcmc_prams_%i" % runNum
self.writePrams = False
if self.modelName in ['SR2008_rf_aZ_fb', "SPA", "QPA"]:
self.writePrams = True
self.lastTimeCheck = None
if self.nChains > 1:
self.swapMatrix = []
for i in range(self.nChains):
self.swapMatrix.append([0] * self.nChains)
else:
self.swapMatrix = None
self.tunings = STMcmcTunings()
self.prob = STMcmcProposalProbs()
if self.modelName in ['SPA', 'QPA']:
self.prob.polytomy = 1.0
self.prob.spr = 0.0
# Zap internal node names
# for n in aTree.root.iterInternals():
# if n.name:
# n.name = None
if not bigT:
allNames = []
for t in inTrees:
t.unsorted_taxNames = [n.name for n in t.iterLeavesNoRoot()]
allNames += t.unsorted_taxNames # Efficient? Probably does not matter.
self.taxNames = list(set(allNames))
self.taxNames.sort() # not needed, but nice for debugging
else:
for t in inTrees:
t.unsorted_taxNames = [n.name for n in t.iterLeavesNoRoot()]
self.taxNames = bigT.taxNames
#print self.taxNames
self.nTax = len(self.taxNames)
if self.modelName in ['SPA'] or self.stRFCalc == 'bitarray':
#print "self.taxNames = ", self.taxNames
for t in inTrees:
#print "-" * 50
#t.draw()
sorted_taxNames = []
t.baTaxBits = []
for tNum in range(self.nTax):
tN = self.taxNames[tNum]
if tN in t.unsorted_taxNames:
sorted_taxNames.append(tN)
t.baTaxBits.append(True)
else:
t.baTaxBits.append(False)
t.taxNames = sorted_taxNames
t.baTaxBits = bitarray.bitarray(t.baTaxBits)
t.firstTax = t.baTaxBits.index(1)
#print "intree baTaxBits is %s" % t.baTaxBits
#print "intree firstTax is %i" % t.firstTax
# Can't use Tree.makeSplitKeys(), unfortunately. So
# make split keys here. STMcmc.tBits is only used for
# the leaves, here and in
# STChain.setupBitarrayCalcs(), and there only once,
# during STChain.__init__(). So probably does not
# need to be an instance attribute. Maybe delete?
self.tBits = [False] * self.nTax
for n in t.iterPostOrder():
if n == t.root:
break
if n.isLeaf:
spot = self.taxNames.index(n.name)
self.tBits[spot] = True
n.stSplitKey = bitarray.bitarray(self.tBits)
self.tBits[spot] = False
else:
n.stSplitKey = n.leftChild.stSplitKey.copy()
p = n.leftChild.sibling
while p:
n.stSplitKey |= p.stSplitKey # "or", in-place
p = p.sibling
#print "setting node %i stSplitKey to %s" % (n.nodeNum, n.stSplitKey)
t.splSet = set()
for n in t.iterInternalsNoRoot():
if not n.stSplitKey[t.firstTax]: # make sure splitKey[firstTax] is a '1'
n.stSplitKey.invert()
n.stSplitKey &= t.baTaxBits # 'and', in-place
#print "inverting and and-ing node %i stSplitKey to %s" % (n.nodeNum, n.stSplitKey)
t.splSet.add(n.stSplitKey.tobytes()) # bytes so that I can use it as a set element
if self.modelName in ['QPA']:
for t in inTrees:
sorted_taxNames = []
t.taxBits = []
for tNum in range(self.nTax):
tN = self.taxNames[tNum]
if tN in t.unsorted_taxNames:
sorted_taxNames.append(tN)
t.taxBits.append(1L << tNum)
else:
t.taxBits.append(0)
t.taxNames = sorted_taxNames
#print "intree taxBits is %s" % t.taxBits
# Can't use Tree.makeSplitKeys(), unfortunately. So
# make split keys here. STMcmc.tBits is only used for
# the leaves, here and in
# STChain.setupBitarrayCalcs(), and there only once,
# during STChain.__init__(). So probably does not
# need to be an instance attribute. Maybe delete?
#self.tBits = [False] * self.nTax
for n in t.iterPostOrder():
if n == t.root:
break
if n.isLeaf:
spot = self.taxNames.index(n.name)
#self.tBits[spot] = True
n.stSplitKey = 1L << spot
#self.tBits[spot] = False
else:
n.stSplitKey = n.leftChild.stSplitKey
p = n.leftChild.sibling
while p:
n.stSplitKey |= p.stSplitKey # "or", in-place
p = p.sibling
#print "setting node %i stSplitKey to %s" % (n.nodeNum, n.stSplitKey)
# t.splSet = set()
# for n in t.iterInternalsNoRoot():
# if not n.stSplitKey[t.firstTax]: # make sure splitKey[firstTax] is a '1'
# n.stSplitKey.invert()
# n.stSplitKey &= t.baTaxBits # 'and', in-place
# #print "inverting and and-ing node %i stSplitKey to %s" % (n.nodeNum, n.stSplitKey)
# t.splSet.add(n.stSplitKey.tobytes()) # bytes so that I can use it as a set element
t.skk = [n.stSplitKey for n in t.iterInternalsNoRoot()]
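# Build the set of quartets induced by this input tree. Each internal
# split separates the tree's taxa into two sides; every pair from one
# side combined with every pair from the other defines a quartet,
# stored as a canonically-ordered 4-tuple of taxon bits.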
t.qSet = set()
for sk in t.skk:
ups = [txBit for txBit in t.taxBits if (sk & txBit)]
downs = [txBit for txBit in t.taxBits if not (sk & txBit)]
for down in itertools.combinations(downs, 2):
if down[0] > down[1]:
down = (down[1], down[0])
for up in itertools.combinations(ups, 2):
if up[0] > up[1]:
up = (up[1], up[0])
if down[0] < up[0]:
t.qSet.add(down+up)
else:
t.qSet.add(up+down)
#print t.qSet
t.nQuartets = len(t.qSet)
self.trees = inTrees
if bigT:
self.tree = bigT
else:
self.tree = func.randomTree(taxNames=self.taxNames, name='stTree', randomBrLens=False)
if self.stRFCalc in ['purePython1', 'fastReducedRF']:
for t in inTrees:
sorted_taxNames = []
t.taxBits = 0L
for tNum in range(self.nTax):
tN = self.taxNames[tNum]
if tN in t.unsorted_taxNames:
sorted_taxNames.append(tN)
adder = 1L << tNum
t.taxBits += adder
t.taxNames = sorted_taxNames
t.allOnes = 2L**(t.nTax) - 1
t.makeSplitKeys()
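# Cache this input tree's internal splitKeys; the SR2008_rf likelihoods
# compare them against the supertree's splits reduced to this tree's
# taxa (hence the 'reduced RF' calculators).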
t.skSet = set([n.br.splitKey for n in t.iterInternalsNoRoot()])
if self.stRFCalc in ['purePython1', 'fastReducedRF']:
self.tree.makeSplitKeys()
self.Frrf = None
if self.stRFCalc == 'fastReducedRF':
try:
import fastReducedRF
self.Frrf = fastReducedRF.Frrf
import pyublas # not explicitly used--but makes converters available
except ImportError:
gm.append("var.stRFCalc is set to 'fastReducedRF', but I could not import")
gm.append("at least one of fastReducedRF or pyublas.")
gm.append("Make sure they are installed.")
raise Glitch, gm
if self.modelName in ['QPA']:
self.tree.taxBits = [1L << i for i in range(self.tree.nTax)]
t = self.tree
for n in t.iterPostOrder():
if n == t.root:
break
if n.isLeaf:
spot = self.taxNames.index(n.name)
n.stSplitKey = 1L << spot
else:
n.stSplitKey = n.leftChild.stSplitKey
p = n.leftChild.sibling
while p:
n.stSplitKey |= p.stSplitKey # "or", in-place
p = p.sibling
t.skk = [n.stSplitKey for n in t.iterInternalsNoRoot()]
t.qSet = set()
for sk in t.skk:
ups = [txBit for txBit in t.taxBits if (sk & txBit)]
downs = [txBit for txBit in t.taxBits if not (sk & txBit)]
for down in itertools.combinations(downs, 2):
assert down[0] < down[1] # probably not needed
for up in itertools.combinations(ups, 2):
assert up[0] < up[1] # probably not needed
if down[0] < up[0]:
t.qSet.add(down+up)
else:
t.qSet.add(up+down)
#print t.qSet
t.nQuartets = len(t.qSet)
print "Initializing STMcmc"
print "%-10s: %s" % ('modelName', modelName)
if self.modelName.startswith("SR2008"):
print "%-10s: %s" % ('stRFCalc', self.stRFCalc)
print "%-10s: %s" % ('inTrees', len(self.trees))
print "%-10s: %s" % ('nTax', self.nTax)
def _makeProposals(self):
"""Make proposals for the STMcmc."""
gm = ['STMcmc._makeProposals()']
# nni
if self.prob.nni:
p = STProposal(self)
p.name = 'nni'
p.weight = self.prob.nni # * (len(self.tree.nodes) - 1) * fudgeFactor['nni']
self.proposals.append(p)
if self.prob.spr:
p = STProposal(self)
p.name = 'spr'
p.weight = self.prob.spr # * (len(self.tree.nodes) - 1) * fudgeFactor['nni']
self.proposals.append(p)
if self.modelName in ['SR2008_rf_aZ_fb']:
if self.prob.SR2008beta_uniform:
p = STProposal(self)
p.name = 'SR2008beta_uniform'
p.weight = self.prob.SR2008beta_uniform # * (len(self.tree.nodes) - 1) * fudgeFactor['nni']
self.proposals.append(p)
#object.__setattr__(self.tuningsUsage, 'local', p)
if self.modelName in ['SPA', 'QPA']:
if self.prob.spaQ_uniform:
p = STProposal(self)
p.name = 'spaQ_uniform'
p.weight = self.prob.spaQ_uniform # * (len(self.tree.nodes) - 1) * fudgeFactor['nni']
self.proposals.append(p)
#object.__setattr__(self.tuningsUsage, 'local', p)
if self.prob.polytomy:
p = STProposal(self)
p.name = 'polytomy'
p.weight = self.prob.polytomy
self.proposals.append(p)
if not self.proposals:
gm.append("No proposals?")
raise Glitch, gm
self.propWeights = []
for p in self.proposals:
self.propWeights.append(p.weight)
self.cumPropWeights = [self.propWeights[0]]
for i in range(len(self.propWeights))[1:]:
self.cumPropWeights.append(self.cumPropWeights[i - 1] + self.propWeights[i])
self.totalPropWeights = sum(self.propWeights)
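# run() draws a proposal by picking a uniform random number in
# [0, totalPropWeights) and taking the first proposal whose cumulative
# weight exceeds it.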
if self.totalPropWeights < 1e-9:
gm.append("No proposal weights?")
raise Glitch, gm
for p in self.proposals:
self.proposalsHash[p.name] = p
def _refreshProposalProbsAndTunings(self):
"""Adjust proposals after a restart."""
gm = ['STMcmc._refreshProposalProbsAndTunings()']
for p in self.proposals:
# nni
if p.name == 'nni':
#p.weight = self.prob.local * (len(self.tree.nodes) - 1) * fudgeFactor['local']
p.weight = self.prob.nni
self.propWeights = []
for p in self.proposals:
self.propWeights.append(p.weight)
self.cumPropWeights = [self.propWeights[0]]
for i in range(len(self.propWeights))[1:]:
self.cumPropWeights.append(self.cumPropWeights[i - 1] + self.propWeights[i])
self.totalPropWeights = sum(self.propWeights)
if self.totalPropWeights < 1e-9:
gm.append("No proposal weights?")
raise Glitch, gm
def writeProposalAcceptances(self):
"""Pretty-print the proposal acceptances."""
if (self.gen - self.startMinusOne) <= 0:
print "\nSTMcmc.writeProposalAcceptances() There is no info in memory. "
print " Maybe it was just emptied after writing to a checkpoint? "
print "If so, read the checkPoint and get the proposalAcceptances from there."
else:
spacer = ' ' * 8
print "\nProposal acceptances, run %i, for %i gens, from gens %i to %i, inclusive." % (
self.runNum, (self.gen - self.startMinusOne), self.startMinusOne + 1, self.gen)
print "%s %20s %10s %13s%8s" % (spacer, 'proposal', 'nProposals', 'acceptance(%)', 'tuning')
for p in self.proposals:
print "%s" % spacer,
print "%20s" % p.name,
print "%10i" % p.nProposals[0],
if p.nProposals[0]: # Don't divide by zero
print " %5.1f " % (100.0 * float(p.nAcceptances[0]) / float(p.nProposals[0])),
else:
print " - ",
if p.tuning == None:
print " -",
elif p.tuning < 2.0:
print " %5.3f" % p.tuning,
else:
print "%7.1f" % p.tuning,
print
# # Tabulate topology changes, if any were attempted.
# doTopol = 0
# p = None
# try:
# p = self.proposalsHash['local']
# except KeyError:
# pass
# if p:
# for tNum in range(self.nChains):
# if p.nTopologyChangeAttempts[tNum]:
# doTopol = 1
# break
# if doTopol:
# p = self.proposalsHash['local']
# print "'Local' proposal-- attempted topology changes"
# print "%s tempNum nProps nAccepts percent nTopolChangeAttempts nTopolChanges percent" % spacer
# for tNum in range(self.nChains):
# print "%s" % spacer,
# print "%4i " % tNum,
# print "%9i" % p.nProposals[tNum],
# print "%8i" % p.nAcceptances[tNum],
# print " %5.1f" % (100.0 * float(p.nAcceptances[tNum]) / float(p.nProposals[tNum])),
# print "%20i" % p.nTopologyChangeAttempts[tNum],
# print "%13i" % p.nTopologyChanges[tNum],
# print " %5.1f" % (100.0 * float(p.nTopologyChanges[tNum])/float(p.nTopologyChangeAttempts[tNum]))
# else:
# print "%sFor the 'local' proposals, there were no attempted" % spacer
# print "%stopology changes in any of the chains." % spacer
# Check for aborts.
# p = None
# try:
# p = self.proposalsHash['local']
# except KeyError:
# pass
# if p:
# if hasattr(p, 'nAborts'):
# if p.nAborts[0]:
# print "The 'local' proposal had %i aborts." % p.nAborts[0]
# print "(Aborts might be due to brLen proposals too big or too small)"
# if self.constraints:
# print "(Or, more likely, due to violated constraints.)"
# else:
# print "The 'local' proposal had no aborts (either due to brLen proposals"
# print "too big or too small, or due to violated constraints)."
# for pN in ['polytomy', 'compLocation', 'rMatrixLocation', 'gdasrvLocation']:
# p = None
# try:
# p = self.proposalsHash[pN]
# except KeyError:
# pass
# if p:
# if hasattr(p, 'nAborts'):
# print "The %15s proposal had %5i aborts." % (p.name, p.nAborts[0])
def writeSwapMatrix(self):
print "\nChain swapping, for %i gens, from gens %i to %i, inclusive." % (
(self.gen - self.startMinusOne), self.startMinusOne + 1, self.gen)
print " Swaps are presented as a square matrix, nChains * nChains."
print " Upper triangle is the number of swaps proposed between two chains."
print " Lower triangle is the percent swaps accepted."
print " The current tunings.chainTemp is %5.3f\n" % self.tunings.chainTemp
print " " * 10,
for i in range(self.nChains):
print "%7i" % i,
print
print " " * 10,
for i in range(self.nChains):
print " ----",
print
for i in range(self.nChains):
print " " * 7, "%2i" % i,
for j in range(self.nChains):
if i < j: # upper triangle
print "%7i" % self.swapMatrix[i][j],
elif i == j:
print " -",
else:
if self.swapMatrix[j][i] == 0: # no proposals
print " -",
else:
print " %5.1f" % (100.0 * float(self.swapMatrix[i][j]) / float(self.swapMatrix[j][i])),
print
def _makeChainsAndProposals(self):
"""Make chains and proposals."""
gm = ['STMcmc._makeChainsAndProposals()']
#random.seed(0)
# Make chains, if needed
if not self.chains:
self.chains = []
for chNum in range(self.nChains):
aChain = STChain(self)
aChain.tempNum = chNum # Temperature. Set this way to start, but it changes.
self.chains.append(aChain)
if not self.proposals:
self._makeProposals()
# If we are going to be doing the resolution class prior
# in the polytomy move, we want to pre-compute the logs of
# T_{n,m}. It's a vector with indices (ie m) from zero to
# nTax-2 inclusive.
# if self.proposalsHash.has_key('polytomy') and self.tunings.doPolytomyResolutionClassPrior:
# p = self.proposalsHash['polytomy']
# bigT = func.nUnrootedTreesWithMultifurcations(self.tree.nTax)
# p.logBigT = [0.0] * (self.tree.nTax - 1)
# for i in range(1, self.tree.nTax - 1):
# p.logBigT[i] = math.log(bigT[i])
# #print p.logBigT
def _setOutputTreeFile(self):
"""Setup the (output) tree file for the STMcmc."""
gm = ['STMcmc._setOutputTreeFile()']
# Write the preamble for the trees outfile.
self.treeFile = file(self.treeFileName, 'w')
self.treeFile.write('#nexus\n\n')
self.treeFile.write('begin taxa;\n')
self.treeFile.write(' dimensions ntax=%s;\n' % self.tree.nTax)
self.treeFile.write(' taxlabels')
for tN in self.tree.taxNames:
self.treeFile.write(' %s' % func.nexusFixNameIfQuotesAreNeeded(tN))
self.treeFile.write(';\nend;\n\n')
self.treeFile.write('begin trees;\n')
self.translationHash = {}
i = 1
for tName in self.tree.taxNames:
self.translationHash[tName] = i
i += 1
self.treeFile.write(' translate\n')
for i in range(self.tree.nTax - 1):
self.treeFile.write(' %3i %s,\n' % (
i + 1, func.nexusFixNameIfQuotesAreNeeded(self.tree.taxNames[i])))
self.treeFile.write(' %3i %s\n' % (
self.tree.nTax, func.nexusFixNameIfQuotesAreNeeded(self.tree.taxNames[-1])))
self.treeFile.write(' ;\n')
self.treeFile.write(' [Tree numbers are gen+1]\n')
self.treeFile.close()
def run(self, nGensToDo, verbose=True):
"""Start the STMcmc running."""
gm = ['STMcmc.run()']
#Keep track of the first gen of this call to run(), maybe restart
firstGen = self.gen + 1
if self.checkPointInterval:
# We want a couple of things:
# 1. The last gen should be on checkPointInterval. For
# example, if the checkPointInterval is 200, then doing
# 100 or 300 generations will not be allowed cuz the
# chain would continue past the checkPoint-- bad. Or if
# you re-start after 500 gens and change to a
# checkPointInterval of 200, then you won't be allowed to
# do 500 gens.
#if ((self.gen + 1) + nGensToDo) % self.checkPointInterval == 0:
if nGensToDo % self.checkPointInterval == 0:
pass
else:
gm.append("With the current settings, the last generation won't be on a checkPointInterval.")
gm.append("self.gen+1=%i, nGensToDo=%i, checkPointInterval=%i" % ((self.gen + 1),
nGensToDo, self.checkPointInterval))
raise Glitch, gm
# 2. We also want the checkPointInterval to be evenly
# divisible by the sampleInterval.
if self.checkPointInterval % self.sampleInterval == 0:
pass
else:
gm.append("The checkPointInterval (%i) should be evenly divisible" % self.checkPointInterval)
gm.append("by the sampleInterval (%i)." % self.sampleInterval)
raise Glitch, gm
if self.proposals:
# It's either a re-start, or it has been thru autoTune().
# I can tell the difference by self.gen, which is -1 after
# autoTune()
if self.gen == -1:
self._makeChainsAndProposals()
self._setOutputTreeFile()
#if self.simulate:
# self.writeSimFileHeader(self.tree)
# The probs and tunings may have been changed by the user.
self._refreshProposalProbsAndTunings()
# This stuff below should be the same as is done after pickling, see below.
self.startMinusOne = self.gen
# Start the tree partitions over.
self.treePartitions = None
# Zero the proposal counts
for p in self.proposals:
p.nProposals = [0] * self.nChains
p.nAcceptances = [0] * self.nChains
p.nTopologyChangeAttempts = [0] * self.nChains
p.nTopologyChanges = [0] * self.nChains
# Zero the swap matrix
if self.nChains > 1:
self.swapMatrix = []
for i in range(self.nChains):
self.swapMatrix.append([0] * self.nChains)
else:
self._makeChainsAndProposals()
self._setOutputTreeFile()
#if self.simulate:
# self.writeSimFileHeader(self.tree)
if verbose:
self.writeProposalIntendedProbs()
sys.stdout.flush()
coldChainNum = 0
# If polytomy is turned on, then it is possible to get a star
# tree, in which case local will not work. So if we have both
# polytomy and local proposals, we should also have brLen.
# if self.proposalsHash.has_key("polytomy") and self.proposalsHash.has_key("local"):
# if not self.proposalsHash.has_key('brLen'):
# gm.append("If you have polytomy and local proposals, you should have a brLen proposal as well.")
# gm.append("It can have a low proposal probability, but it needs to be there.")
# gm.append("Turn it on by eg yourMcmc.prob.brLen = 0.001")
# raise Glitch, gm
if self.gen > -1:
# it is a re-start, so we need to back over the "end;" in the tree files.
f2 = file(self.treeFileName, 'a+')
pos = -1
while 1:
f2.seek(pos, 2)
c = f2.read(1)
if c == ';':
break
pos -= 1
#print "pos now %i" % pos
pos -= 3 # end;
f2.seek(pos, 2)
c = f2.read(4)
#print "got c = '%s'" % c
if c != "end;":
gm.append("Mcmc.run(). Failed to find and remove the 'end;' at the end of the tree file.")
raise Glitch, gm
else:
f2.seek(pos, 2)
f2.truncate()
f2.close()
if verbose:
print
print "Re-starting the MCMC run %i from gen=%i" % (self.runNum, self.gen)
print "Set to do %i more generations." % nGensToDo
#if self.writePrams:
# if self.chains[0].curTree.model.nFreePrams == 0:
# print "There are no free prams in the model, so I am turning writePrams off."
# self.writePrams = False
sys.stdout.flush()
self.startMinusOne = self.gen
else:
if verbose:
print "Starting the MCMC %s run %i" % ((self.constraints and "(with constraints)" or ""), self.runNum)
print "Set to do %i generations." % nGensToDo
if self.writePrams:
# if self.chains[0].curTree.model.nFreePrams == 0:
# print "There are no free prams in the model, so I am turning writePrams off."
# self.writePrams = False
# else:
pramsFile = file(self.pramsFileName, 'a')
if self.modelName.startswith("SR2008"):
pramsFile.write(" genPlus1 beta\n")
elif self.modelName.startswith("SPA"):
pramsFile.write(" genPlus1 spaQ\n")
elif self.modelName.startswith("QPA"):
pramsFile.write(" genPlus1 spaQ\n")
pramsFile.close()
sys.stdout.flush()
if verbose:
print "Sampling every %i." % self.sampleInterval
if self.checkPointInterval:
print "CheckPoints written every %i." % self.checkPointInterval
if nGensToDo <= 20000:
print "One dot is 100 generations."
else:
print "One dot is 1000 generations."
sys.stdout.flush()
self.treePartitions = None
realTimeStart = time.time()
self.lastTimeCheck = time.time()
abortableProposals = ['nni', 'spr', 'polytomy']
for gNum in range(nGensToDo):
self.gen += 1
#Do an initial time estimate based on 100 gens
if nGensToDo > 100 and self.gen-firstGen == 100:
diff_secs = time.time() - realTimeStart
total_secs = (float(nGensToDo)/float(100))*float(diff_secs)
deltaTime = datetime.timedelta(seconds = int(round(total_secs)))
print "Estimated completion time: %s days, %s" % (
deltaTime.days, time.strftime("%H:%M:%S",time.gmtime(deltaTime.seconds)))
# Above is a list of proposals where it is possible to abort.
# When a gen(aProposal) is made, below, aProposal.doAbort
# might be set, in which case we want to skip it for this
# gen. But we want to start each 'for chNum' loop with
# doAborts all turned off.
for chNum in range(self.nChains):
failure = True
nAttempts = 0
while failure:
# Get the next proposal
gotIt = False
safety = 0
while not gotIt:
theRan = random.uniform(0.0, self.totalPropWeights)
for i in range(len(self.cumPropWeights)):
if theRan < self.cumPropWeights[i]:
break
aProposal = self.proposals[i]
gotIt = True
if aProposal.name == 'nni':
if self.chains[chNum].curTree.nInternalNodes == 1: # Can't do nni on a star tree.
aProposal = self.proposalsHash['polytomy']
#elif aProposal.name == 'root3':
# if self.chains[chNum].curTree.nInternalNodes == 1: # Can't do root3 on a star tree.
# gotIt = False
if aProposal.doAbort:
gotIt = False
safety += 1
if safety > 1000:
gm.append("Could not find a proposal after %i attempts." % safety)
gm.append("Possibly a programming error.")
gm.append("Or possibly it is just a pathologically frustrating Mcmc.")
raise Glitch, gm
#if gNum % 2:
# aProposal = self.proposalsHash['brLen']
#else:
# aProposal = self.proposalsHash['comp']
if 0:
print "==== gNum=%i, chNum=%i, aProposal=%s (part %i)" % (
gNum, chNum, aProposal.name, aProposal.pNum),
sys.stdout.flush()
#print gNum,
failure = self.chains[chNum].gen(aProposal) # success returns None
if 0:
if failure:
print " failure"
else:
print
nAttempts += 1
if nAttempts > 1000:
gm.append("Was not able to do a successful generation after %i attempts." % nAttempts)
raise Glitch, gm
#print " Mcmc.run(). finished a gen on chain %i" % (chNum)
for pr in abortableProposals:
if self.proposalsHash.has_key(pr):
self.proposalsHash[pr].doAbort = False
# Do swap, if there is more than 1 chain.
if self.nChains == 1:
coldChain = 0
else:
# Chain swapping stuff was lifted from MrBayes. Thanks again.
chain1,chain2 = random.sample(self.chains, 2)
# Use the upper triangle of swapMatrix for nProposed's
if chain1.tempNum < chain2.tempNum:
self.swapMatrix[chain1.tempNum][chain2.tempNum] += 1
else:
self.swapMatrix[chain2.tempNum][chain1.tempNum] += 1
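# Metropolis-coupled swap: lnR is the log ratio obtained by evaluating
# each chain's log likelihood at the other chain's heat, where chain i
# has heat 1/(1 + chainTemp * tempNum), minus the same terms at their
# current heats.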
lnR = (1.0 / (1.0 + (self.tunings.chainTemp * chain1.tempNum))) * chain2.curTree.logLike
lnR += (1.0 / (1.0 + (self.tunings.chainTemp * chain2.tempNum))) * chain1.curTree.logLike
lnR -= (1.0 / (1.0 + (self.tunings.chainTemp * chain1.tempNum))) * chain1.curTree.logLike
lnR -= (1.0 / (1.0 + (self.tunings.chainTemp * chain2.tempNum))) * chain2.curTree.logLike
if lnR < -100.0:
r = 0.0
elif lnR >= 0.0:
r = 1.0
else:
r = math.exp(lnR)
acceptSwap = 0
if random.random() < r:
acceptSwap = 1
if acceptSwap:
# Use the lower triangle of swapMatrix to keep track of nAccepted's
if chain1.tempNum < chain2.tempNum:
self.swapMatrix[chain2.tempNum][chain1.tempNum] += 1
else:
self.swapMatrix[chain1.tempNum][chain2.tempNum] += 1
# Do the swap
chain1.tempNum, chain2.tempNum = chain2.tempNum, chain1.tempNum
# Find the cold chain, the one where tempNum is 0
coldChainNum = -1
for i in range(len(self.chains)):
if self.chains[i].tempNum == 0:
coldChainNum = i
break
if coldChainNum == -1:
gm.append("Unable to find which chain is the cold chain. Bad.")
raise Glitch, gm
# If it is a writeInterval, write stuff
if (self.gen + 1) % self.sampleInterval == 0:
if 1:
likesFile = file(self.likesFileName, 'a')
likesFile.write('%11i %f\n' % (self.gen + 1, self.chains[coldChainNum].curTree.logLike))
likesFile.close()
treeFile = file(self.treeFileName, 'a')
treeFile.write(" tree t_%i = [&U] " % (self.gen + 1))
self.chains[coldChainNum].curTree.writeNewick(treeFile,
withTranslation=1,
translationHash=self.translationHash,
doMcmcCommandComments=False)
treeFile.close()
if self.writePrams:
pramsFile = file(self.pramsFileName, 'a')
#pramsFile.write("%12i " % (self.gen + 1))
pramsFile.write("%12i" % (self.gen + 1))
if self.modelName.startswith("SR2008"):
pramsFile.write(" %f\n" % self.chains[coldChainNum].curTree.beta)
elif self.modelName in ["SPA", "QPA"]:
pramsFile.write(" %f\n" % self.chains[coldChainNum].curTree.spaQ)
pramsFile.close()
# Do a simulation
if self.simulate:
#print "about to simulate..."
self.doSimulate(self.chains[coldChainNum].curTree)
#print "...finished simulate."
# Do other stuff.
if hasattr(self, 'hook'):
self.hook(self.chains[coldChainNum].curTree)
if 0 and self.constraints:
print "Mcmc x1c"
print self.chains[0].verifyIdentityOfTwoTreesInChain()
print "b checking curTree .."
self.chains[0].curTree.checkSplitKeys()
print "b checking propTree ..."
self.chains[0].propTree.checkSplitKeys()
print "Mcmc xxx"
# Add curTree to treePartitions
if self.treePartitions:
self.treePartitions._getSplitsFromTree(self.chains[coldChainNum].curTree)
else:
self.treePartitions = TreePartitions(self.chains[coldChainNum].curTree)
# After _getSplitsFromTree, need to follow, at some point,
# with _finishSplits(). Do that when it is pickled, or at the end of the run.
# Checking and debugging constraints
if 0 and self.constraints:
print "Mcmc x1d"
print self.chains[coldChainNum].verifyIdentityOfTwoTreesInChain()
print "c checking curTree ..."
self.chains[coldChainNum].curTree.checkSplitKeys()
print "c checking propTree ..."
self.chains[coldChainNum].propTree.checkSplitKeys()
#print "c checking that all constraints are present"
#theSplits = [n.br.splitKey for n in self.chains[0].curTree.iterNodesNoRoot()]
#for sk in self.constraints.constraints:
# if sk not in theSplits:
# gm.append("split %i is not present in the curTree." % sk)
# raise Glitch, gm
print "Mcmc zzz"
# Check that the curTree has all the constraints
if self.constraints:
splitsInCurTree = [n.br.splitKey for n in self.chains[coldChainNum].curTree.iterInternalsNoRoot()]
for sk in self.constraints.constraints:
if sk not in splitsInCurTree:
gm.append("Programming error.")
gm.append("The current tree (the last tree sampled) does not contain constraint")
gm.append("%s" % func.getSplitStringFromKey(sk, self.tree.nTax))
raise Glitch, gm
# If it is a checkPointInterval, pickle
if self.checkPointInterval and (self.gen + 1) % self.checkPointInterval == 0:
self.checkPoint()
# The stuff below needs to be done in a re-start as well. See above "if self.proposals:"
self.startMinusOne = self.gen
# Start the tree partitions over.
self.treePartitions = None
# Zero the proposal counts
for p in self.proposals:
p.nProposals = [0] * self.nChains
p.nAcceptances = [0] * self.nChains
p.nTopologyChangeAttempts = [0] * self.nChains
p.nTopologyChanges = [0] * self.nChains
p.nAborts = [0] * self.nChains
# Zero the swap matrix
if self.nChains > 1:
self.swapMatrix = []
for i in range(self.nChains):
self.swapMatrix.append([0] * self.nChains)
# Reassuring pips ...
if firstGen != self.gen: #We want to skip the first gen of every call to run()
if nGensToDo <= 20000:
if (self.gen-firstGen) % 1000 == 0:
if verbose:
deltaTime = self._doTimeCheck(nGensToDo, firstGen, 1000)
if deltaTime.days:
timeString = "%s days, %s" % (
deltaTime.days, time.strftime("%H:%M:%S",time.gmtime(deltaTime.seconds)))
else:
timeString = time.strftime("%H:%M:%S",time.gmtime(deltaTime.seconds))
print "%10i - %s" % (self.gen, timeString)
else:
sys.stdout.write(".")
sys.stdout.flush()
elif (self.gen-firstGen) % 100 == 0:
sys.stdout.write(".")
sys.stdout.flush()
else:
if (self.gen-firstGen) % 50000 == 0:
if verbose:
deltaTime = self._doTimeCheck(nGensToDo, firstGen, 50000)
if deltaTime.days:
timeString = "%s days, %s" % (
deltaTime.days, time.strftime("%H:%M:%S",time.gmtime(deltaTime.seconds)))
else:
timeString = time.strftime("%H:%M:%S",time.gmtime(deltaTime.seconds))
print "%10i - %s" % (self.gen, timeString)
else:
sys.stdout.write(".")
sys.stdout.flush()
elif (self.gen-firstGen) % 1000 == 0:
sys.stdout.write(".")
sys.stdout.flush()
# Gens finished. Clean up.
print
if verbose:
print "Finished %s generations." % nGensToDo
treeFile = file(self.treeFileName, 'a')
treeFile.write('end;\n\n')
treeFile.close()
def _doTimeCheck(self, nGensToDo, firstGen, genInterval):
"""Time check
firstGen is the first generation of this call to Mcmc.run(); without
it, timing fails on a restart."""
nowTime = time.time()
diff_secs = nowTime - self.lastTimeCheck
total_secs = (float(nGensToDo-(self.gen-firstGen))/float(genInterval))*float(diff_secs)
deltaTime = datetime.timedelta(seconds = int(round(total_secs)))
self.lastTimeCheck = nowTime
return deltaTime
def checkPoint(self):
# Maybe we should not save the inTrees? -- would make it more lightweight.
if 0:
for chNum in range(self.nChains):
ch = self.chains[chNum]
print "chain %i ==================" % chNum
ch.curTree.summarizeModelThingsNNodes()
# the Frrf object does not pickle
savedFrrfs = []
savedBigTrs = []
if self.stRFCalc == 'fastReducedRF':
for chNum in range(self.nChains):
ch = self.chains[chNum]
savedFrrfs.append(ch.frrf)
ch.frrf = None
savedBigTrs.append(ch.bigTr)
ch.bigTr = None
theCopy = copy.deepcopy(self)
theCopy.treePartitions._finishSplits()
theCopy.likesFile = None
theCopy.treeFile = None
#theCopy.treePartitions = None # this can be the biggest part of the pickle.
# Pickle it.
fName = "mcmc_checkPoint_%i.%i" % (self.runNum, self.gen + 1)
f = file(fName, 'w')
cPickle.dump(theCopy, f, 1)
f.close()
if self.stRFCalc == 'fastReducedRF':
for chNum in range(self.nChains):
ch = self.chains[chNum]
ch.frrf = savedFrrfs[chNum]
ch.bigTr = savedBigTrs[chNum]
def writeProposalProbs(self):
"""(Another) Pretty-print the proposal probabilities.
See also STMcmc.writeProposalAcceptances().
"""
nProposals = len(self.proposals)
if not nProposals:
print "STMcmc.writeProposalProbs(). No proposals (yet?)."
return
#intended = self.propWeights[:]
#for i in range(len(intended)):
# intended[i] /= self.totalPropWeights
#if math.fabs(sum(intended) - 1.0 > 1e-15):
# raise Glitch, 'bad sum of intended proposal probs. %s' % sum(intended)
nAttained = [0] * nProposals
nAccepted = [0] * nProposals
for i in range(nProposals):
nAttained[i] = self.proposals[i].nProposals[0]
nAccepted[i] = self.proposals[i].nAcceptances[0]
sumAttained = float(sum(nAttained)) # should be zero or nGen
if not sumAttained:
print "STMcmc.writeProposalProbs(). No proposals have been made."
print "Possibly, due to it being a checkPoint interval, nProposals have all been set to zero."
return
#assert int(sumAttained) == self.gen + 1, "sumAttained is %i, should be gen+1, %i." % (
# int(sumAttained), self.gen + 1)
probAttained = []
for i in range(len(nAttained)):
probAttained.append(100.0 * float(nAttained[i]) / sumAttained)
        if math.fabs(sum(probAttained) - 100.0) > 1e-13:
raise Glitch, 'bad sum of attained proposal probs. %s' % sum(probAttained)
spacer = ' ' * 4
print "\nProposal probabilities (%)"
#print "There are %i proposals" % len(self.proposals)
print "For %i gens, from gens %i to %i, inclusive." % (
(self.gen - self.startMinusOne), self.startMinusOne + 1, self.gen)
print "%2s %11s %11s %11s %10s %23s %5s %5s" % ('', 'nProposals', 'proposed(%)',
'accepted(%)', 'tuning', 'proposal', 'part', 'num')
for i in range(len(self.proposals)):
print "%2i" % i,
p = self.proposals[i]
print " %7i " % self.proposals[i].nProposals[0],
print " %5.1f " % probAttained[i],
if nAttained[i]:
print " %5.1f " % (100.0 * float(nAccepted[i]) / float(nAttained[i])),
else:
print " - ",
            if p.tuning is None:
print " - ",
elif p.tuning < 2.0:
print " %7.3f " % p.tuning,
else:
print " %7.1f " % p.tuning,
print " %20s" % p.name,
if p.pNum != -1:
print " %3i " % p.pNum,
else:
print " - ",
if p.mtNum != -1:
print " %3i " % p.mtNum,
else:
print " - ",
print
def writeProposalIntendedProbs(self):
"""Tabulate the intended proposal probabilities.
"""
nProposals = len(self.proposals)
if not nProposals:
print "STMcmc.writeProposalIntendedProbs(). No proposals (yet?)."
return
intended = self.propWeights[:]
for i in range(len(intended)):
intended[i] /= self.totalPropWeights
        if math.fabs(sum(intended) - 1.0) > 1e-14:
raise Glitch, 'bad sum of intended proposal probs. %s' % sum(intended)
spacer = ' ' * 4
print "\nIntended proposal probabilities (%)"
#print "There are %i proposals" % len(self.proposals)
print "%2s %11s %23s %5s %5s" % ('', 'intended(%)', 'proposal', 'part', 'num')
for i in range(len(self.proposals)):
print "%2i" % i,
p = self.proposals[i]
print " %6.2f " % (100. * intended[i]),
print " %20s" % p.name,
if p.pNum != -1:
print " %3i " % p.pNum,
else:
print " - ",
if p.mtNum != -1:
print " %3i " % p.mtNum,
else:
print " - ",
print
class STMcmcCheckPointReader(object):
"""Read in and display mcmc_checkPoint files.
Three options--
To read in a specific checkpoint file, specify the file name by
fName=whatever
To read in the most recent (by os.path.getmtime()) checkpoint
file, say last=True
If you specify neither of the above, it will read in all the
checkPoint files that it finds.
Where it looks is determined by theGlob, which by default is '*',
ie everything in the current directory. If you want to look
somewhere else, you can specify eg
theGlob='SomeWhereElse/*'
or, if it is unambiguous, just
theGlob='S*/*'
So you might say
cpr = STMcmcCheckPointReader(theGlob='*_0.*')
to get all the checkpoints from the first run, run 0. Then, you
can tell the cpr object to do various things. Eg
cpr.writeProposalAcceptances()
But perhaps the most powerful thing about it is that it allows
easy access to the checkpointed Mcmc objects, in the list mm. Eg
to get the first one, ask for
m = cpr.mm[0]
and m is an STMcmc object, complete with all its records of
proposals and acceptances and so on. And the TreePartitions
object.
    (Sorry! -- Lazy documentation. See the source code for more of what it can do.)
"""
def __init__(self, fName=None, theGlob='*', last=False, verbose=True):
self.mm = []
if not fName:
#fList = [fName for fName in os.listdir(os.getcwd()) if fName.startswith("mcmc_checkPoint")]
#fList = glob.glob(theGlob)
#print "Full glob = %s" % fList
fList = [fName for fName in glob.glob(theGlob) if
os.path.basename(fName).startswith("mcmc_checkPoint")]
#print fList
if not fList:
raise Glitch, "No checkpoints found in this directory."
if last:
# Find the most recent
mostRecent = os.path.getmtime(fList[0])
mostRecentFileName = fList[0]
if len(fList) > 1:
for fName in fList[1:]:
mtime = os.path.getmtime(fName)
if mtime > mostRecent:
mostRecent = mtime
mostRecentFileName = fName
f = file(mostRecentFileName)
m = cPickle.load(f)
f.close()
self.mm.append(m)
else:
# get all the files
for fName in fList:
f = file(fName)
m = cPickle.load(f)
f.close()
self.mm.append(m)
self.mm = func.sortListOfObjectsOn2Attributes(self.mm, "gen", 'runNum')
else:
# get the file by name
f = file(fName)
m = cPickle.load(f)
f.close()
self.mm.append(m)
if verbose:
self.dump()
def dump(self):
print "STMcmcCheckPoints (%i checkPoints read)" % len(self.mm)
print "%12s %12s %12s %12s" % (" ", "index", "run", "gen+1")
print "%12s %12s %12s %12s" % (" ", "-----", "---", "-----")
for i in range(len(self.mm)):
m = self.mm[i]
#print " %2i run %2i, gen+1 %11i" % (i, m.runNum, m.gen+1)
print "%12s %12s %12s %12s" % (" ", i, m.runNum, m.gen+1)
def compareSplits(self, mNum1, mNum2, verbose=True, minimumProportion=0.1):
"""Should we be only looking at splits within the 95% ci of the topologies?"""
m1 = self.mm[mNum1]
m2 = self.mm[mNum2]
tp1 = m1.treePartitions
tp2 = m2.treePartitions
if verbose:
print "\nSTMcmcCheckPointReader.compareSplits(%i,%i)" % (mNum1, mNum2)
print "%12s %12s %12s %12s %12s" % ("mNum", "runNum", "start", "gen+1", "nTrees")
for i in range(5):
print " ---------",
print
for mNum in [mNum1, mNum2]:
print " %10i " % mNum,
m = self.mm[mNum]
print " %10i " % m.runNum,
print " %10i " % (m.startMinusOne + 1),
print " %10i " % (m.gen + 1),
#for i in m.splitCompares:
# print i
print " %10i " % m.treePartitions.nTrees
asdos = self.compareSplitsBetweenTwoTreePartitions(tp1, tp2, minimumProportion, verbose=verbose)
        if asdos is None and verbose:
print "No splits > %s" % minimumProportion
return asdos
def compareSplitsBetweenTwoTreePartitions(tp1, tp2, minimumProportion, verbose=False):
ret = tp1.compareSplits(tp2, minimumProportion=minimumProportion)
if ret != []:
sumOfStdDevs = 0.0
diffs = []
if ret and len(ret):
nSplits = len(ret)
for i in ret:
#print " %.3f %.3f " % (i[2][0], i[2][1]),
stdDev = math.sqrt(func.variance(i[2]))
#print "%.5f" % stdDev
sumOfStdDevs += stdDev
diffs.append(math.fabs(i[2][0] - i[2][1]))
if verbose:
#print " %f " % sumOfStdDevs,
print " nSplits=%i, average of std devs of splits %.4f " % (nSplits, sumOfStdDevs/nSplits)
print " max difference %f, mean difference %f" % (max(diffs), sum(diffs)/nSplits)
return sumOfStdDevs/nSplits
else:
return None
compareSplitsBetweenTwoTreePartitions = staticmethod(compareSplitsBetweenTwoTreePartitions)
def compareSplitsAll(self):
nM = len(self.mm)
nItems = ((nM * nM) - nM)/2
results = np.zeros((nM, nM), np.float)
vect = np.zeros(nItems, np.float)
vCounter = 0
for mNum1 in range(1, nM):
for mNum2 in range(mNum1):
ret = self.compareSplits(mNum1, mNum2, verbose=False)
#print "+++ ret = %s" % ret
                if ret is None:
ret = 0.0
results[mNum1][mNum2] = ret
results[mNum2][mNum1] = ret
vect[vCounter] = ret
vCounter += 1
if 0:
print " %10i " % mNum1,
print " %10i " % mNum2,
print "%.3f" % ret
print results
print "For the %i values in one triangle," % nItems
print "max = ", vect.max()
print "min = ", vect.min()
print "mean = ", vect.mean()
print "var = ", vect.var()
def writeProposalAcceptances(self):
for m in self.mm:
m.writeProposalAcceptances()
def writeSwapMatrices(self):
for m in self.mm:
if m.nChains > 1:
m.writeSwapMatrix()
def writeProposalProbs(self):
for m in self.mm:
m.writeProposalProbs()<|fim▁end|> | import numpy as np
from Glitch import Glitch
from TreePartitions import TreePartitions
from Constraints import Constraints |
<|file_name|>input_output.rs<|end_file_name|><|fim▁begin|>//! This module contains code to equate the input/output types appearing
//! in the MIR with the expected input/output types from the function
//! signature. This requires a bit of processing, as the expected types
//! are supplied to us before normalization and may contain opaque
//! `impl Trait` instances. In contrast, the input/output types found in
//! the MIR (specifically, in the special local variables for the
//! `RETURN_PLACE` and the MIR arguments) are always fully normalized (and
//! contain revealed `impl Trait` values).
use crate::type_check::constraint_conversion::ConstraintConversion;
use rustc_index::vec::Idx;
use rustc_infer::infer::LateBoundRegionConversionTime;
use rustc_middle::mir::*;
use rustc_middle::ty::Ty;
use rustc_span::Span;
use rustc_span::DUMMY_SP;
use rustc_trait_selection::traits::query::type_op::{self, TypeOp};
use rustc_trait_selection::traits::query::Fallible;
use type_op::TypeOpOutput;
use crate::universal_regions::UniversalRegions;
use super::{Locations, TypeChecker};
impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
#[instrument(skip(self, body, universal_regions), level = "debug")]
pub(super) fn equate_inputs_and_outputs(
&mut self,
body: &Body<'tcx>,
universal_regions: &UniversalRegions<'tcx>,
normalized_inputs_and_output: &[Ty<'tcx>],
) {
let (&normalized_output_ty, normalized_input_tys) =
normalized_inputs_and_output.split_last().unwrap();
debug!(?normalized_output_ty);
debug!(?normalized_input_tys);
let mir_def_id = body.source.def_id().expect_local();
// If the user explicitly annotated the input types, extract
// those.
//
// e.g., `|x: FxHashMap<_, &'static u32>| ...`
let user_provided_sig;
if !self.tcx().is_closure(mir_def_id.to_def_id()) {
user_provided_sig = None;
} else {
let typeck_results = self.tcx().typeck(mir_def_id);
user_provided_sig = typeck_results.user_provided_sigs.get(&mir_def_id.to_def_id()).map(
|user_provided_poly_sig| {
// Instantiate the canonicalized variables from
// user-provided signature (e.g., the `_` in the code
// above) with fresh variables.
let poly_sig = self.instantiate_canonical_with_fresh_inference_vars(
body.span,
&user_provided_poly_sig,
);
// Replace the bound items in the fn sig with fresh
// variables, so that they represent the view from
// "inside" the closure.
self.infcx
.replace_bound_vars_with_fresh_vars(
body.span,
LateBoundRegionConversionTime::FnCall,
poly_sig,
)
.0
},
);
}
debug!(?normalized_input_tys, ?body.local_decls);
// Equate expected input tys with those in the MIR.
for (argument_index, &normalized_input_ty) in normalized_input_tys.iter().enumerate() {
if argument_index + 1 >= body.local_decls.len() {
self.tcx()
.sess
.delay_span_bug(body.span, "found more normalized_input_ty than local_decls");
break;
}
// In MIR, argument N is stored in local N+1.
let local = Local::new(argument_index + 1);
let mir_input_ty = body.local_decls[local].ty;
let mir_input_span = body.local_decls[local].source_info.span;
self.equate_normalized_input_or_output(
normalized_input_ty,
mir_input_ty,
mir_input_span,
);
}
if let Some(user_provided_sig) = user_provided_sig {
for (argument_index, &user_provided_input_ty) in
user_provided_sig.inputs().iter().enumerate()
{
                // In MIR, closures begin with an implicit `self`, so
// argument N is stored in local N+2.
let local = Local::new(argument_index + 2);
let mir_input_ty = body.local_decls[local].ty;
let mir_input_span = body.local_decls[local].source_info.span;
// If the user explicitly annotated the input types, enforce those.
let user_provided_input_ty =
self.normalize(user_provided_input_ty, Locations::All(mir_input_span));
self.equate_normalized_input_or_output(
user_provided_input_ty,
mir_input_ty,
mir_input_span,
);
}
}
assert!(body.yield_ty().is_some() == universal_regions.yield_ty.is_some());
if let Some(mir_yield_ty) = body.yield_ty() {
let ur_yield_ty = universal_regions.yield_ty.unwrap();
let yield_span = body.local_decls[RETURN_PLACE].source_info.span;
self.equate_normalized_input_or_output(ur_yield_ty, mir_yield_ty, yield_span);
}
// Return types are a bit more complex. They may contain opaque `impl Trait` types.
let mir_output_ty = body.local_decls[RETURN_PLACE].ty;
let output_span = body.local_decls[RETURN_PLACE].source_info.span;
if let Err(terr) = self.eq_opaque_type_and_type(
mir_output_ty,
normalized_output_ty,<|fim▁hole|> ConstraintCategory::BoringNoLocation,
) {
span_mirbug!(
self,
Location::START,
"equate_inputs_and_outputs: `{:?}=={:?}` failed with `{:?}`",
normalized_output_ty,
mir_output_ty,
terr
);
};
// If the user explicitly annotated the output types, enforce those.
// Note that this only happens for closures.
if let Some(user_provided_sig) = user_provided_sig {
let user_provided_output_ty = user_provided_sig.output();
let user_provided_output_ty =
self.normalize(user_provided_output_ty, Locations::All(output_span));
if let Err(err) = self.eq_opaque_type_and_type(
mir_output_ty,
user_provided_output_ty,
Locations::All(output_span),
ConstraintCategory::BoringNoLocation,
) {
span_mirbug!(
self,
Location::START,
"equate_inputs_and_outputs: `{:?}=={:?}` failed with `{:?}`",
mir_output_ty,
user_provided_output_ty,
err
);
}
}
}
#[instrument(skip(self, span), level = "debug")]
fn equate_normalized_input_or_output(&mut self, a: Ty<'tcx>, b: Ty<'tcx>, span: Span) {
if let Err(_) =
self.eq_types(a, b, Locations::All(span), ConstraintCategory::BoringNoLocation)
{
// FIXME(jackh726): This is a hack. It's somewhat like
// `rustc_traits::normalize_after_erasing_regions`. Ideally, we'd
// like to normalize *before* inserting into `local_decls`, but
// doing so ends up causing some other trouble.
let b = match self.normalize_and_add_constraints(b) {
Ok(n) => n,
Err(_) => {
debug!("equate_inputs_and_outputs: NoSolution");
b
}
};
// Note: if we have to introduce new placeholders during normalization above, then we won't have
// added those universes to the universe info, which we would want in `relate_tys`.
if let Err(terr) =
self.eq_types(a, b, Locations::All(span), ConstraintCategory::BoringNoLocation)
{
span_mirbug!(
self,
Location::START,
"equate_normalized_input_or_output: `{:?}=={:?}` failed with `{:?}`",
a,
b,
terr
);
}
}
}
pub(crate) fn normalize_and_add_constraints(&mut self, t: Ty<'tcx>) -> Fallible<Ty<'tcx>> {
let TypeOpOutput { output: norm_ty, constraints, .. } =
self.param_env.and(type_op::normalize::Normalize::new(t)).fully_perform(self.infcx)?;
debug!("{:?} normalized to {:?}", t, norm_ty);
for data in constraints.into_iter().collect::<Vec<_>>() {
ConstraintConversion::new(
self.infcx,
&self.borrowck_context.universal_regions,
&self.region_bound_pairs,
Some(self.implicit_region_bound),
self.param_env,
Locations::All(DUMMY_SP),
ConstraintCategory::Internal,
&mut self.borrowck_context.constraints,
)
.convert_all(&*data);
}
Ok(norm_ty)
}
}<|fim▁end|> | Locations::All(output_span), |
<|file_name|>unique-in-vec-copy.rs<|end_file_name|><|fim▁begin|>// run-pass
#![feature(box_syntax)]
pub fn main() {
let mut a: Vec<Box<_>> = vec![box 10];
let b = a.clone();
assert_eq!(*a[0], 10);
assert_eq!(*b[0], 10);
// This should only modify the value in a, not b
*a[0] = 20;
assert_eq!(*a[0], 20);
assert_eq!(*b[0], 10);<|fim▁hole|><|fim▁end|> | } |
<|file_name|>enclaveapi.rs<|end_file_name|><|fim▁begin|>// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
use shared::basetsd::{PSIZE_T, SIZE_T};
use shared::minwindef::{BOOL, DWORD, LPCVOID, LPDWORD, LPVOID};
use shared::ntdef::{HANDLE};
use um::minwinbase::LPENCLAVE_ROUTINE;
use um::winnt::{LPCSTR, LPCWSTR};
extern "system" {
pub fn IsEnclaveTypeSupported(
flEnclaveType: DWORD,
) -> BOOL;
pub fn CreateEnclave(
hProcess: HANDLE,
lpAddress: LPVOID,
dwSize: SIZE_T,
dwInitialCommitment: SIZE_T,
flEnclaveType: DWORD,
lpEnclaveInformation: LPCVOID,
dwInfoLength: DWORD,
lpEnclaveError: LPDWORD,
) -> LPVOID;
pub fn LoadEnclaveData(
hProcess: HANDLE,
lpAddress: LPVOID,<|fim▁hole|> nSize: SIZE_T,
flProtect: DWORD,
lpPageInformation: LPCVOID,
dwInfoLength: DWORD,
lpNumberOfBytesWritten: PSIZE_T,
lpEnclaveError: LPDWORD,
) -> BOOL;
pub fn InitializeEnclave(
hProcess: HANDLE,
lpAddress: LPVOID,
lpEnclaveInformation: LPCVOID,
dwInfoLength: DWORD,
lpEnclaveError: LPDWORD,
) -> BOOL;
pub fn LoadEnclaveImageA(
lpEnclaveAddress: LPVOID,
lpImageName: LPCSTR,
) -> BOOL;
pub fn LoadEnclaveImageW(
lpEnclaveAddress: LPVOID,
lpImageName: LPCWSTR,
) -> BOOL;
pub fn CallEnclave(
lpRoutine: LPENCLAVE_ROUTINE,
lpParameter: LPVOID,
fWaitForThread: BOOL,
lpReturnValue: *mut LPVOID,
) -> BOOL;
pub fn TerminateEnclave(
lpAddress: LPVOID,
fWait: BOOL,
) -> BOOL;
pub fn DeleteEnclave(
lpAddress: LPVOID,
) -> BOOL;
}<|fim▁end|> | lpBuffer: LPCVOID, |
<|file_name|>seqpeek_view.py<|end_file_name|><|fim▁begin|>#
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from builtins import map
from builtins import str
from builtins import object
from copy import deepcopy
import logging
from bq_data_access.v2.seqpeek.seqpeek_interpro import InterProDataProvider
logger = logging.getLogger('main_logger')
SAMPLE_ID_FIELD_NAME = 'sample_id'
TRACK_ID_FIELD = "tumor"
COORDINATE_FIELD_NAME = 'uniprot_aapos'
PROTEIN_ID_FIELD = 'ensg_id'
PROTEIN_DOMAIN_DB = 'PFAM'
SEQPEEK_VIEW_DEBUG_MODE = False
def get_number_of_unique_samples(track):
sample_ids = set()
for mutation in track['mutations']:
sample_ids.add(mutation[SAMPLE_ID_FIELD_NAME])
return len(sample_ids)
def get_number_of_mutated_positions(track):
sample_locations = set()
for mutation in track['mutations']:
sample_locations.add(mutation[COORDINATE_FIELD_NAME])
return len(sample_locations)
# TODO remove if not needed
def clean_track_mutations(mutations_array):
retval = []
for mutation in mutations_array:
cleaned = deepcopy(mutation)
cleaned[COORDINATE_FIELD_NAME] = int(mutation[COORDINATE_FIELD_NAME])
retval.append(cleaned)
return retval
def sort_track_mutations(mutations_array):
return sorted(mutations_array, key=lambda k: k[COORDINATE_FIELD_NAME])
def get_track_statistics_by_track_type(track, cohort_info_map):
track_id = track[TRACK_ID_FIELD]
result = {
'samples': {
'numberOf': get_number_of_unique_samples(track),
'mutated_positions': get_number_of_mutated_positions(track)
}
}
if track['type'] == 'tumor':
cohort_info = cohort_info_map[track_id]
result['cohort_size'] = cohort_info['size']
else:
# Do not assign cohort size for the 'COMBINED' track.
result['cohort_size'] = None
return result
def filter_protein_domains(match_array):
return [m for m in match_array if m['dbname'] == PROTEIN_DOMAIN_DB]
def get_table_row_id(tumor_type):
return "seqpeek_row_{0}".format(tumor_type)
def build_seqpeek_regions(protein_data):
return [{
'type': 'exon',
'start': 0,
'end': protein_data['length']
}]
def build_summary_track(tracks):
all = []
for track in tracks:
all.extend(track["mutations"])
return {
'mutations': all,
'label': 'COMBINED',
'tumor': 'none-combined',
'type': 'summary'
}
def get_track_label_and_cohort_information(track_id_value, cohort_info_map):
cohort_info = cohort_info_map[track_id_value]
label = cohort_info['name']
cohort_size = cohort_info['size']
return label, cohort_size
def get_track_label(track, cohort_info_array):
# The IDs in cohort_info_array are integers, whereas the track IDs are strings.
cohort_map = {str(item['id']): item['name'] for item in cohort_info_array}
return cohort_map[track[TRACK_ID_FIELD]]
def get_protein_domains(uniprot_id):
protein = InterProDataProvider().get_data(uniprot_id)
return protein
class MAFData(object):
def __init__(self, cohort_info, data):
self.cohort_info = cohort_info
self.data = data
@classmethod
def from_dict(cls, param):
return cls(param['cohort_set'], param['items'])
def build_track_data(track_id_list, all_tumor_mutations):
tracks = []
for track_id in track_id_list:
tracks.append({
TRACK_ID_FIELD: track_id,
'mutations': [m for m in all_tumor_mutations if int(track_id) in set(m['cohort'])]
})
return tracks
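# A minimal illustration of build_track_data() (hypothetical mutation dict with
# only the fields used here): build_track_data(['1'], [{'cohort': [1, 2]}])
# returns [{'tumor': '1', 'mutations': [{'cohort': [1, 2]}]}], because each
# string track id is cast back to int before testing membership in the
# mutation's 'cohort' list.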
def find_uniprot_id(mutations):
uniprot_id = None
for m in mutations:
if PROTEIN_ID_FIELD in m:
uniprot_id = m[PROTEIN_ID_FIELD]
break
return uniprot_id
def get_genes_tumors_lists_debug():
return {
'symbol_list': ['EGFR', 'TP53', 'PTEN'],
'disease_codes': ['ACC', 'BRCA', 'GBM']
}
def get_genes_tumors_lists_remote():
context = {
'symbol_list': [],
'track_id_list': []
}
return context
def get_genes_tumors_lists():
if SEQPEEK_VIEW_DEBUG_MODE:
return get_genes_tumors_lists_debug()
else:
return get_genes_tumors_lists_remote()
def get_track_id_list(param):
return list(map(str, param))
def format_removed_row_statistics_to_list(stats_dict):
result = []
for key, value in list(stats_dict.items()):
result.append({
'name': key,
'num': value
})
return result
class SeqPeekViewDataBuilder(object):
def build_view_data(self, hugo_symbol, filtered_maf_vector, seqpeek_cohort_info, cohort_id_list, removed_row_statistics, tables_used):<|fim▁hole|> context = get_genes_tumors_lists()
cohort_info_map = {str(item['id']): item for item in seqpeek_cohort_info}
track_id_list = get_track_id_list(cohort_id_list)
# Since the gene (hugo_symbol) parameter is part of the GNAB feature ID,
# it will be sanity-checked in the SeqPeekMAFDataAccess instance.
uniprot_id = find_uniprot_id(filtered_maf_vector)
logging.info("UniProt ID: " + str(uniprot_id))
protein_data = get_protein_domains(uniprot_id)
track_data = build_track_data(track_id_list, filtered_maf_vector)
plot_data = {
'gene_label': hugo_symbol,
'tracks': track_data,
'protein': protein_data
}
# Pre-processing
# - Sort mutations by chromosomal coordinate
for track in plot_data['tracks']:
track['mutations'] = sort_track_mutations(track['mutations'])
# Annotations
# - Add label, possibly human readable
# - Add type that indicates whether the track is driven by data from search or
# if the track is aggregate
for track in plot_data['tracks']:
track['type'] = 'tumor'
label, cohort_size = get_track_label_and_cohort_information(track[TRACK_ID_FIELD], cohort_info_map)
track['label'] = label
# Display the "combined" track only if more than one cohort is visualized
if len(cohort_id_list) >= 2:
plot_data['tracks'].append(build_summary_track(plot_data['tracks']))
for track in plot_data['tracks']:
# Calculate statistics
track['statistics'] = get_track_statistics_by_track_type(track, cohort_info_map)
# Unique ID for each row
track['render_info'] = {
'row_id': get_table_row_id(track[TRACK_ID_FIELD])
}
plot_data['regions'] = build_seqpeek_regions(plot_data['protein'])
plot_data['protein']['matches'] = filter_protein_domains(plot_data['protein']['matches'])
tumor_list = ','.join(track_id_list)
context.update({
'plot_data': plot_data,
'hugo_symbol': hugo_symbol,
'tumor_list': tumor_list,
'cohort_id_list': track_id_list,
'removed_row_statistics': format_removed_row_statistics_to_list(removed_row_statistics),
'bq_tables': list(set(tables_used))
})
return context<|fim▁end|> | |
<|file_name|>consts.rs<|end_file_name|><|fim▁begin|>use Pixel;
pub const ALICE_BLUE: Pixel = Pixel {
r: 240,
g: 248,
b: 255,
};
pub const ANTIQUE_WHITE: Pixel = Pixel {
r: 250,
g: 235,
b: 215,
};
pub const AQUA: Pixel = Pixel {
r: 0,
g: 255,
b: 255,
};
pub const AQUAMARINE: Pixel = Pixel {
r: 127,
g: 255,
b: 212,
};
pub const AZURE: Pixel = Pixel {
r: 240,
g: 255,
b: 255,
};
pub const BEIGE: Pixel = Pixel {
r: 245,
g: 245,
b: 220,
};
pub const BISQUE: Pixel = Pixel {
r: 255,
g: 228,
b: 196,
};
pub const BLACK: Pixel = Pixel { r: 0, g: 0, b: 0 };
pub const BLANCHED_ALMOND: Pixel = Pixel {
r: 255,
g: 235,
b: 205,
};
pub const BLUE: Pixel = Pixel { r: 0, g: 0, b: 255 };
pub const BLUE_VIOLET: Pixel = Pixel {
r: 138,
g: 43,
b: 226,
};
pub const BROWN: Pixel = Pixel {
r: 165,
g: 42,
b: 42,
};
pub const BURLYWOOD: Pixel = Pixel {
r: 222,
g: 184,
b: 135,
};
pub const CADET_BLUE: Pixel = Pixel {
r: 95,
g: 158,
b: 160,
};
pub const CHARTREUSE: Pixel = Pixel {
r: 127,
g: 255,
b: 0,
};
pub const CHOCOLATE: Pixel = Pixel {
r: 210,
g: 105,
b: 30,
};
pub const CORAL: Pixel = Pixel {
r: 255,
g: 127,
b: 80,
};
pub const CORNFLOWER_BLUE: Pixel = Pixel {
r: 100,
g: 149,
b: 237,
};
pub const CORNSILK: Pixel = Pixel {
r: 255,
g: 248,
b: 220,
};
pub const CRIMSON: Pixel = Pixel {
r: 220,
g: 20,
b: 60,
};
pub const CYAN: Pixel = Pixel {
r: 0,
g: 255,
b: 255,
};
pub const DARK_BLUE: Pixel = Pixel { r: 0, g: 0, b: 139 };
pub const DARK_CYAN: Pixel = Pixel {
r: 0,
g: 139,
b: 139,
};
pub const DARK_GOLDENROD: Pixel = Pixel {
r: 184,
g: 134,
b: 11,
};
pub const DARK_GRAY: Pixel = Pixel {
r: 169,
g: 169,
b: 169,
};
pub const DARK_GREEN: Pixel = Pixel { r: 0, g: 100, b: 0 };
pub const DARK_GREY: Pixel = Pixel {
r: 169,
g: 169,
b: 169,
};
pub const DARK_KHAKI: Pixel = Pixel {
r: 189,
g: 183,
b: 107,
};
pub const DARK_MAGENTA: Pixel = Pixel {
r: 139,
g: 0,
b: 139,
};
pub const DARK_OLIVE_GREEN: Pixel = Pixel {
r: 85,
g: 107,
b: 47,
};
pub const DARK_ORANGE: Pixel = Pixel {
r: 255,
g: 140,
b: 0,
};
pub const DARK_ORCHID: Pixel = Pixel {
r: 153,
g: 50,
b: 204,
};
pub const DARK_RED: Pixel = Pixel { r: 139, g: 0, b: 0 };
pub const DARK_SALMON: Pixel = Pixel {
r: 233,
g: 150,
b: 122,
};
pub const DARK_SEAGREEN: Pixel = Pixel {
r: 143,
g: 188,
b: 143,
};
pub const DARK_SLATE_BLUE: Pixel = Pixel {
r: 72,
g: 61,
b: 139,
};
pub const DARK_SLATE_GRAY: Pixel = Pixel {
r: 47,
g: 79,
b: 79,
};
pub const DARK_SLATE_GREY: Pixel = Pixel {
r: 47,
g: 79,
b: 79,
};
pub const DARK_TURQUOISE: Pixel = Pixel {
r: 0,
g: 206,
b: 209,
};
pub const DARK_VIOLET: Pixel = Pixel {
r: 148,
g: 0,
b: 211,
};
pub const DEEP_PINK: Pixel = Pixel {
r: 255,
g: 20,
b: 147,
};
pub const DEEP_SKYBLUE: Pixel = Pixel {<|fim▁hole|>};
pub const DIM_GRAY: Pixel = Pixel {
r: 105,
g: 105,
b: 105,
};
pub const DIM_GREY: Pixel = Pixel {
r: 105,
g: 105,
b: 105,
};
pub const DODGER_BLUE: Pixel = Pixel {
r: 30,
g: 144,
b: 255,
};
pub const FIREBRICK: Pixel = Pixel {
r: 178,
g: 34,
b: 34,
};
pub const FLORAL_WHITE: Pixel = Pixel {
r: 255,
g: 250,
b: 240,
};
pub const FOREST_GREEN: Pixel = Pixel {
r: 34,
g: 139,
b: 34,
};
pub const FUCHSIA: Pixel = Pixel {
r: 255,
g: 0,
b: 255,
};
pub const GAINSBORO: Pixel = Pixel {
r: 220,
g: 220,
b: 220,
};
pub const GHOST_WHITE: Pixel = Pixel {
r: 248,
g: 248,
b: 255,
};
pub const GOLD: Pixel = Pixel {
r: 255,
g: 215,
b: 0,
};
pub const GOLDENROD: Pixel = Pixel {
r: 218,
g: 165,
b: 32,
};
pub const GRAY: Pixel = Pixel {
r: 128,
g: 128,
b: 128,
};
pub const GREY: Pixel = Pixel {
r: 128,
g: 128,
b: 128,
};
pub const GREEN: Pixel = Pixel { r: 0, g: 128, b: 0 };
pub const GREEN_YELLOW: Pixel = Pixel {
r: 173,
g: 255,
b: 47,
};
pub const HONEYDEW: Pixel = Pixel {
r: 240,
g: 255,
b: 240,
};
pub const HOT_PINK: Pixel = Pixel {
r: 255,
g: 105,
b: 180,
};
pub const INDIAN_RED: Pixel = Pixel {
r: 205,
g: 92,
b: 92,
};
pub const INDIGO: Pixel = Pixel {
r: 75,
g: 0,
b: 130,
};
pub const IVORY: Pixel = Pixel {
r: 255,
g: 255,
b: 240,
};
pub const KHAKI: Pixel = Pixel {
r: 240,
g: 230,
b: 140,
};
pub const LAVENDER: Pixel = Pixel {
r: 230,
g: 230,
b: 250,
};
pub const LAVENDERBLUSH: Pixel = Pixel {
r: 255,
g: 240,
b: 245,
};
pub const LAWN_GREEN: Pixel = Pixel {
r: 124,
g: 252,
b: 0,
};
pub const LEMON_CHIFFON: Pixel = Pixel {
r: 255,
g: 250,
b: 205,
};
pub const LIGHT_BLUE: Pixel = Pixel {
r: 173,
g: 216,
b: 230,
};
pub const LIGHT_CORAL: Pixel = Pixel {
r: 240,
g: 128,
b: 128,
};
pub const LIGHT_CYAN: Pixel = Pixel {
r: 224,
g: 255,
b: 255,
};
pub const LIGHT_GOLDENROD_YELLOW: Pixel = Pixel {
r: 250,
g: 250,
b: 210,
};
pub const LIGHT_GRAY: Pixel = Pixel {
r: 211,
g: 211,
b: 211,
};
pub const LIGHT_GREEN: Pixel = Pixel {
r: 144,
g: 238,
b: 144,
};
pub const LIGHT_GREY: Pixel = Pixel {
r: 211,
g: 211,
b: 211,
};
pub const LIGHT_PINK: Pixel = Pixel {
r: 255,
g: 182,
b: 193,
};
pub const LIGHT_SALMON: Pixel = Pixel {
r: 255,
g: 160,
b: 122,
};
pub const LIGHT_SEAGREEN: Pixel = Pixel {
r: 32,
g: 178,
b: 170,
};
pub const LIGHT_SKYBLUE: Pixel = Pixel {
r: 135,
g: 206,
b: 250,
};
pub const LIGHT_SLATE_GRAY: Pixel = Pixel {
r: 119,
g: 136,
b: 153,
};
pub const LIGHT_SLATE_GREY: Pixel = Pixel {
r: 119,
g: 136,
b: 153,
};
pub const LIGHT_STEEL_BLUE: Pixel = Pixel {
r: 176,
g: 196,
b: 222,
};
pub const LIGHT_YELLOW: Pixel = Pixel {
r: 255,
g: 255,
b: 224,
};
pub const LIME: Pixel = Pixel { r: 0, g: 255, b: 0 };
pub const LIME_GREEN: Pixel = Pixel {
r: 50,
g: 205,
b: 50,
};
pub const LINEN: Pixel = Pixel {
r: 250,
g: 240,
b: 230,
};
pub const MAGENTA: Pixel = Pixel {
r: 255,
g: 0,
b: 255,
};
pub const MAROON: Pixel = Pixel { r: 128, g: 0, b: 0 };
pub const MEDIUM_AQUAMARINE: Pixel = Pixel {
r: 102,
g: 205,
b: 170,
};
pub const MEDIUM_BLUE: Pixel = Pixel { r: 0, g: 0, b: 205 };
pub const MEDIUM_ORCHID: Pixel = Pixel {
r: 186,
g: 85,
b: 211,
};
pub const MEDIUM_PURPLE: Pixel = Pixel {
r: 147,
g: 112,
b: 219,
};
pub const MEDIUM_SEAGREEN: Pixel = Pixel {
r: 60,
g: 179,
b: 113,
};
pub const MEDIUM_SLATE_BLUE: Pixel = Pixel {
r: 123,
g: 104,
b: 238,
};
pub const MEDIUM_SPRING_GREEN: Pixel = Pixel {
r: 0,
g: 250,
b: 154,
};
pub const MEDIUM_TURQUOISE: Pixel = Pixel {
r: 72,
g: 209,
b: 204,
};
pub const MEDIUM_VIOLETRED: Pixel = Pixel {
r: 199,
g: 21,
b: 133,
};
pub const MIDNIGHT_BLUE: Pixel = Pixel {
r: 25,
g: 25,
b: 112,
};
pub const MINT_CREAM: Pixel = Pixel {
r: 245,
g: 255,
b: 250,
};
pub const MISTY_ROSE: Pixel = Pixel {
r: 255,
g: 228,
b: 225,
};
pub const MOCCASIN: Pixel = Pixel {
r: 255,
g: 228,
b: 181,
};
pub const NAVAJO_WHITE: Pixel = Pixel {
r: 255,
g: 222,
b: 173,
};
pub const NAVY: Pixel = Pixel { r: 0, g: 0, b: 128 };
pub const OLD_LACE: Pixel = Pixel {
r: 253,
g: 245,
b: 230,
};
pub const OLIVE: Pixel = Pixel {
r: 128,
g: 128,
b: 0,
};
pub const OLIVE_DRAB: Pixel = Pixel {
r: 107,
g: 142,
b: 35,
};
pub const ORANGE: Pixel = Pixel {
r: 255,
g: 165,
b: 0,
};
pub const ORANGE_RED: Pixel = Pixel {
r: 255,
g: 69,
b: 0,
};
pub const ORCHID: Pixel = Pixel {
r: 218,
g: 112,
b: 214,
};
pub const PALE_GOLDENROD: Pixel = Pixel {
r: 238,
g: 232,
b: 170,
};
pub const PALE_GREEN: Pixel = Pixel {
r: 152,
g: 251,
b: 152,
};
pub const PALE_TURQUOISE: Pixel = Pixel {
r: 175,
g: 238,
b: 238,
};
pub const PALE_VIOLETRED: Pixel = Pixel {
r: 219,
g: 112,
b: 147,
};
pub const PAPAYAWHIP: Pixel = Pixel {
r: 255,
g: 239,
b: 213,
};
pub const PEACHPUFF: Pixel = Pixel {
r: 255,
g: 218,
b: 185,
};
pub const PERU: Pixel = Pixel {
r: 205,
g: 133,
b: 63,
};
pub const PINK: Pixel = Pixel {
r: 255,
g: 192,
b: 203,
};
pub const PLUM: Pixel = Pixel {
r: 221,
g: 160,
b: 221,
};
pub const POWDER_BLUE: Pixel = Pixel {
r: 176,
g: 224,
b: 230,
};
pub const PURPLE: Pixel = Pixel {
r: 128,
g: 0,
b: 128,
};
pub const RED: Pixel = Pixel { r: 255, g: 0, b: 0 };
pub const ROSY_BROWN: Pixel = Pixel {
r: 188,
g: 143,
b: 143,
};
pub const ROYAL_BLUE: Pixel = Pixel {
r: 65,
g: 105,
b: 225,
};
pub const SADDLE_BROWN: Pixel = Pixel {
r: 139,
g: 69,
b: 19,
};
pub const SALMON: Pixel = Pixel {
r: 250,
g: 128,
b: 114,
};
pub const SANDY_BROWN: Pixel = Pixel {
r: 244,
g: 164,
b: 96,
};
pub const SEAGREEN: Pixel = Pixel {
r: 46,
g: 139,
b: 87,
};
pub const SEASHELL: Pixel = Pixel {
r: 255,
g: 245,
b: 238,
};
pub const SIENNA: Pixel = Pixel {
r: 160,
g: 82,
b: 45,
};
pub const SILVER: Pixel = Pixel {
r: 192,
g: 192,
b: 192,
};
pub const SKYBLUE: Pixel = Pixel {
r: 135,
g: 206,
b: 235,
};
pub const SLATE_BLUE: Pixel = Pixel {
r: 106,
g: 90,
b: 205,
};
pub const SLATE_GRAY: Pixel = Pixel {
r: 112,
g: 128,
b: 144,
};
pub const SLATE_GREY: Pixel = Pixel {
r: 112,
g: 128,
b: 144,
};
pub const SNOW: Pixel = Pixel {
r: 255,
g: 250,
b: 250,
};
pub const SPRING_GREEN: Pixel = Pixel {
r: 0,
g: 255,
b: 127,
};
pub const STEEL_BLUE: Pixel = Pixel {
r: 70,
g: 130,
b: 180,
};
pub const TAN: Pixel = Pixel {
r: 210,
g: 180,
b: 140,
};
pub const TEAL: Pixel = Pixel {
r: 0,
g: 128,
b: 128,
};
pub const THISTLE: Pixel = Pixel {
r: 216,
g: 191,
b: 216,
};
pub const TOMATO: Pixel = Pixel {
r: 255,
g: 99,
b: 71,
};
pub const TURQUOISE: Pixel = Pixel {
r: 64,
g: 224,
b: 208,
};
pub const VIOLET: Pixel = Pixel {
r: 238,
g: 130,
b: 238,
};
pub const WHEAT: Pixel = Pixel {
r: 245,
g: 222,
b: 179,
};
pub const WHITE: Pixel = Pixel {
r: 255,
g: 255,
b: 255,
};
pub const WHITE_SMOKE: Pixel = Pixel {
r: 245,
g: 245,
b: 245,
};
pub const YELLOW: Pixel = Pixel {
r: 255,
g: 255,
b: 0,
};
pub const YELLOW_GREEN: Pixel = Pixel {
r: 154,
g: 205,
b: 50,
};<|fim▁end|> | r: 0,
g: 191,
b: 255, |
<|file_name|>adt-brace-structs.rs<|end_file_name|><|fim▁begin|>// Unit test for the "user substitutions" that are annotated on each
// node.
struct SomeStruct<T> { t: T }
fn no_annot() {
let c = 66;
SomeStruct { t: &c };
}
fn annot_underscore() {
let c = 66;
SomeStruct::<_> { t: &c };
}
fn annot_reference_any_lifetime() {
let c = 66;
SomeStruct::<&u32> { t: &c };
}
fn annot_reference_static_lifetime() {
let c = 66;
SomeStruct::<&'static u32> { t: &c }; //~ ERROR
}
<|fim▁hole|> let c = 66;
SomeStruct::<&'a u32> { t: &c }; //~ ERROR
}
fn annot_reference_named_lifetime_ok<'a>(c: &'a u32) {
SomeStruct::<&'a u32> { t: c };
}
fn annot_reference_named_lifetime_in_closure<'a>(_: &'a u32) {
let _closure = || {
let c = 66;
SomeStruct::<&'a u32> { t: &c }; //~ ERROR
};
}
fn annot_reference_named_lifetime_in_closure_ok<'a>(c: &'a u32) {
let _closure = || {
SomeStruct::<&'a u32> { t: c };
};
}
fn main() { }<|fim▁end|> | fn annot_reference_named_lifetime<'a>(_d: &'a u32) { |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>"""
flask.ext.restless.views
~~~~~~~~~~~~~~~~~~~~~~~~
Provides the following view classes, subclasses of
:class:`flask.MethodView` which provide generic endpoints for interacting
with an entity of the database:
:class:`flask.ext.restless.views.API`
Provides the endpoints for each of the basic HTTP methods. This is the
main class used by the
:meth:`flask.ext.restless.manager.APIManager.create_api` method to create
endpoints.
:class:`flask.ext.restless.views.FunctionAPI`
Provides a :http:method:`get` endpoint which returns the result of
evaluating some function on the entire collection of a given model.
:copyright: 2011 by Lincoln de Sousa <[email protected]>
:copyright: 2012, 2013, 2014, 2015 Jeffrey Finkelstein
<[email protected]> and contributors.
:license: GNU AGPLv3+ or BSD
"""
from __future__ import division
from collections import defaultdict
from functools import wraps
import math
import warnings
from flask import current_app
from flask import json
from flask import jsonify as _jsonify
from flask import request
from flask.views import MethodView
from mimerender import FlaskMimeRender
from sqlalchemy import Column
from sqlalchemy.exc import DataError
from sqlalchemy.exc import IntegrityError
from sqlalchemy.exc import OperationalError
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.ext.associationproxy import AssociationProxy
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.query import Query
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import HTTPException
from werkzeug.urls import url_quote_plus
from .helpers import count
from .helpers import evaluate_functions
from .helpers import get_by
from .helpers import get_columns
from .helpers import get_or_create
from .helpers import get_related_model
from .helpers import get_relations
from .helpers import has_field
from .helpers import is_like_list
from .helpers import partition
from .helpers import primary_key_name
from .helpers import query_by_primary_key
from .helpers import session_query
from .helpers import strings_to_dates
from .helpers import to_dict
from .helpers import upper_keys
from .helpers import get_related_association_proxy_model
from .search import create_query
from .search import search
#: Format string for creating Link headers in paginated responses.
LINKTEMPLATE = '<{0}?page={1}&results_per_page={2}>; rel="{3}"'
#: String used internally as a dictionary key for passing header information
#: from view functions to the :func:`jsonpify` function.
_HEADERS = '__restless_headers'
#: String used internally as a dictionary key for passing status code
#: information from view functions to the :func:`jsonpify` function.
_STATUS = '__restless_status_code'
class ProcessingException(HTTPException):
"""Raised when a preprocessor or postprocessor encounters a problem.
This exception should be raised by functions supplied in the
``preprocessors`` and ``postprocessors`` keyword arguments to
:class:`APIManager.create_api`. When this exception is raised, all
preprocessing or postprocessing halts, so any processors appearing later in
the list will not be invoked.
`code` is the HTTP status code of the response supplied to the client in
the case that this exception is raised. `description` is an error message
describing the cause of this exception. This message will appear in the
JSON object in the body of the response to the client.
"""
def __init__(self, description='', code=400, *args, **kwargs):
super(ProcessingException, self).__init__(*args, **kwargs)
self.code = code
self.description = description
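# A minimal sketch of how ProcessingException is meant to be used (the
# preprocessor and the authentication helper below are hypothetical, not part
# of this module):
#
#     def deny_anonymous(**kw):
#         if not current_user_is_authenticated():  # hypothetical helper
#             raise ProcessingException(description='Not authenticated', code=401)
#
#     manager.create_api(Person, preprocessors=dict(POST=[deny_anonymous]))
#
# When the preprocessor raises, catch_processing_exceptions() below converts the
# exception into a JSON error response with the given status code.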
class NotAuthorizedException(HTTPException):
"""Raised whenever you want a 403.
"""
def __init__(self, description='Not Authorized', code=403, *args, **kwargs):
super(NotAuthorizedException, self).__init__(*args, **kwargs)
self.code = code
self.description = description
class ValidationError(Exception):
"""Raised when there is a problem deserializing a dictionary into an
instance of a SQLAlchemy model.
"""
pass
def _is_msie8or9():
"""Returns ``True`` if and only if the user agent of the client making the
request indicates that it is Microsoft Internet Explorer 8 or 9.
.. note::
We have no way of knowing if the user agent is lying, so we just make
our best guess based on the information provided.
"""
# request.user_agent.version comes as a string, so we have to parse it
version = lambda ua: tuple(int(d) for d in ua.version.split('.'))
return (request.user_agent is not None
and request.user_agent.version is not None
and request.user_agent.browser == 'msie'
and (8, 0) <= version(request.user_agent) < (10, 0))
def create_link_string(page, last_page, per_page):
"""Returns a string representing the value of the ``Link`` header.
`page` is the number of the current page, `last_page` is the last page in
the pagination, and `per_page` is the number of results per page.
"""
linkstring = ''
if page < last_page:
next_page = page + 1
linkstring = LINKTEMPLATE.format(request.base_url, next_page,
per_page, 'next') + ', '
linkstring += LINKTEMPLATE.format(request.base_url, last_page,
per_page, 'last')
return linkstring
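# Example (hypothetical values): with request.base_url equal to
# 'http://example.com/api/person', create_link_string(2, 5, 10) returns
# '<http://example.com/api/person?page=3&results_per_page=10>; rel="next", '
# followed by
# '<http://example.com/api/person?page=5&results_per_page=10>; rel="last"'.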
def catch_processing_exceptions(func):
"""Decorator that catches :exc:`ProcessingException`s and subsequently
returns a JSON-ified error response.
"""
@wraps(func)
def decorator(*args, **kw):
try:
return func(*args, **kw)
except ProcessingException as exception:
status = exception.code
message = exception.description or str(exception)
return jsonify(message=message), status
return decorator
def catch_integrity_errors(session):
"""Returns a decorator that catches database integrity errors.
`session` is the SQLAlchemy session in which all database transactions will
be performed.
View methods can be wrapped like this::
@catch_integrity_errors(session)
def get(self, *args, **kw):
return '...'
Specifically, functions wrapped with the returned decorator catch
:exc:`IntegrityError`s, :exc:`DataError`s, and
:exc:`ProgrammingError`s. After the exceptions are caught, the session is
rolled back, the exception is logged on the current Flask application, and
an error response is returned to the client.
"""
def decorator(func):
@wraps(func)
def wrapped(*args, **kw):
try:
return func(*args, **kw)
# TODO should `sqlalchemy.exc.InvalidRequestError`s also be caught?
except (DataError, IntegrityError, ProgrammingError) as exception:
session.rollback()
current_app.logger.exception(str(exception))
return dict(message=type(exception).__name__), 400
return wrapped
return decorator
def set_headers(response, headers):
"""Sets the specified headers on the specified response.
`response` is a Flask response object, and `headers` is a dictionary of
headers to set on the specified response. Any existing headers that
conflict with `headers` will be overwritten.
"""
for key, value in headers.items():<|fim▁hole|>
def jsonify(*args, **kw):
"""Same as :func:`flask.jsonify`, but sets response headers.
If ``headers`` is a keyword argument, this function will construct the JSON
response via :func:`flask.jsonify`, then set the specified ``headers`` on
the response. ``headers`` must be a dictionary mapping strings to strings.
"""
response = _jsonify(*args, **kw)
if 'headers' in kw:
set_headers(response, kw['headers'])
return response
# This code is (lightly) adapted from the ``requests`` library, in the
# ``requests.utils`` module. See <http://python-requests.org> for more
# information.
def _link_to_json(value):
"""Returns a list representation of the specified HTTP Link header
information.
`value` is a string containing the link header information. If the link
header information (the part of after ``Link:``) looked like this::
<url1>; rel="next", <url2>; rel="foo"; bar="baz"
then this function returns a list that looks like this::
[{"url": "url1", "rel": "next"},
{"url": "url2", "rel": "foo", "bar": "baz"}]
This example is adapted from the documentation of GitHub's API.
"""
links = []
replace_chars = " '\""
for val in value.split(","):
try:
url, params = val.split(";", 1)
except ValueError:
url, params = val, ''
link = {}
link["url"] = url.strip("<> '\"")
for param in params.split(";"):
try:
key, value = param.split("=")
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
def _headers_to_json(headers):
"""Returns a dictionary representation of the specified dictionary of HTTP
headers ready for use as a JSON object.
Pre-condition: headers is not ``None``.
"""
link = headers.pop('Link', None)
# Shallow copy is fine here because the `headers` dictionary maps strings
# to strings to strings.
result = headers.copy()
if link:
result['Link'] = _link_to_json(link)
return result
def jsonpify(*args, **kw):
"""Passes the specified arguments directly to :func:`jsonify` with a status
code of 200, then wraps the response with the name of a JSON-P callback
function specified as a query parameter called ``'callback'`` (or does
nothing if no such callback function is specified in the request).
If the keyword arguments include the string specified by :data:`_HEADERS`,
its value must be a dictionary specifying headers to set before sending the
JSONified response to the client. Headers on the response will be
overwritten by headers specified in this dictionary.
If the keyword arguments include the string specified by :data:`_STATUS`,
its value must be an integer representing the status code of the response.
Otherwise, the status code of the response will be :http:status:`200`.
"""
# HACK In order to make the headers and status code available in the
# content of the response, we need to send it from the view function to
# this jsonpify function via its keyword arguments. This is a limitation of
# the mimerender library: it has no way of making the headers and status
# code known to the rendering functions.
headers = kw.pop(_HEADERS, {})
status_code = kw.pop(_STATUS, 200)
response = jsonify(*args, **kw)
callback = request.args.get('callback', False)
if callback:
# Reload the data from the constructed JSON string so we can wrap it in
# a JSONP function.
data = json.loads(response.data)
# Force the 'Content-Type' header to be 'application/javascript'.
#
# Note that this is different from the mimetype used in Flask for JSON
# responses; Flask uses 'application/json'. We use
# 'application/javascript' because a JSONP response is valid
# Javascript, but not valid JSON.
headers['Content-Type'] = 'application/javascript'
# Add the headers and status code as metadata to the JSONP response.
meta = _headers_to_json(headers) if headers is not None else {}
meta['status'] = status_code
inner = json.dumps(dict(meta=meta, data=data))
content = '{0}({1})'.format(callback, inner)
# Note that this is different from the mimetype used in Flask for JSON
# responses; Flask uses 'application/json'. We use
# 'application/javascript' because a JSONP response is not valid JSON.
mimetype = 'application/javascript'
response = current_app.response_class(content, mimetype=mimetype)
# Set the headers on the HTTP response as well.
if headers:
set_headers(response, headers)
response.status_code = status_code
return response
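# For instance (hypothetical request), GET /api/person?callback=foo makes
# jsonpify() return an 'application/javascript' response whose body has the form
# foo({"meta": {"status": 200, ...}, "data": {...the usual JSON payload...}}),
# with any Link header expanded into the "meta" object by _headers_to_json().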
def _parse_includes(column_names):
"""Returns a pair, consisting of a list of column names to include on the
left and a dictionary mapping relation name to a list containing the names
of fields on the related model which should be included.
`column_names` must be a list of strings.
If the name of a relation appears as a key in the dictionary, then it will
not appear in the list.
"""
dotted_names, columns = partition(column_names, lambda name: '.' in name)
# Create a dictionary mapping relation names to fields on the related
# model.
relations = defaultdict(list)
for name in dotted_names:
relation, field = name.split('.', 1)
        # Only add the relation if its column has been specified.
if relation in columns:
relations[relation].append(field)
# Included relations need only be in the relations dictionary, not the
# columns list.
for relation in relations:
if relation in columns:
columns.remove(relation)
return columns, relations
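# For example (hypothetical column names):
# _parse_includes(['name', 'computers', 'computers.vendor']) returns
# (['name'], {'computers': ['vendor']}) (the mapping is a defaultdict):
# 'computers' moves out of the column list and into the relations dictionary
# because one of its fields was requested.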
def _parse_excludes(column_names):
"""Returns a pair, consisting of a list of column names to exclude on the
left and a dictionary mapping relation name to a list containing the names
of fields on the related model which should be excluded.
`column_names` must be a list of strings.
If the name of a relation appears in the list then it will not appear in
the dictionary.
"""
dotted_names, columns = partition(column_names, lambda name: '.' in name)
# Create a dictionary mapping relation names to fields on the related
# model.
relations = defaultdict(list)
for name in dotted_names:
relation, field = name.split('.', 1)
        # Only add the relation if its column has not been specified.
if relation not in columns:
relations[relation].append(field)
# Relations which are to be excluded entirely need only be in the columns
# list, not the relations dictionary.
for column in columns:
if column in relations:
del relations[column]
return columns, relations
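# For example (hypothetical column names):
# _parse_excludes(['name', 'computers.vendor']) returns
# (['name'], {'computers': ['vendor']}), whereas
# _parse_excludes(['computers', 'computers.vendor']) returns (['computers'], {}),
# since excluding the whole relation makes the per-field entry redundant.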
def extract_error_messages(exception):
"""Tries to extract a dictionary mapping field name to validation error
messages from `exception`, which is a validation exception as provided in
the ``validation_exceptions`` keyword argument in the constructor of this
class.
Since the type of the exception is provided by the user in the constructor
of this class, we don't know for sure where the validation error messages
live inside `exception`. Therefore this method simply attempts to access a
few likely attributes and returns the first one it finds (or ``None`` if no
error messages dictionary can be extracted).
"""
# 'errors' comes from sqlalchemy_elixir_validations
if hasattr(exception, 'errors'):
return exception.errors
# 'message' comes from savalidation
if hasattr(exception, 'message'):
# TODO this works only if there is one validation error
try:
left, right = str(exception).rsplit(':', 1)
left_bracket = left.rindex('[')
right_bracket = right.rindex(']')
except ValueError as exc:
current_app.logger.exception(str(exc))
# could not parse the string; we're not trying too hard here...
return None
msg = right[:right_bracket].strip(' "')
fieldname = left[left_bracket + 1:].strip()
return {fieldname: msg}
return None
#: Creates the mimerender object necessary for decorating responses with a
#: function that automatically formats the dictionary in the appropriate format
#: based on the ``Accept`` header.
#:
#: Technical details: the first pair of parentheses instantiates the
#: :class:`mimerender.FlaskMimeRender` class. The second pair of parentheses
#: creates the decorator, so that we can simply use the variable ``mimerender``
#: as a decorator.
# TODO fill in xml renderer
mimerender = FlaskMimeRender()(default='json', json=jsonpify)
class ModelView(MethodView):
"""Base class for :class:`flask.MethodView` classes which represent a view
of a SQLAlchemy model.
The model class for this view can be accessed from the :attr:`model`
attribute, and the session in which all database transactions will be
performed when dealing with this model can be accessed from the
:attr:`session` attribute.
When subclasses wish to make queries to the database model specified in the
constructor, they should access the ``self.query`` function, which
delegates to the appropriate SQLAlchemy query object or Flask-SQLAlchemy
query object, depending on how the model has been defined.
"""
#: List of decorators applied to every method of this class.
decorators = [mimerender]
def __init__(self, session, model, *args, **kw):
"""Calls the constructor of the superclass and specifies the model for
which this class provides a ReSTful API.
`session` is the SQLAlchemy session in which all database transactions
will be performed.
        `model` is the SQLAlchemy declarative model class of the database model
for which this instance of the class is an API.
"""
super(ModelView, self).__init__(*args, **kw)
self.session = session
self.model = model
def query(self, model=None):
"""Returns either a SQLAlchemy query or Flask-SQLAlchemy query object
(depending on the type of the model) on the specified `model`, or if
`model` is ``None``, the model specified in the constructor of this
class.
"""
return session_query(self.session, model or self.model)
class FunctionAPI(ModelView):
"""Provides method-based dispatching for :http:method:`get` requests which
wish to apply SQL functions to all instances of a model.
.. versionadded:: 0.4
"""
def get(self):
"""Returns the result of evaluating the SQL functions specified in the
body of the request.
For a description of the request and response formats, see
:ref:`functionevaluation`.
"""
if 'q' not in request.args or not request.args.get('q'):
return dict(message='Empty query parameter'), 400
# if parsing JSON fails, return a 400 error in JSON format
try:
data = json.loads(str(request.args.get('q'))) or {}
except (TypeError, ValueError, OverflowError) as exception:
current_app.logger.exception(str(exception))
return dict(message='Unable to decode data'), 400
try:
result = evaluate_functions(self.session, self.model,
data.get('functions', []))
if not result:
return {}, 204
return result
except AttributeError as exception:
current_app.logger.exception(str(exception))
message = 'No such field "{0}"'.format(exception.field)
return dict(message=message), 400
except OperationalError as exception:
current_app.logger.exception(str(exception))
message = 'No such function "{0}"'.format(exception.function)
return dict(message=message), 400
class API(ModelView):
"""Provides method-based dispatching for :http:method:`get`,
:http:method:`post`, :http:method:`patch`, :http:method:`put`, and
:http:method:`delete` requests, for both collections of models and
individual models.
"""
#: List of decorators applied to every method of this class.
decorators = ModelView.decorators + [catch_processing_exceptions]
def __init__(self, session, model, exclude_columns=None,
include_columns=None, include_methods=None,
validation_exceptions=None, results_per_page=10,
max_results_per_page=100, post_form_preprocessor=None,
preprocessors=None, postprocessors=None, primary_key=None,
serializer=None, deserializer=None, *args, **kw):
"""Instantiates this view with the specified attributes.
`session` is the SQLAlchemy session in which all database transactions
will be performed.
`model` is the SQLAlchemy model class for which this instance of the
        class is an API. This model should be queryable via the provided `session`.
`validation_exceptions` is the tuple of exceptions raised by backend
validation (if any exist). If exceptions are specified here, any
        exceptions which are caught when writing to the database will be
returned to the client as a :http:statuscode:`400` response with a
message specifying the validation error which occurred. For more
information, see :ref:`validation`.
If either `include_columns` or `exclude_columns` is not ``None``,
exactly one of them must be specified. If both are not ``None``, then
the behavior of this function is undefined. `exclude_columns` must be
an iterable of strings specifying the columns of `model` which will
*not* be present in the JSON representation of the model provided in
response to :http:method:`get` requests. Similarly, `include_columns`
specifies the *only* columns which will be present in the returned
dictionary. In other words, `exclude_columns` is a blacklist and
`include_columns` is a whitelist; you can only use one of them per API
endpoint. If either `include_columns` or `exclude_columns` contains a
string which does not name a column in `model`, it will be ignored.
If `include_columns` is an iterable of length zero (like the empty
tuple or the empty list), then the returned dictionary will be
empty. If `include_columns` is ``None``, then the returned dictionary
will include all columns not excluded by `exclude_columns`.
If `include_methods` is an iterable of strings, the methods with names
corresponding to those in this list will be called and their output
included in the response.
See :ref:`includes` for information on specifying included or excluded
columns on fields of related models.
`results_per_page` is a positive integer which represents the default
number of results which are returned per page. Requests made by clients
may override this default by specifying ``results_per_page`` as a query
argument. `max_results_per_page` is a positive integer which represents
the maximum number of results which are returned per page. This is a
"hard" upper bound in the sense that even if a client specifies that
greater than `max_results_per_page` should be returned, only
`max_results_per_page` results will be returned. For more information,
see :ref:`serverpagination`.
.. deprecated:: 0.9.2
The `post_form_preprocessor` keyword argument is deprecated in
version 0.9.2. It will be removed in version 1.0. Replace code that
looks like this::
manager.create_api(Person, post_form_preprocessor=foo)
with code that looks like this::
manager.create_api(Person, preprocessors=dict(POST=[foo]))
See :ref:`processors` for more information and examples.
`post_form_preprocessor` is a callback function which takes
POST input parameters loaded from JSON and enhances them with other
key/value pairs. The example use of this is when your ``model``
requires to store user identity and for security reasons the identity
is not read from the post parameters (where malicious user can tamper
with them) but from the session.
`preprocessors` is a dictionary mapping strings to lists of
functions. Each key is the name of an HTTP method (for example,
``'GET'`` or ``'POST'``). Each value is a list of functions, each of
which will be called before any other code is executed when this API
receives the corresponding HTTP request. The functions will be called
in the order given here. The `postprocessors` keyword argument is
essentially the same, except the given functions are called after all
other code. For more information on preprocessors and postprocessors,
see :ref:`processors`.
`primary_key` is a string specifying the name of the column of `model`
to use as the primary key for the purposes of creating URLs. If the
`model` has exactly one primary key, there is no need to provide a
value for this. If `model` has two or more primary keys, you must
specify which one to use.
`serializer` and `deserializer` are custom serialization functions. The
former function must take a single argument representing the instance
of the model to serialize, and must return a dictionary representation
of that instance. The latter function must take a single argument
representing the dictionary representation of an instance of the model
and must return an instance of `model` that has those attributes. For
more information, see :ref:`serialization`.
.. versionadded:: 0.17.0
Added the `serializer` and `deserializer` keyword arguments.
.. versionadded:: 0.13.0
Added the `primary_key` keyword argument.
.. versionadded:: 0.10.2
Added the `include_methods` keyword argument.
.. versionchanged:: 0.10.0
Removed `authentication_required_for` and `authentication_function`
keyword arguments.
Use the `preprocesors` and `postprocessors` keyword arguments
instead. For more information, see :ref:`authentication`.
.. versionadded:: 0.9.2
Added the `preprocessors` and `postprocessors` keyword arguments.
.. versionadded:: 0.9.0
Added the `max_results_per_page` keyword argument.
.. versionadded:: 0.7
Added the `exclude_columns` keyword argument.
.. versionadded:: 0.6
Added the `results_per_page` keyword argument.
.. versionadded:: 0.5
           Added the `include_columns` and `validation_exceptions` keyword
arguments.
.. versionadded:: 0.4
Added the `authentication_required_for` and
`authentication_function` keyword arguments.
"""
super(API, self).__init__(session, model, *args, **kw)
if exclude_columns is None:
self.exclude_columns, self.exclude_relations = (None, None)
else:
self.exclude_columns, self.exclude_relations = _parse_excludes(
[self._get_column_name(column) for column in exclude_columns])
if include_columns is None:
self.include_columns, self.include_relations = (None, None)
else:
self.include_columns, self.include_relations = _parse_includes(
[self._get_column_name(column) for column in include_columns])
self.include_methods = include_methods
self.validation_exceptions = tuple(validation_exceptions or ())
self.results_per_page = results_per_page
self.max_results_per_page = max_results_per_page
self.primary_key = primary_key
# Use our default serializer and deserializer if none are specified.
if serializer is None:
self.serialize = self._inst_to_dict
else:
self.serialize = serializer
if deserializer is None:
self.deserialize = self._dict_to_inst
# And check for our own default ValidationErrors here
self.validation_exceptions = tuple(list(self.validation_exceptions)
+ [ValidationError])
else:
self.deserialize = deserializer
self.postprocessors = defaultdict(list)
self.preprocessors = defaultdict(list)
self.postprocessors.update(upper_keys(postprocessors or {}))
self.preprocessors.update(upper_keys(preprocessors or {}))
# move post_form_preprocessor to preprocessors['POST'] for backward
# compatibility
if post_form_preprocessor:
msg = ('post_form_preprocessor is deprecated and will be removed'
' in version 1.0; use preprocessors instead.')
warnings.warn(msg, DeprecationWarning)
self.preprocessors['POST'].append(post_form_preprocessor)
# postprocessors for PUT are applied to PATCH because PUT is just a
# redirect to PATCH
for postprocessor in self.postprocessors['PUT_SINGLE']:
self.postprocessors['PATCH_SINGLE'].append(postprocessor)
for preprocessor in self.preprocessors['PUT_SINGLE']:
self.preprocessors['PATCH_SINGLE'].append(preprocessor)
for postprocessor in self.postprocessors['PUT_MANY']:
self.postprocessors['PATCH_MANY'].append(postprocessor)
for preprocessor in self.preprocessors['PUT_MANY']:
self.preprocessors['PATCH_MANY'].append(preprocessor)
# HACK: We would like to use the :attr:`API.decorators` class attribute
# in order to decorate each view method with a decorator that catches
# database integrity errors. However, in order to rollback the session,
# we need to have a session object available to roll back. Therefore we
# need to manually decorate each of the view functions here.
decorate = lambda name, f: setattr(self, name, f(getattr(self, name)))
for method in ['get', 'post', 'patch', 'put', 'delete']:
decorate(method, catch_integrity_errors(self.session))
def _get_column_name(self, column):
"""Retrieve a column name from a column attribute of SQLAlchemy
model class, or a string.
Raises `TypeError` when argument does not fall into either of those
options.
Raises `ValueError` if argument is a column attribute that belongs
to an incorrect model class.
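        For illustration (``Person`` is a hypothetical model class): passing
        ``Person.name`` returns ``'name'``, while passing the string ``'name'``
        returns it unchanged.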
"""
if hasattr(column, '__clause_element__'):
clause_element = column.__clause_element__()
if not isinstance(clause_element, Column):
msg = ('Column must be a string or a column attribute'
' of SQLAlchemy ORM class')
raise TypeError(msg)
model = column.class_
if model is not self.model:
msg = ('Cannot specify column of model {0} while creating API'
' for model {1}').format(model.__name__,
self.model.__name__)
raise ValueError(msg)
return clause_element.key
return column
def _add_to_relation(self, query, relationname, toadd=None):
"""Adds a new or existing related model to each model specified by
`query`.
This function does not commit the changes made to the database. The
calling function has that responsibility.
`query` is a SQLAlchemy query instance that evaluates to all instances
of the model specified in the constructor of this class that should be
updated.
`relationname` is the name of a one-to-many relationship which exists
on each model specified in `query`.
`toadd` is a list of dictionaries, each representing the attributes of
an existing or new related model to add. If a dictionary contains the
key ``'id'``, that instance of the related model will be
added. Otherwise, the :func:`helpers.get_or_create` class method will
be used to get or create a model to add.
"""
submodel = get_related_model(self.model, relationname)
if isinstance(toadd, dict):
toadd = [toadd]
for dictionary in toadd or []:
subinst = get_or_create(self.session, submodel, dictionary)
try:
for instance in query:
getattr(instance, relationname).append(subinst)
except AttributeError as exception:
current_app.logger.exception(str(exception))
setattr(instance, relationname, subinst)
def _remove_from_relation(self, query, relationname, toremove=None):
"""Removes a related model from each model specified by `query`.
This function does not commit the changes made to the database. The
calling function has that responsibility.
`query` is a SQLAlchemy query instance that evaluates to all instances
of the model specified in the constructor of this class that should be
updated.
`relationname` is the name of a one-to-many relationship which exists
on each model specified in `query`.
`toremove` is a list of dictionaries, each representing the attributes
of an existing model to remove. If a dictionary contains the key
``'id'``, that instance of the related model will be
removed. Otherwise, the instance to remove will be retrieved using the
other attributes specified in the dictionary. If multiple instances
match the specified attributes, only the first instance will be
removed.
If one of the dictionaries contains a mapping from ``'__delete__'`` to
``True``, then the removed object will be deleted after being removed
from each instance of the model in the specified query.
"""
submodel = get_related_model(self.model, relationname)
for dictionary in toremove or []:
remove = dictionary.pop('__delete__', False)
if 'id' in dictionary:
subinst = get_by(self.session, submodel, dictionary['id'])
else:
subinst = self.query(submodel).filter_by(**dictionary).first()
for instance in query:
getattr(instance, relationname).remove(subinst)
if remove:
self.session.delete(subinst)
def _set_on_relation(self, query, relationname, toset=None):
"""Sets the value of the relation specified by `relationname` on each
instance specified by `query` to have the new or existing related
models specified by `toset`.
This function does not commit the changes made to the database. The
calling function has that responsibility.
`query` is a SQLAlchemy query instance that evaluates to all instances
of the model specified in the constructor of this class that should be
updated.
`relationname` is the name of a one-to-many relationship which exists
on each model specified in `query`.
`toset` is either a dictionary or a list of dictionaries, each
representing the attributes of an existing or new related model to
set. If a dictionary contains the key ``'id'``, that instance of the
related model will be added. Otherwise, the
:func:`helpers.get_or_create` method will be used to get or create a
model to set.
"""
submodel = get_related_model(self.model, relationname)
if isinstance(toset, list):
value = [get_or_create(self.session, submodel, d) for d in toset]
else:
value = get_or_create(self.session, submodel, toset)
for instance in query:
setattr(instance, relationname, value)
# TODO change this to have more sensible arguments
def _update_relations(self, query, params):
"""Adds, removes, or sets models which are related to the model
specified in the constructor of this class.
This function does not commit the changes made to the database. The
calling function has that responsibility.
This method returns a :class:`frozenset` of strings representing the
names of relations which were modified.
`query` is a SQLAlchemy query instance that evaluates to all instances
of the model specified in the constructor of this class that should be
updated.
`params` is a dictionary containing a mapping from name of the relation
to modify (as a string) to either a list or another dictionary. In the
former case, the relation will be assigned the instances specified by
the elements of the list, which are dictionaries as described below.
In the latter case, the inner dictionary contains at most two mappings,
one with the key ``'add'`` and one with the key ``'remove'``. Each of
these is a mapping to a list of dictionaries which represent the
attributes of the object to add to or remove from the relation.
If one of the dictionaries specified in ``add`` or ``remove`` (or the
        list to be assigned) includes an ``id`` key, the object with that
        ``id`` will be added or removed. Otherwise, an existing object with
        the specified attribute values will be added or removed. If adding,
        a new object will be created if a matching object could not be found
        in the database.
If a dictionary in one of the ``'remove'`` lists contains a mapping
from ``'__delete__'`` to ``True``, then the removed object will be
deleted after being removed from each instance of the model in the
specified query.
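        For illustration (relation and field names are hypothetical), `params`
        could look like ``{'computers': {'add': [{'id': 1}], 'remove': [{'id':
        2, '__delete__': True}]}}``, or ``{'computers': [{'id': 1}]}`` to
        assign the relation outright.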
"""
relations = get_relations(self.model)
tochange = frozenset(relations) & frozenset(params)
for columnname in tochange:
# Check if 'add' or 'remove' is being used
if (isinstance(params[columnname], dict)
and any(k in params[columnname] for k in ['add', 'remove'])):
toadd = params[columnname].get('add', [])
toremove = params[columnname].get('remove', [])
self._add_to_relation(query, columnname, toadd=toadd)
self._remove_from_relation(query, columnname,
toremove=toremove)
else:
toset = params[columnname]
self._set_on_relation(query, columnname, toset=toset)
return tochange
def _handle_validation_exception(self, exception):
"""Rolls back the session, extracts validation error messages, and
returns a :func:`flask.jsonify` response with :http:statuscode:`400`
containing the extracted validation error messages.
Again, *this method calls
:meth:`sqlalchemy.orm.session.Session.rollback`*.
"""
self.session.rollback()
errors = extract_error_messages(exception) or \
'Could not determine specific validation errors'
return dict(validation_errors=errors), 400
def _compute_results_per_page(self):
"""Helper function which returns the number of results per page based
on the request argument ``results_per_page`` and the server
configuration parameters :attr:`results_per_page` and
:attr:`max_results_per_page`.
"""
try:
results_per_page = int(request.args.get('results_per_page'))
except:
results_per_page = self.results_per_page
if results_per_page <= 0:
results_per_page = self.results_per_page
return min(results_per_page, self.max_results_per_page)
# TODO it is ugly to have `deep` as an arg here; can we remove it?
def _paginated(self, instances, deep):
"""Returns a paginated JSONified response from the specified list of
model instances.
`instances` is either a Python list of model instances or a
:class:`~sqlalchemy.orm.Query`.
`deep` is the dictionary which defines the depth of submodels to output
in the JSON format of the model instances in `instances`; it is passed
directly to :func:`helpers.to_dict`.
The response data is JSON of the form:
.. sourcecode:: javascript
{
"page": 2,
"total_pages": 3,
"num_results": 8,
"objects": [{"id": 1, "name": "Jeffrey", "age": 24}, ...]
}
"""
if isinstance(instances, list):
num_results = len(instances)
else:
num_results = count(self.session, instances)
results_per_page = self._compute_results_per_page()
if results_per_page > 0:
# get the page number (first page is page 1)
page_num = int(request.args.get('page', 1))
start = (page_num - 1) * results_per_page
end = min(num_results, start + results_per_page)
total_pages = int(math.ceil(num_results / results_per_page))
else:
page_num = 1
start = 0
end = num_results
total_pages = 1
objects = [to_dict(x, deep, exclude=self.exclude_columns,
exclude_relations=self.exclude_relations,
include=self.include_columns,
include_relations=self.include_relations,
include_methods=self.include_methods)
for x in instances[start:end]]
return dict(page=page_num, objects=objects, total_pages=total_pages,
num_results=num_results)
def _inst_to_dict(self, inst):
"""Returns the dictionary representation of the specified instance.
This method respects the include and exclude columns specified in the
constructor of this class.
"""
# create a placeholder for the relations of the returned models
relations = frozenset(get_relations(self.model))
# do not follow relations that will not be included in the response
if self.include_columns is not None:
cols = frozenset(self.include_columns)
rels = frozenset(self.include_relations)
relations &= (cols | rels)
elif self.exclude_columns is not None:
relations -= frozenset(self.exclude_columns)
deep = dict((r, {}) for r in relations)
return to_dict(inst, deep, exclude=self.exclude_columns,
exclude_relations=self.exclude_relations,
include=self.include_columns,
include_relations=self.include_relations,
include_methods=self.include_methods)
def _dict_to_inst(self, data):
"""Returns an instance of the model with the specified attributes."""
# Check for any request parameter naming a column which does not exist
# on the current model.
for field in data:
if not has_field(self.model, field):
msg = "Model does not have field '{0}'".format(field)
raise ValidationError(msg)
# Getting the list of relations that will be added later
cols = get_columns(self.model)
relations = get_relations(self.model)
# Looking for what we're going to set on the model right now
colkeys = cols.keys()
paramkeys = data.keys()
props = set(colkeys).intersection(paramkeys).difference(relations)
# Special case: if there are any dates, convert the string form of the
# date into an instance of the Python ``datetime`` object.
data = strings_to_dates(self.model, data)
# Instantiate the model with the parameters.
modelargs = dict([(i, data[i]) for i in props])
instance = self.model(**modelargs)
# Handling relations, a single level is allowed
for col in set(relations).intersection(paramkeys):
submodel = get_related_model(self.model, col)
if type(data[col]) == list:
# model has several related objects
for subparams in data[col]:
subinst = get_or_create(self.session, submodel,
subparams)
try:
getattr(instance, col).append(subinst)
except AttributeError:
attribute = getattr(instance, col)
attribute[subinst.key] = subinst.value
else:
# model has single related object
subinst = get_or_create(self.session, submodel,
data[col])
setattr(instance, col, subinst)
return instance
def _instid_to_dict(self, instid):
"""Returns the dictionary representation of the instance specified by
`instid`.
If no such instance of the model exists, this method aborts with a
:http:statuscode:`404`.
"""
inst = get_by(self.session, self.model, instid, self.primary_key)
if inst is None:
return {_STATUS: 404}, 404
return self._inst_to_dict(inst)
def _search(self):
"""Defines a generic search function for the database model.
If the query string is empty, or if the specified query is invalid for
        some reason (for example, searching for all person instances with a malformed filter), the
response will be the JSON string ``{"objects": []}``.
To search for entities meeting some criteria, the client makes a
request to :http:get:`/api/<modelname>` with a query string containing
the parameters of the search. The parameters of the search can involve
filters. In a filter, the client specifies the name of the field by
which to filter, the operation to perform on the field, and the value
which is the argument to that operation. In a function, the client
specifies the name of a SQL function which is executed on the search
results; the result of executing the function is returned to the
client.
The parameters of the search must be provided in JSON form as the value
of the ``q`` request query parameter. For example, in a database of
people, to search for all people with a name containing a "y", the
client would make a :http:method:`get` request to ``/api/person`` with
query parameter as follows::
q={"filters": [{"name": "name", "op": "like", "val": "%y%"}]}
If multiple objects meet the criteria of the search, the response has
:http:status:`200` and content of the form::
.. sourcecode:: javascript
{"objects": [{"name": "Mary"}, {"name": "Byron"}, ...]}
If the result of the search is a single instance of the model, the JSON
representation of that instance would be the top-level object in the
content of the response::
.. sourcecode:: javascript
{"name": "Mary", ...}
For more information SQLAlchemy operators for use in filters, see the
`SQLAlchemy SQL expression tutorial
<http://docs.sqlalchemy.org/en/latest/core/tutorial.html>`_.
The general structure of request data as a JSON string is as follows::
.. sourcecode:: javascript
{
"single": true,
"order_by": [{"field": "age", "direction": "asc"}],
"limit": 2,
"offset": 1,
"disjunction": true,
"filters":
[
{"name": "name", "val": "%y%", "op": "like"},
{"name": "age", "val": [18, 19, 20, 21], "op": "in"},
{"name": "age", "op": "gt", "field": "height"},
...
]
}
For a complete description of all possible search parameters and
responses, see :ref:`searchformat`.
"""
# try to get search query from the request query parameters
try:
search_params = json.loads(request.args.get('q', '{}'))
except (TypeError, ValueError, OverflowError) as exception:
current_app.logger.exception(str(exception))
return dict(message='Unable to decode data'), 400
for preprocessor in self.preprocessors['GET_MANY']:
returned_values = preprocessor(search_params=search_params)
if returned_values:
search_params = returned_values
# resolve date-strings as required by the model
for param in search_params.get('filters', list()):
if 'name' in param and 'val' in param:
query_model = self.model
query_field = param['name']
if '__' in param['name']:
fieldname, relation = param['name'].split('__')
submodel = getattr(self.model, fieldname)
if isinstance(submodel, InstrumentedAttribute):
query_model = submodel.property.mapper.class_
query_field = relation
elif isinstance(submodel, AssociationProxy):
# For the sake of brevity, rename this function.
get_assoc = get_related_association_proxy_model
query_model = get_assoc(submodel)
query_field = relation
to_convert = {query_field: param['val']}
try:
result = strings_to_dates(query_model, to_convert)
except ValueError as exception:
current_app.logger.exception(str(exception))
return dict(message='Unable to construct query'), 400
param['val'] = result.get(query_field)
# perform a filtered search
try:
result = search(self.session, self.model, search_params)
except NoResultFound:
return dict(message='No result found'), 404
except MultipleResultsFound:
return dict(message='Multiple results found'), 400
except NotAuthorizedException:
return dict(message='Not Authorized'), 403
except Exception as exception:
current_app.logger.exception(str(exception))
return dict(message='Unable to construct query'), 400
# create a placeholder for the relations of the returned models
relations = frozenset(get_relations(self.model))
# do not follow relations that will not be included in the response
if self.include_columns is not None:
cols = frozenset(self.include_columns)
rels = frozenset(self.include_relations)
relations &= (cols | rels)
elif self.exclude_columns is not None:
relations -= frozenset(self.exclude_columns)
deep = dict((r, {}) for r in relations)
# for security purposes, don't transmit list as top-level JSON
if isinstance(result, Query):
result = self._paginated(result, deep)
# Create the Link header.
#
# TODO We are already calling self._compute_results_per_page() once
# in _paginated(); don't compute it again here.
page, last_page = result['page'], result['total_pages']
linkstring = create_link_string(page, last_page,
self._compute_results_per_page())
headers = dict(Link=linkstring)
else:
primary_key = self.primary_key or primary_key_name(result)
result = to_dict(result, deep, exclude=self.exclude_columns,
exclude_relations=self.exclude_relations,
include=self.include_columns,
include_relations=self.include_relations,
include_methods=self.include_methods)
# The URL at which a client can access the instance matching this
# search query.
url = '{0}/{1}'.format(request.base_url, result[primary_key])
headers = dict(Location=url)
for postprocessor in self.postprocessors['GET_MANY']:
returned_value = postprocessor(result=result, search_params=search_params)
if returned_value:
result = returned_value
# HACK Provide the headers directly in the result dictionary, so that
# the :func:`jsonpify` function has access to them. See the note there
# for more information.
result[_HEADERS] = headers
return result, 200, headers
def get(self, instid, relationname, relationinstid):
"""Returns a JSON representation of an instance of model with the
specified name.
If ``instid`` is ``None``, this method returns the result of a search
with parameters specified in the query string of the request. If no
search parameters are specified, this method returns all instances of
the specified model.
If ``instid`` is an integer, this method returns the instance of the
model with that identifying integer. If no such instance exists, this
method responds with :http:status:`404`.
"""
if instid is None:
return self._search()
for preprocessor in self.preprocessors['GET_SINGLE']:
returned_values = preprocessor(instance_id=instid)
if returned_values:
instid = returned_values
# get the instance of the "main" model whose ID is instid
instance = get_by(self.session, self.model, instid, self.primary_key)
if instance is None:
return {_STATUS: 404}, 404
# If no relation is requested, just return the instance. Otherwise,
# get the value of the relation specified by `relationname`.
if relationname is None:
result = self.serialize(instance)
else:
related_value = getattr(instance, relationname)
# create a placeholder for the relations of the returned models
related_model = get_related_model(self.model, relationname)
relations = frozenset(get_relations(related_model))
deep = dict((r, {}) for r in relations)
if relationinstid is not None:
related_value_instance = get_by(self.session, related_model,
relationinstid)
if related_value_instance is None:
return {_STATUS: 404}, 404
result = to_dict(related_value_instance, deep)
else:
# for security purposes, don't transmit list as top-level JSON
if is_like_list(instance, relationname):
result = self._paginated(list(related_value), deep)
else:
result = to_dict(related_value, deep)
if result is None:
return {_STATUS: 404}, 404
for postprocessor in self.postprocessors['GET_SINGLE']:
returned_value = postprocessor(result=result)
if returned_value:
result = returned_value
return result
def _delete_many(self):
"""Deletes multiple instances of the model.
If search parameters are provided via the ``q`` query parameter, only
those instances matching the search parameters will be deleted.
If no instances were deleted, this returns a
:http:status:`404`. Otherwise, it returns a :http:status:`200` with the
number of deleted instances in the body of the response.
"""
# try to get search query from the request query parameters
try:
search_params = json.loads(request.args.get('q', '{}'))
except (TypeError, ValueError, OverflowError) as exception:
current_app.logger.exception(str(exception))
return dict(message='Unable to decode search query'), 400
for preprocessor in self.preprocessors['DELETE_MANY']:
preprocessor(search_params=search_params)
# perform a filtered search
try:
# HACK We need to ignore any ``order_by`` request from the client,
# because for some reason, SQLAlchemy does not allow calling
# delete() on a query that has an ``order_by()`` on it. If you
# attempt to call delete(), you get this error:
#
# sqlalchemy.exc.InvalidRequestError: Can't call Query.delete()
# when order_by() has been called
#
result = search(self.session, self.model, search_params,
_ignore_order_by=True)
except NoResultFound:
return dict(message='No result found'), 404
except MultipleResultsFound:
return dict(message='Multiple results found'), 400
except Exception as exception:
current_app.logger.exception(str(exception))
return dict(message='Unable to construct query'), 400
# for security purposes, don't transmit list as top-level JSON
if isinstance(result, Query):
# Implementation note: `synchronize_session=False`, described in
# the SQLAlchemy documentation for
# :meth:`sqlalchemy.orm.query.Query.delete`, states that this is
# the most efficient option for bulk deletion, and is reliable once
# the session has expired, which occurs after the session commit
# below.
num_deleted = result.delete(synchronize_session=False)
else:
self.session.delete(result)
num_deleted = 1
self.session.commit()
result = dict(num_deleted=num_deleted)
for postprocessor in self.postprocessors['DELETE_MANY']:
postprocessor(result=result, search_params=search_params)
return (result, 200) if num_deleted > 0 else 404
def delete(self, instid, relationname, relationinstid):
"""Removes the specified instance of the model with the specified name
from the database.
Although :http:method:`delete` is an idempotent method according to
:rfc:`2616`, idempotency only means that subsequent identical requests
cannot have additional side-effects. Since the response code is not a
side effect, this method responds with :http:status:`204` only if an
object is deleted, and with :http:status:`404` when nothing is deleted.
        If `relationname` and `relationinstid` are provided, the related
        instance identified by `relationinstid` is removed from the
        `relationname` relation of the specified instance, instead of the
        instance itself being deleted.
.. versionadded:: 0.12.0
Added the `relationinstid` keyword argument.
.. versionadded:: 0.10.0
Added the `relationname` keyword argument.
"""
if instid is None:
# If no instance ID is provided, this request is an attempt to
# delete many instances of the model via a search with possible
# filters.
return self._delete_many()
was_deleted = False
for preprocessor in self.preprocessors['DELETE']:
            returned_values = preprocessor(instance_id=instid, relation_name=relationname,
                                           relation_instance_id=relationinstid)
if returned_values:
instid, relationname, relationinstid = returned_values
inst = get_by(self.session, self.model, instid, self.primary_key)
if relationname:
# If the request is ``DELETE /api/person/1/computers``, error 400.
if not relationinstid:
msg = ('Cannot DELETE entire "{0}"'
' relation').format(relationname)
return dict(message=msg), 400
# Otherwise, get the related instance to delete.
relation = getattr(inst, relationname)
related_model = get_related_model(self.model, relationname)
relation_instance = get_by(self.session, related_model,
relationinstid)
# Removes an object from the relation list.
relation.remove(relation_instance)
was_deleted = len(self.session.dirty) > 0
elif inst is not None:
self.session.delete(inst)
was_deleted = len(self.session.deleted) > 0
self.session.commit()
for postprocessor in self.postprocessors['DELETE_SINGLE']:
postprocessor(was_deleted=was_deleted)
return {}, 204 if was_deleted else 404
def post(self):
"""Creates a new instance of a given model based on request data.
This function parses the string contained in
        :attr:`flask.request.data` as a JSON object and then validates it with
a validator specified in the constructor of this class.
The :attr:`flask.request.data` attribute will be parsed as a JSON
object containing the mapping from field name to value to which to
initialize the created instance of the model.
        After that, it separates all columns that define relationships with
other entities, creates a model with the simple columns and then
creates instances of these submodels and associates them with the
related fields. This happens only at the first level of nesting.
Currently, this method can only handle instantiating a model with a
single level of relationship data.
"""
content_type = request.headers.get('Content-Type', None)
content_is_json = content_type.startswith('application/json')
is_msie = _is_msie8or9()
# Request must have the Content-Type: application/json header, unless
# the User-Agent string indicates that the client is Microsoft Internet
# Explorer 8 or 9 (which has a fixed Content-Type of 'text/html'; see
# issue #267).
if not is_msie and not content_is_json:
msg = 'Request must have "Content-Type: application/json" header'
return dict(message=msg), 415
# try to read the parameters for the model from the body of the request
try:
# HACK Requests made from Internet Explorer 8 or 9 don't have the
# correct content type, so request.get_json() doesn't work.
if is_msie:
data = json.loads(request.get_data()) or {}
else:
data = request.get_json() or {}
except (BadRequest, TypeError, ValueError, OverflowError) as exception:
current_app.logger.exception(str(exception))
return dict(message='Unable to decode data'), 400
# apply any preprocessors to the POST arguments
for preprocessor in self.preprocessors['POST']:
returned_values = preprocessor(data=data)
if returned_values:
data = returned_values
try:
# Convert the dictionary representation into an instance of the
# model.
instance = self.deserialize(data)
# Add the created model to the session.
self.session.add(instance)
self.session.commit()
# Get the dictionary representation of the new instance.
result = self.serialize(instance)
# Determine the value of the primary key for this instance and
            # URL-encode it (in case it is a Unicode string).
pk_name = self.primary_key or primary_key_name(instance)
primary_key = result[pk_name]
try:
primary_key = str(primary_key)
except UnicodeEncodeError:
primary_key = url_quote_plus(primary_key.encode('utf-8'))
# The URL at which a client can access the newly created instance
# of the model.
url = '{0}/{1}'.format(request.base_url, primary_key)
# Provide that URL in the Location header in the response.
headers = dict(Location=url)
for postprocessor in self.postprocessors['POST']:
returned_value = postprocessor(result=result)
if returned_value:
result = returned_value
return result, 201, headers
except self.validation_exceptions as exception:
return self._handle_validation_exception(exception)
def patch(self, instid, relationname, relationinstid):
"""Updates the instance specified by ``instid`` of the named model, or
updates multiple instances if ``instid`` is ``None``.
The :attr:`flask.request.data` attribute will be parsed as a JSON
object containing the mapping from field name to value to which to
update the specified instance or instances.
If ``instid`` is ``None``, the query string will be used to search for
instances (using the :func:`_search` method), and all matching
instances will be updated according to the content of the request data.
See the :func:`_search` documentation on more information about search
parameters for restricting the set of instances on which updates will
be made in this case.
This function ignores the `relationname` and `relationinstid` keyword
arguments.
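        For illustration (field names are hypothetical), a request body for a
        PATCH on the collection might look like ``{"q": {"filters": [{"name":
        "age", "op": "lt", "val": 18}]}, "is_minor": true}``; the value under
        ``q`` restricts which rows are updated and the remaining keys become
        the new field values.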
.. versionadded:: 0.12.0
Added the `relationinstid` keyword argument.
.. versionadded:: 0.10.0
Added the `relationname` keyword argument.
"""
content_type = request.headers.get('Content-Type', None)
content_is_json = content_type.startswith('application/json')
is_msie = _is_msie8or9()
# Request must have the Content-Type: application/json header, unless
# the User-Agent string indicates that the client is Microsoft Internet
# Explorer 8 or 9 (which has a fixed Content-Type of 'text/html'; see
# issue #267).
if not is_msie and not content_is_json:
msg = 'Request must have "Content-Type: application/json" header'
return dict(message=msg), 415
# try to load the fields/values to update from the body of the request
try:
# HACK Requests made from Internet Explorer 8 or 9 don't have the
# correct content type, so request.get_json() doesn't work.
if is_msie:
data = json.loads(request.get_data()) or {}
else:
data = request.get_json() or {}
except (BadRequest, TypeError, ValueError, OverflowError) as exception:
# this also happens when request.data is empty
current_app.logger.exception(str(exception))
return dict(message='Unable to decode data'), 400
# Check if the request is to patch many instances of the current model.
patchmany = instid is None
# Perform any necessary preprocessing.
if patchmany:
# Get the search parameters; all other keys in the `data`
# dictionary indicate a change in the model's field.
search_params = data.pop('q', {})
for preprocessor in self.preprocessors['PATCH_MANY']:
returned_values = preprocessor(search_params=search_params, data=data)
if returned_values:
search_params, data = returned_values
else:
for preprocessor in self.preprocessors['PATCH_SINGLE']:
returned_values = preprocessor(instance_id=instid, data=data)
if returned_values:
instid, data = returned_values
# Check for any request parameter naming a column which does not exist
# on the current model.
for field in data:
if not has_field(self.model, field):
msg = "Model does not have field '{0}'".format(field)
return dict(message=msg), 400
if patchmany:
try:
# create a SQLALchemy Query from the query parameter `q`
query = create_query(self.session, self.model, search_params)
except NotAuthorizedException:
return dict(message='Not Authorized'), 403
except Exception as exception:
current_app.logger.exception(str(exception))
return dict(message='Unable to construct query'), 400
else:
# create a SQLAlchemy Query which has exactly the specified row
query = query_by_primary_key(self.session, self.model, instid,
self.primary_key)
if query.count() == 0:
return {_STATUS: 404}, 404
assert query.count() == 1, 'Multiple rows with same ID'
try:
relations = self._update_relations(query, data)
except self.validation_exceptions as exception:
current_app.logger.exception(str(exception))
return self._handle_validation_exception(exception)
field_list = frozenset(data) ^ relations
data = dict((field, data[field]) for field in field_list)
# Special case: if there are any dates, convert the string form of the
# date into an instance of the Python ``datetime`` object.
data = strings_to_dates(self.model, data)
try:
# Let's update all instances present in the query
num_modified = 0
if data:
for item in query.all():
for field, value in data.items():
setattr(item, field, value)
num_modified += 1
self.session.commit()
except self.validation_exceptions as exception:
current_app.logger.exception(str(exception))
return self._handle_validation_exception(exception)
# Perform any necessary postprocessing.
if patchmany:
result = dict(num_modified=num_modified)
for postprocessor in self.postprocessors['PATCH_MANY']:
returned_value = postprocessor(query=query, result=result,
search_params=search_params)
if returned_value:
result = returned_value
else:
result = self._instid_to_dict(instid)
for postprocessor in self.postprocessors['PATCH_SINGLE']:
returned_value = postprocessor(result=result)
if returned_value:
result = returned_value
return result
def put(self, *args, **kw):
"""Alias for :meth:`patch`."""
return self.patch(*args, **kw)<|fim▁end|> | response.headers[key] = value
|
<|file_name|>graphmap.rs<|end_file_name|><|fim▁begin|>#![cfg(feature = "graphmap")]
extern crate petgraph;
use std::collections::HashSet;
use std::fmt;
use petgraph::prelude::*;
use petgraph::visit::Walker;
use petgraph::algo::dijkstra;
use petgraph::dot::{Config, Dot};
#[test]
fn simple() {
//let root = TypedArena::<Node<_>>::new();
let mut gr = UnGraphMap::new();
//let node = |&: name: &'static str| Ptr(root.alloc(Node(name.to_string())));
let a = gr.add_node("A");
let b = gr.add_node("B");
let c = gr.add_node("C");
let d = gr.add_node("D");
let e = gr.add_node("E");
let f = gr.add_node("F");
gr.add_edge(a, b, 7);
gr.add_edge(a, c, 9);
gr.add_edge(a, d, 14);
gr.add_edge(b, c, 10);
gr.add_edge(c, d, 2);
gr.add_edge(d, e, 9);
gr.add_edge(b, f, 15);
gr.add_edge(c, f, 11);
assert!(gr.add_edge(e, f, 5).is_none());
// duplicate edges
assert_eq!(gr.add_edge(f, b, 16), Some(15));
assert_eq!(gr.add_edge(f, e, 6), Some(5));
println!("{:?}", gr);
println!("{}", Dot::with_config(&gr, &[]));
assert_eq!(gr.node_count(), 6);
assert_eq!(gr.edge_count(), 9);
// check updated edge weight
assert_eq!(gr.edge_weight(e, f), Some(&6));
let scores = dijkstra(&gr, a, None, |e| *e.weight());
let mut scores: Vec<_> = scores.into_iter().collect();
scores.sort();
assert_eq!(
scores,
vec![
("A", 0),
("B", 7),
("C", 9),
("D", 11),
("E", 20),
("F", 20)
]
);
}
#[test]
fn remov() {
let mut g = UnGraphMap::new();
g.add_node(1);
g.add_node(2);
g.add_edge(1, 2, -1);
assert_eq!(g.edge_weight(1, 2), Some(&-1));
assert_eq!(g.edge_weight(2, 1), Some(&-1));
assert_eq!(g.neighbors(1).count(), 1);
let noexist = g.remove_edge(2, 3);
assert_eq!(noexist, None);
let exist = g.remove_edge(2, 1);
assert_eq!(exist, Some(-1));
assert_eq!(g.edge_count(), 0);
assert_eq!(g.edge_weight(1, 2), None);
assert_eq!(g.edge_weight(2, 1), None);
assert_eq!(g.neighbors(1).count(), 0);
}
#[test]
fn remove_directed() {
let mut g = GraphMap::<_, _, Directed>::with_capacity(0, 0);
g.add_edge(1, 2, -1);
println!("{:?}", g);
assert_eq!(g.edge_weight(1, 2), Some(&-1));
assert_eq!(g.edge_weight(2, 1), None);
assert_eq!(g.neighbors(1).count(), 1);
let noexist = g.remove_edge(2, 3);
assert_eq!(noexist, None);
let exist = g.remove_edge(2, 1);
assert_eq!(exist, None);
let exist = g.remove_edge(1, 2);
assert_eq!(exist, Some(-1));
println!("{:?}", g);
assert_eq!(g.edge_count(), 0);
assert_eq!(g.edge_weight(1, 2), None);
assert_eq!(g.edge_weight(2, 1), None);
assert_eq!(g.neighbors(1).count(), 0);
}
#[test]
fn dfs() {
let mut gr = UnGraphMap::default();
let h = gr.add_node("H");
let i = gr.add_node("I");
let j = gr.add_node("J");
let k = gr.add_node("K");
// Z is disconnected.
let z = gr.add_node("Z");
gr.add_edge(h, i, 1.);
gr.add_edge(h, j, 3.);
gr.add_edge(i, j, 1.);
gr.add_edge(i, k, 2.);
println!("{:?}", gr);
{
let mut cnt = 0;
let mut dfs = Dfs::new(&gr, h);
while let Some(_) = dfs.next(&gr) {
cnt += 1;
}
assert_eq!(cnt, 4);
}
{
let mut cnt = 0;
let mut dfs = Dfs::new(&gr, z);
while let Some(_) = dfs.next(&gr) {
cnt += 1;
}
assert_eq!(cnt, 1);
}
assert_eq!(Dfs::new(&gr, h).iter(&gr).count(), 4);
assert_eq!(Dfs::new(&gr, i).iter(&gr).count(), 4);
assert_eq!(Dfs::new(&gr, z).iter(&gr).count(), 1);
}
#[test]
fn edge_iterator() {
let mut gr = UnGraphMap::new();
let h = gr.add_node("H");
let i = gr.add_node("I");
let j = gr.add_node("J");
let k = gr.add_node("K");
gr.add_edge(h, i, 1);
gr.add_edge(h, j, 2);
gr.add_edge(i, j, 3);
gr.add_edge(i, k, 4);
let real_edges: HashSet<_> = gr.all_edges().map(|(a, b, &w)| (a, b, w)).collect();
let expected_edges: HashSet<_> =
vec![("H", "I", 1), ("H", "J", 2), ("I", "J", 3), ("I", "K", 4)]
.into_iter()
.collect();
assert_eq!(real_edges, expected_edges);
}
#[test]
fn from_edges() {
let gr =
GraphMap::<_, _, Undirected>::from_edges(&[("a", "b", 1), ("a", "c", 2), ("c", "d", 3)]);
assert_eq!(gr.node_count(), 4);
assert_eq!(gr.edge_count(), 3);
assert_eq!(gr[("a", "c")], 2);
let gr = GraphMap::<_, (), Undirected>::from_edges(&[
(0, 1),
(0, 2),
(0, 3),
(1, 2),
(1, 3),
(2, 3),
]);
assert_eq!(gr.node_count(), 4);
assert_eq!(gr.edge_count(), 6);
assert_eq!(gr.neighbors(0).count(), 3);
assert_eq!(gr.neighbors(1).count(), 3);
assert_eq!(gr.neighbors(2).count(), 3);
assert_eq!(gr.neighbors(3).count(), 3);
println!("{:?}", Dot::with_config(&gr, &[Config::EdgeNoLabel]));
}
#[test]
fn graphmap_directed() {
//let root = TypedArena::<Node<_>>::new();
let mut gr = DiGraphMap::<_, ()>::with_capacity(0, 0);
//let node = |&: name: &'static str| Ptr(root.alloc(Node(name.to_string())));
let a = gr.add_node("A");
let b = gr.add_node("B");
let c = gr.add_node("C");
let d = gr.add_node("D");
let e = gr.add_node("E");
let edges = [(a, b), (a, c), (a, d), (b, c), (c, d), (d, e), (b, b)];
gr.extend(&edges);
// Add reverse edges -- ok!
assert!(gr.add_edge(e, d, ()).is_none());<|fim▁hole|> // duplicate edge - no
assert!(!gr.add_edge(a, b, ()).is_none());
// duplicate self loop - no
assert!(!gr.add_edge(b, b, ()).is_none());
println!("{:#?}", gr);
}
fn assert_sccs_eq<N>(mut res: Vec<Vec<N>>, mut answer: Vec<Vec<N>>)
where
N: Ord + fmt::Debug,
{
// normalize the result and compare with the answer.
for scc in &mut res {
scc.sort();
}
res.sort();
for scc in &mut answer {
scc.sort();
}
answer.sort();
assert_eq!(res, answer);
}
#[test]
fn scc() {
let gr: GraphMap<_, u32, Directed> = GraphMap::from_edges(&[
(6, 0, 0),
(0, 3, 1),
(3, 6, 2),
(8, 6, 3),
(8, 2, 4),
(2, 5, 5),
(5, 8, 6),
(7, 5, 7),
(1, 7, 8),
(7, 4, 9),
(4, 1, 10),
]);
assert_sccs_eq(
petgraph::algo::kosaraju_scc(&gr),
vec![vec![0, 3, 6], vec![1, 4, 7], vec![2, 5, 8]],
);
}
#[test]
fn test_into_graph() {
let gr: GraphMap<_, u32, Directed> = GraphMap::from_edges(&[
(6, 0, 0),
(0, 3, 1),
(3, 6, 2),
(8, 6, 3),
(8, 2, 4),
(2, 5, 5),
(5, 8, 6),
(7, 5, 7),
(1, 7, 8),
(7, 4, 9),
(4, 1, 10),
]);
let graph: Graph<_, _, _> = gr.clone().into_graph();
println!("{}", Dot::new(&gr));
println!("{}", Dot::new(&graph));
    // node weights in `graph` are node identifiers in `gr`.
for edge in graph.edge_references() {
let a = edge.source();
let b = edge.target();
let aw = graph[a];
let bw = graph[b];
assert_eq!(&gr[(aw, bw)], edge.weight());
}
}
#[test]
fn test_all_edges_mut() {
// graph with edge weights equal to in+out
let mut graph: GraphMap<_, u32, Directed> =
GraphMap::from_edges(&[(0, 1, 1), (1, 2, 3), (2, 0, 2)]);
// change it so edge weight is equal to 2 * (in+out)
for (start, end, weight) in graph.all_edges_mut() {
*weight = (start + end) * 2;
}
// test it
for (start, end, weight) in graph.all_edges() {
assert_eq!((start + end) * 2, *weight);
}
}
#[test]
fn neighbors_incoming_includes_self_loops() {
let mut graph = DiGraphMap::new();
graph.add_node(());
graph.add_edge((), (), ());
let mut neighbors = graph.neighbors_directed((), Incoming);
assert_eq!(neighbors.next(), Some(()));
assert_eq!(neighbors.next(), None);
}
#[test]
fn undirected_neighbors_includes_self_loops() {
let mut graph = UnGraphMap::new();
graph.add_node(());
graph.add_edge((), (), ());
let mut neighbors = graph.neighbors(());
assert_eq!(neighbors.next(), Some(()));
assert_eq!(neighbors.next(), None);
}
#[test]
fn self_loops_can_be_removed() {
let mut graph = DiGraphMap::new();
graph.add_node(());
graph.add_edge((), (), ());
graph.remove_edge((), ());
assert_eq!(graph.neighbors_directed((), Outgoing).next(), None);
assert_eq!(graph.neighbors_directed((), Incoming).next(), None);
}<|fim▁end|> | |
<|file_name|>dJSON.spec.js<|end_file_name|><|fim▁begin|>describe('dJSON', function () {
'use strict';
var chai = require('chai');
var expect = chai.expect;
var dJSON = require('../lib/dJSON');
var path = 'x.y["q.{r}"].z';
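  // The bracket segment allows the path to address keys containing periods or
  // curly braces (here the literal property name 'q.{r}').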
var obj;
beforeEach(function () {
obj = {
x: {<|fim▁hole|> y: {
'q.{r}': {
z: 635
},
q: {
r: {
z: 1
}
}
}
},
'x-y': 5,
falsy: false
};
});
it('gets a value from an object with a path containing properties which contain a period', function () {
expect(dJSON.get(obj, path)).to.equal(635);
expect(dJSON.get(obj, 'x.y.q.r.z')).to.equal(1);
});
it('sets a value from an object with a path containing properties which contain a period', function () {
dJSON.set(obj, path, 17771);
expect(dJSON.get(obj, path)).to.equal(17771);
expect(dJSON.get(obj, 'x.y.q.r.z')).to.equal(1);
});
it('will return undefined when requesting a property with a dash directly', function () {
expect(dJSON.get(obj, 'x-y')).to.be.undefined;
});
it('will return the proper value when requesting a property with a dash by square bracket notation', function () {
expect(dJSON.get(obj, '["x-y"]')).to.equal(5);
});
it('returns a value that is falsy', function () {
expect(dJSON.get(obj, 'falsy')).to.equal(false);
});
it('sets a value that is falsy', function () {
dJSON.set(obj, 'new', false);
expect(dJSON.get(obj, 'new')).to.equal(false);
});
it('uses an empty object as default for the value in the set method', function () {
var newObj = {};
dJSON.set(newObj, 'foo.bar.lorem');
expect(newObj).to.deep.equal({
foo: {
bar: {
lorem: {}
}
}
});
});
it('does not create an object when a path exists as empty string', function () {
var newObj = {
nestedObject: {
anArray: [
'i have a value',
''
]
}
};
var newPath = 'nestedObject.anArray[1]';
dJSON.set(newObj, newPath, 17771);
expect(newObj).to.deep.equal({
nestedObject: {
anArray: [
'i have a value',
17771
]
}
});
});
it('creates an object from a path with a left curly brace', function () {
var newObj = {};
dJSON.set(newObj, path.replace('}', ''), 'foo');
expect(newObj).to.be.deep.equal({
x: {
y: {
'q.{r': {
z: 'foo'
}
}
}
});
});
it('creates an object from a path with a right curly brace', function () {
var newObj = {};
dJSON.set(newObj, path.replace('{', ''), 'foo');
expect(newObj).to.be.deep.equal({
x: {
y: {
'q.r}': {
z: 'foo'
}
}
}
});
});
it('creates an object from a path with curly braces', function () {
var newObj = {};
dJSON.set(newObj, path, 'foo');
expect(newObj).to.be.deep.equal({
x: {
y: {
'q.{r}': {
z: 'foo'
}
}
}
});
});
it('creates an object from a path without curly braces', function () {
var newObj = {};
dJSON.set(newObj, path.replace('{', '').replace('}', ''), 'foo');
expect(newObj).to.be.deep.equal({
x: {
y: {
'q.r': {
z: 'foo'
}
}
}
});
});
});<|fim▁end|> | |
<|file_name|>anonymize.cpp<|end_file_name|><|fim▁begin|>/* Copyright (c) 2009-2010 Satoshi Nakamoto
Copyright (c) 2009-2012 The Bitcoin developers
Copyright (c) 2013-2014 The StealthCoin/StealthSend Developers */
/* Copyright (c) 2014-2015, Triangles Developers */
/* See LICENSE for licensing information */
#include "anonymize.h"
#include "util.h"
#include <boost/filesystem.hpp>
#include <boost/thread/thread.hpp>
#include <boost/thread/mutex.hpp>
#include <string>
#include <cstring>
char const* anonymize_tor_data_directory(
) {
static std::string const retrieved = (
GetDataDir(
) / "tor"
).string(
);
return retrieved.c_str(
);
}
char const* anonymize_service_directory(
) {
static std::string const retrieved = (
GetDataDir(
) / "onion"
).string(
);
return retrieved.c_str(
);
}
int check_interrupted(
) {
return boost::this_thread::interruption_requested(
) ? 1 : 0;
}
static boost::mutex initializing;
static std::auto_ptr<boost::unique_lock<boost::mutex> > uninitialized(<|fim▁hole|>);
void set_initialized(
) {
uninitialized.reset();
}
void wait_initialized(
) {
boost::unique_lock<boost::mutex> checking(initializing);
}<|fim▁end|> | new boost::unique_lock<boost::mutex>(
initializing
) |
<|file_name|>remove_duplicates.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# from https://github.com/mapillary/mapillary_tools/
# The MIT License (MIT)
#
# Copyright (c) 2014 Mapillary AB
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import getopt
import os
import sys
import shutil
from math import asin, cos, radians, sin, sqrt
from PIL import Image
from lib.exif_pil import PILExifReader
class GPSDirectionDuplicateFinder:
"""Finds duplicates based on the direction the camera is pointing.
This supports the case where a panorama is being made."""
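    # Illustration: with max_diff=20, an image whose camera heading differs by
    # only 5 degrees from the previous unique image counts as a duplicate,
    # while a 30-degree pan does not.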
def __init__(self, max_diff):
self._prev_rotation = None
self._prev_unique_rotation = None
self._max_diff = max_diff
self._latest_text = ""
def get_latest_text(self):
return self._latest_text
def latest_is_duplicate(self, is_duplicate):
if not is_duplicate:
self._prev_unique_rotation = self._prev_rotation
def is_duplicate(self, file_path, exif_reader):
rotation = exif_reader.get_rotation()
if rotation is None:
return None
if self._prev_unique_rotation is None:
self._prev_rotation = rotation
return False
diff = abs(rotation - self._prev_unique_rotation)
is_duplicate = diff < self._max_diff
self._prev_rotation = rotation
self._latest_text = str(int(diff)) + " deg: " + str(is_duplicate)
return is_duplicate
class GPSDistance:
"""Calculates the distance between two sets of GPS coordinates."""
@staticmethod
def get_gps_distance(lat1, lon1, lat2, lon2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees with a result in meters).
This is done using the Haversine Formula.
"""
# Convert decimal degrees to radians
lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2])
# Haversine formula
difflat = lat2 - lat1
difflon = lon2 - lon1
a = (sin(difflat / 2) ** 2) + (cos(lat1) * cos(lat2) * sin(difflon / 2)
** 2)
c = 2 * asin(sqrt(a))
r = 6371000 # Radius of The Earth in meters.
# It is not a perfect sphere, so this is just good enough.
return c * r
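    # Rough sanity check (approximate figures): 0.001 degrees of latitude is
    # about 111 m, so get_gps_distance(0.0, 0.0, 0.001, 0.0) returns roughly
    # 111.2.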
class GPSSpeedErrorFinder:
"""Finds images in a sequence that might have an error in GPS data
or suggest a track to be split. It is done by looking at the
speed it would take to travel the distance in question."""
def __init__(self, max_speed_km_h, way_too_high_speed_km_h):
self._prev_lat_lon = None
        self._prev_time = None
self._latest_text = ""
self._previous_filepath = None
self._max_speed_km_h = max_speed_km_h
self._way_too_high_speed_km_h = way_too_high_speed_km_h
self._high_speed = False
self._too_high_speed = False
def set_verbose(self, verbose):
self.verbose = verbose
def get_latest_text(self):
return self._latest_text
def is_error(self, file_path, exif_reader):
"""
        Returns whether there is an obvious error in the image's EXIF data.
        The given image is an instance of PIL's Image class.
        The given exif is the data from the get_exif_data function.
"""
speed_gps = exif_reader.get_speed()
if speed_gps is None:
self._latest_text = "No speed given in EXIF data."
return False
self._latest_text = "Speed GPS: " + str(speed_gps) + " km/h"
if speed_gps > self._way_too_high_speed_km_h:
self._latest_text = ("GPS speed is unrealistically high: %s km/h."
% speed_gps)
self._too_high_speed = True
return True
elif speed_gps > self._max_speed_km_h:
self._latest_text = ("GPS speed is high: %s km/h."
% speed_gps )
self._high_speed = True
return True
latlong = exif_reader.get_lat_lon()
timestamp = exif_reader.get_time()
if self._prev_lat_lon is None or self._prev_time is None:
self._prev_lat_lon = latlong
self._prev_time = timestamp
self._previous_filepath = file_path
return False
if latlong is None or timestamp is None:
return False
diff_meters = GPSDistance.get_gps_distance(
self._prev_lat_lon[0], self._prev_lat_lon[1], latlong[0],
latlong[1])
diff_secs = (timestamp - self._prev_time).total_seconds()
if diff_secs == 0:
return False
speed_km_h = (diff_meters / diff_secs) * 3.6
if speed_km_h > self._way_too_high_speed_km_h:
self._latest_text = ("Speed between %s and %s is %s km/h, which is"
" unrealistically high." % (self._previous_filepath, file_path,
int(speed_km_h)))
self._too_high_speed = True
return True
elif speed_km_h > self._max_speed_km_h:
self._latest_text = "Speed between %s and %s is %s km/h." % (
self._previous_filepath, file_path, int(speed_km_h)
)
self._high_speed = True
return True
else:
return False
def is_fast(self):
return self._high_speed
def is_too_fast(self):
return self._too_high_speed
class GPSDistanceDuplicateFinder:
"""Finds duplicates images by looking at the distance between
two GPS points."""
def __init__(self, distance):
self._distance = distance
self._prev_lat_lon = None
self._previous = None
self._latest_text = ""
self._previous_filepath = None
self._prev_unique_lat_lon = None
def get_latest_text(self):
return self._latest_text
def latest_is_duplicate(self, is_duplicate):
if not is_duplicate:
self._prev_unique_lat_lon = self._prev_lat_lon
def is_duplicate(self, file_path, exif_reader):
"""
Returns if the given image is a duplicate of the previous image.
The given image is an instance of PIL's Image class.<|fim▁hole|> if self._prev_lat_lon is None:
self._prev_lat_lon = latlong
return False
if self._prev_unique_lat_lon is not None and latlong is not None:
diff_meters = GPSDistance.get_gps_distance(
self._prev_unique_lat_lon[0], self._prev_unique_lat_lon[1],
latlong[0], latlong[1])
self._previous_filepath = file_path
is_duplicate = diff_meters <= self._distance
self._prev_lat_lon = latlong
self._latest_text = file_path + ": " + str(
int(diff_meters)) + " m: " + str(is_duplicate)
return is_duplicate
else:
return False
class ImageRemover:
"""Moves images that are (almost) duplicates or contains errors in GPS
data into separate directories."""
def __init__(self, src_dir, duplicate_dir, error_dir):
self._testers = []
self._error_finders = []
self._src_dir = src_dir
self._duplicate_dir = duplicate_dir
self._error_dir = error_dir
self._dryrun = False
self.verbose = 0
def set_verbose(self, verbose):
self.verbose = verbose
def set_dry_run(self, dryrun):
self._dryrun = dryrun
def add_duplicate_finder(self, tester):
self._testers.append(tester)
def add_error_finder(self, finder):
self._error_finders.append(finder)
def _move_into_error_dir(self, file):
self._move_into_dir(file, self._error_dir)
def _move_into_duplicate_dir(self, file):
self._move_into_dir(file, self._duplicate_dir)
def _move_into_dir(self, file, dir):
if not self._dryrun and not os.path.exists(dir):
os.makedirs(dir)
filename = os.path.basename(file)
if not self._dryrun:
shutil.move(file, os.path.join(dir, filename))
print file, " => ", dir
def _read_capture_time(self, filepath):
reader = PILExifReader(filepath)
return reader.read_capture_time()
def _sort_file_list(self, file_list):
'''
Read capture times and sort files in time order.
'''
capture_times = [self._read_capture_time(filepath) for filepath in file_list]
sorted_times_files = zip(capture_times, file_list)
sorted_times_files.sort()
return zip(*sorted_times_files)
def do_magic(self):
"""Perform the task of finding and moving images."""
files = [os.path.join(self._src_dir, f) for f in os.listdir(self._src_dir)
if os.path.isfile(os.path.join(self._src_dir, f)) and
f.lower().endswith('.jpg')]
capturetime, files = self._sort_file_list(files)
for file_path in files:
exif_reader = PILExifReader(file_path)
            is_error = self._handle_possible_error(file_path, exif_reader)
if not is_error:
self._handle_possible_duplicate(file_path, exif_reader)
def _handle_possible_duplicate(self, file_path, exif_reader):
is_duplicate = True
verbose_text = []
for tester in self._testers:
is_this_duplicate = tester.is_duplicate(file_path, exif_reader)
            if is_this_duplicate is not None:
is_duplicate &= is_this_duplicate
verbose_text.append(tester.get_latest_text())
else:
verbose_text.append("No orientation")
if self.verbose >= 1:
print ", ".join(verbose_text), "=>", is_duplicate
if is_duplicate:
self._move_into_duplicate_dir(file_path)
for tester in self._testers:
tester.latest_is_duplicate(is_duplicate)
return is_duplicate
    def _handle_possible_error(self, file_path, exif_reader):
is_error = False
for finder in self._error_finders:
            err = finder.is_error(file_path, exif_reader)
if err:
print finder.get_latest_text()
is_error |= err
if is_error:
self._move_into_error_dir(file_path)
return is_error
if __name__ == "__main__":
distance = 4
pan = 20
error_dir = "errors"
fast_km_h = 150
too_fast_km_h = 200
min_duplicates = 3
def print_help():
print """Usage: remove-duplicates.py [-h | -d] src_dir duplicate_dir
Finds images in src_dir and moves duplicates to duplicate_dir.
Both src_dir and duplicate_dir are mandatory. If src_dir is not .
and duplicate_dir is not given, it will be named "duplicate" and put
in the current directory.
If duplicate_dir does not exist, it will be created in the current
directory (no matter if it will be used or not).
In order to be considered a duplicate, the image must match ALL criteria.
With default settings, that means it must have travelled
less than """ + str(distance) + """ meters and be panned less than """ \
"" + str(pan) + """ degrees.
This supports riding straight ahead at a significant speed, making
panoramas while standing still, and waiting at a red light for it
to change to green.
Important: The upload.py from Mapillary uploads *recursively* so do not
put the duplicate_dir under the dir you are uploading from!
Options:
-e --error-dir Give the directory to put pictures into, if they
contain obvious errors.
Default value is""" + error_dir + """
-h --help Print this message and exit.
-d --distance Give the maximum distance in meters two images may be
taken apart and still be considered duplicates. Default is """ \
"" + str(distance) + """ meters.
The distance is calculated from embedded GPS data. If there
is no GPS data the images are ignored.
-a --fast The speed (km/h) which is a bit too fast.
E.g. 40 for a bicycle.
Default value is: """ + str(fast_km_h) + """ km/h
-t --too-fast The speed (km/h) which is way too fast.
E.g. 70 for a bicycle.
Default value is: """ + str(too_fast_km_h) + """ km/h
-p --pan The maximum angle in degrees (0-360) the image may be
panned and still be considered a duplicate.
Default is """ + str(pan) + """ degrees.
-m --min-dup Minimum duplicates for a duplicate to be removed.
Default is """ + str(min_duplicates), """.
When larger than 0 the duplicate feature is only used to
remove images due to larger stops, like a red traffic
light. If going really slow this will also cause moving
images.
When 0, individual images are also moved: when the speed
is slow, images will be moved, giving a more consistent
experience when viewing them one by one.
-n --dry-run Do not move any files. Just simulate.
-v --verbose Print extra info.
"""
dryrun = False
verbose = 0
try:
opts, args = getopt.getopt(sys.argv[1:], "hd:p:nve:m:a:t:",
["help", "distance=", "pan=", "dry-run",
"verbose", "error-dir", "min-dup",
"fast=", "too-fast="])
except getopt.GetoptError, err:
print str(err)
sys.exit(2)
for switch, value in opts:
if switch in ("-h", "--help"):
print_help()
sys.exit(0)
elif switch in ("-d", "--distance"):
distance = float(value)
elif switch in ("-p", "--pan"):
pan = float(value)
elif switch in ("-n", "--dry-run"):
dryrun = True
elif switch in ("-v", "--verbose"):
verbose += 1
elif switch in ("-e", "--error-dir"):
error_dir = value
elif switch in ("-m", "--min-dup"):
min_duplicates = int(value)
elif switch in ("-a", "--fast"):
fast_km_h = float(value)
elif switch in ("-t", "--too-fast"):
too_fast_km_h = float(value)
if len(args) == 1 and args[0] != ".":
duplicate_dir = "duplicates"
elif len(args) < 2:
print_help()
sys.exit(2)
else:
duplicate_dir = args[1]
src_dir = args[0]
distance_finder = GPSDistanceDuplicateFinder(distance)
direction_finder = GPSDirectionDuplicateFinder(pan)
speed_error_finder = GPSSpeedErrorFinder(fast_km_h, too_fast_km_h)
image_remover = ImageRemover(src_dir, duplicate_dir, error_dir)
image_remover.set_dry_run(dryrun)
image_remover.set_verbose(verbose)
# Modular: Multiple testers can be added.
image_remover.add_duplicate_finder(distance_finder)
image_remover.add_duplicate_finder(direction_finder)
image_remover.add_error_finder(speed_error_finder)
try:
image_remover.do_magic()
except KeyboardInterrupt:
print "You cancelled."
sys.exit(1)
finally:
show_split = False
if speed_error_finder.is_fast():
show_split = True
print
print ("It looks like you have gone really fast between"
+" some images.")
print "Strongly consider splitting them into multiple series."
print "See the messages earlier."
if speed_error_finder.is_too_fast():
show_split = True
print
print ("It looks like yo have gone unrealistically fast"
+ "between some images to be ok.")
print ("Mabye your GPS started out with a wrong location "
+ "or you traveled between sets?")
print "See the messages earlier."
if show_split:
print
print ("See http://blog.mapillary.com/update/2014/06/16/actioncam-workflow.html"
+ " on how")
print ("to use time_split.py to automatically split a lot "
+ "of images into multiple series.")<|fim▁end|> | the given exif is the data from the get_exif_data function.
"""
latlong = exif_reader.get_lat_lon()
|
<|file_name|>server.py<|end_file_name|><|fim▁begin|>from flask import Flask
import numerals
app = Flask("Numerical converter")
@app.route("/")
def home():
return "Hello from converter"
<|fim▁hole|>
@app.route("/<arabic>/roman")
def to_roman(arabic):
print("Converting {} to roman".format(arabic))
converted = numerals.convert_arabic_to_roman(int(arabic))
print("Conversion result: ", converted)
return converted
@app.route("/<roman>/arabic")
def to_arabic(roman):
print("Converting {} to arabic".format(roman))
converted = numerals.convert_roman_to_arabic(roman)
print("Conversion result: ", converted)
return str(converted)
if __name__ == "__main__":
app.run()<|fim▁end|> | |
<|file_name|>json.js<|end_file_name|><|fim▁begin|>var send = require("./index")
module.exports = sendJson
/* sendJson := (HttpRequest, HttpResponse, Value | {
body: Value,
headers?: Object<String, String>,
statusCode?: Number
})
*/
function sendJson(req, res, value, replacer, space) {
if (!value || (!value.statusCode && !value.headers)) {
value = { body: value }
}
value.headers = value.headers || {}
value.body = JSON.stringify(value.body, replacer, space)
value.headers["Content-Type"] = "application/json"<|fim▁hole|>}<|fim▁end|> |
send(req, res, value) |
<|file_name|>misc.ts<|end_file_name|><|fim▁begin|>/**
* Copyright (c) 2018-2021 mol* contributors, licensed under MIT, See LICENSE file for more info.
*
* @author Alexander Rose <[email protected]>
*/
export const halfPI = Math.PI / 2;
export const PiDiv180 = Math.PI / 180;
export function degToRad(deg: number) {
return deg * PiDiv180; // deg * Math.PI / 180
}
export function radToDeg(rad: number) {
return rad / PiDiv180; // rad * 180 / Math.PI
}
export function isPowerOfTwo(x: number) {
return (x !== 0) && (x & (x - 1)) === 0;
}<|fim▁hole|>
/** return the value that has the largest absolute value */
export function absMax(...values: number[]) {
let max = 0;
let absMax = 0;
for (let i = 0, il = values.length; i < il; ++i) {
const value = values[i];
const abs = Math.abs(value);
if (abs > absMax) {
max = value;
absMax = abs;
}
}
return max;
}
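// Illustrative example (not part of the original source): absMax(3, -7, 5)
// returns -7, i.e. the value with the largest magnitude keeps its sign.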
/** Length of an arc with angle in radians */
export function arcLength(angle: number, radius: number) {
return angle * radius;
}
/** Create an outward spiral of given `radius` on a 2d grid */
export function spiral2d(radius: number) {
let x = 0;
let y = 0;
const delta = [0, -1];
const size = radius * 2 + 1;
const halfSize = size / 2;
const out: [number, number][] = [];
for (let i = Math.pow(size, 2); i > 0; --i) {
if ((-halfSize < x && x <= halfSize) && (-halfSize < y && y <= halfSize)) {
out.push([x, y]);
}
if (x === y || (x < 0 && x === -y) || (x > 0 && x === 1 - y)) {
[delta[0], delta[1]] = [-delta[1], delta[0]]; // change direction
}
x += delta[0];
y += delta[1];
}
return out;
}<|fim▁end|> | |
<|file_name|>gen_test.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import, division, print_function, with_statement
import contextlib
import datetime
import functools
import sys
import textwrap
import time
import platform
import weakref
from tornado.concurrent import return_future, Future
from tornado.escape import url_escape
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop
from tornado.log import app_log
from tornado import stack_context
from tornado.testing import AsyncHTTPTestCase, AsyncTestCase, ExpectLog, gen_test
from tornado.test.util import unittest, skipOnTravis
from tornado.web import Application, RequestHandler, asynchronous, HTTPError
from tornado import gen
try:
from concurrent import futures
except ImportError:
futures = None
skipBefore33 = unittest.skipIf(sys.version_info < (3, 3), 'PEP 380 not available')
skipNotCPython = unittest.skipIf(platform.python_implementation() != 'CPython',
'Not CPython implementation')
class GenEngineTest(AsyncTestCase):
def setUp(self):
super(GenEngineTest, self).setUp()
self.named_contexts = []
def named_context(self, name):
@contextlib.contextmanager
def context():
self.named_contexts.append(name)
try:
yield
finally:
self.assertEqual(self.named_contexts.pop(), name)
return context
def run_gen(self, f):
f()
return self.wait()
def delay_callback(self, iterations, callback, arg):
"""Runs callback(arg) after a number of IOLoop iterations."""
if iterations == 0:
callback(arg)
else:
self.io_loop.add_callback(functools.partial(
self.delay_callback, iterations - 1, callback, arg))
@return_future
def async_future(self, result, callback):
self.io_loop.add_callback(callback, result)
def test_no_yield(self):
@gen.engine
def f():
self.stop()
self.run_gen(f)
def test_inline_cb(self):
@gen.engine
def f():
(yield gen.Callback("k1"))()
res = yield gen.Wait("k1")
self.assertTrue(res is None)
self.stop()
self.run_gen(f)
def test_ioloop_cb(self):
@gen.engine
def f():
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.stop()
self.run_gen(f)
def test_exception_phase1(self):
@gen.engine
def f():
1 / 0
self.assertRaises(ZeroDivisionError, self.run_gen, f)
def test_exception_phase2(self):
@gen.engine
def f():
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
1 / 0
self.assertRaises(ZeroDivisionError, self.run_gen, f)
def test_exception_in_task_phase1(self):
def fail_task(callback):
1 / 0
@gen.engine
def f():
try:
yield gen.Task(fail_task)
raise Exception("did not get expected exception")
except ZeroDivisionError:
self.stop()
self.run_gen(f)
def test_exception_in_task_phase2(self):
# This is the case that requires the use of stack_context in gen.engine
def fail_task(callback):
self.io_loop.add_callback(lambda: 1 / 0)
@gen.engine
def f():
try:
yield gen.Task(fail_task)
raise Exception("did not get expected exception")
except ZeroDivisionError:
self.stop()
self.run_gen(f)
def test_with_arg(self):
@gen.engine
def f():
(yield gen.Callback("k1"))(42)
res = yield gen.Wait("k1")
self.assertEqual(42, res)
self.stop()
self.run_gen(f)
def test_with_arg_tuple(self):
@gen.engine
def f():
(yield gen.Callback((1, 2)))((3, 4))
res = yield gen.Wait((1, 2))
self.assertEqual((3, 4), res)
self.stop()
self.run_gen(f)
def test_key_reuse(self):
@gen.engine
def f():
yield gen.Callback("k1")
yield gen.Callback("k1")
self.stop()
self.assertRaises(gen.KeyReuseError, self.run_gen, f)
def test_key_reuse_tuple(self):
@gen.engine
def f():
yield gen.Callback((1, 2))
yield gen.Callback((1, 2))
self.stop()
self.assertRaises(gen.KeyReuseError, self.run_gen, f)
def test_key_mismatch(self):
@gen.engine
def f():
yield gen.Callback("k1")
yield gen.Wait("k2")
self.stop()
self.assertRaises(gen.UnknownKeyError, self.run_gen, f)
def test_key_mismatch_tuple(self):
@gen.engine
def f():
yield gen.Callback((1, 2))
yield gen.Wait((2, 3))
self.stop()
self.assertRaises(gen.UnknownKeyError, self.run_gen, f)
def test_leaked_callback(self):
@gen.engine
def f():
yield gen.Callback("k1")
self.stop()
self.assertRaises(gen.LeakedCallbackError, self.run_gen, f)
def test_leaked_callback_tuple(self):
@gen.engine
def f():
yield gen.Callback((1, 2))
self.stop()
self.assertRaises(gen.LeakedCallbackError, self.run_gen, f)
def test_parallel_callback(self):
@gen.engine
def f():
for k in range(3):
self.io_loop.add_callback((yield gen.Callback(k)))
yield gen.Wait(1)
self.io_loop.add_callback((yield gen.Callback(3)))
yield gen.Wait(0)
yield gen.Wait(3)
yield gen.Wait(2)
self.stop()
self.run_gen(f)
def test_bogus_yield(self):
@gen.engine
def f():
yield 42
self.assertRaises(gen.BadYieldError, self.run_gen, f)
def test_bogus_yield_tuple(self):
@gen.engine
def f():
yield (1, 2)
self.assertRaises(gen.BadYieldError, self.run_gen, f)
def test_reuse(self):
@gen.engine
def f():
self.io_loop.add_callback((yield gen.Callback(0)))
yield gen.Wait(0)
self.stop()
self.run_gen(f)
self.run_gen(f)
def test_task(self):
@gen.engine
def f():
yield gen.Task(self.io_loop.add_callback)
self.stop()
self.run_gen(f)
def test_wait_all(self):
@gen.engine
def f():
(yield gen.Callback("k1"))("v1")
(yield gen.Callback("k2"))("v2")
results = yield gen.WaitAll(["k1", "k2"])
self.assertEqual(results, ["v1", "v2"])
self.stop()
self.run_gen(f)
def test_exception_in_yield(self):
@gen.engine
def f():
try:
yield gen.Wait("k1")
raise Exception("did not get expected exception")
except gen.UnknownKeyError:
pass
self.stop()
self.run_gen(f)
def test_resume_after_exception_in_yield(self):
@gen.engine
def f():
try:
yield gen.Wait("k1")
raise Exception("did not get expected exception")
except gen.UnknownKeyError:
pass
(yield gen.Callback("k2"))("v2")
self.assertEqual((yield gen.Wait("k2")), "v2")
self.stop()
self.run_gen(f)
def test_orphaned_callback(self):
@gen.engine
def f():
self.orphaned_callback = yield gen.Callback(1)
try:
self.run_gen(f)
raise Exception("did not get expected exception")
except gen.LeakedCallbackError:
pass
self.orphaned_callback()
def test_multi(self):
@gen.engine
def f():
(yield gen.Callback("k1"))("v1")
(yield gen.Callback("k2"))("v2")
results = yield [gen.Wait("k1"), gen.Wait("k2")]
self.assertEqual(results, ["v1", "v2"])
self.stop()
self.run_gen(f)
def test_multi_dict(self):
@gen.engine
def f():
(yield gen.Callback("k1"))("v1")
(yield gen.Callback("k2"))("v2")
results = yield dict(foo=gen.Wait("k1"), bar=gen.Wait("k2"))
self.assertEqual(results, dict(foo="v1", bar="v2"))
self.stop()
self.run_gen(f)
# The following tests explicitly run with both gen.Multi
# and gen.multi_future (Task returns a Future, so it can be used
# with either).
def test_multi_yieldpoint_delayed(self):
@gen.engine
def f():
# callbacks run at different times
responses = yield gen.Multi([
gen.Task(self.delay_callback, 3, arg="v1"),
gen.Task(self.delay_callback, 1, arg="v2"),
])
self.assertEqual(responses, ["v1", "v2"])
self.stop()
self.run_gen(f)
def test_multi_yieldpoint_dict_delayed(self):
@gen.engine
def f():
# callbacks run at different times
responses = yield gen.Multi(dict(
foo=gen.Task(self.delay_callback, 3, arg="v1"),
bar=gen.Task(self.delay_callback, 1, arg="v2"),
))
self.assertEqual(responses, dict(foo="v1", bar="v2"))
self.stop()
self.run_gen(f)
def test_multi_future_delayed(self):
@gen.engine
def f():
# callbacks run at different times
responses = yield gen.multi_future([
gen.Task(self.delay_callback, 3, arg="v1"),
gen.Task(self.delay_callback, 1, arg="v2"),
])
self.assertEqual(responses, ["v1", "v2"])
self.stop()
self.run_gen(f)
def test_multi_future_dict_delayed(self):
@gen.engine
def f():
# callbacks run at different times
responses = yield gen.multi_future(dict(
foo=gen.Task(self.delay_callback, 3, arg="v1"),
bar=gen.Task(self.delay_callback, 1, arg="v2"),
))
self.assertEqual(responses, dict(foo="v1", bar="v2"))
self.stop()
self.run_gen(f)
@skipOnTravis
@gen_test
def test_multi_performance(self):
# Yielding a list used to have quadratic performance; make
# sure a large list stays reasonable. On my laptop a list of
# 2000 used to take 1.8s, now it takes 0.12.
start = time.time()
yield [gen.Task(self.io_loop.add_callback) for i in range(2000)]
end = time.time()
self.assertLess(end - start, 1.0)
@gen_test
def test_multi_empty(self):
# Empty lists or dicts should return the same type.
x = yield []
self.assertTrue(isinstance(x, list))
y = yield {}
self.assertTrue(isinstance(y, dict))
@gen_test
def test_multi_mixed_types(self):
# A YieldPoint (Wait) and Future (Task) can be combined
# (and use the YieldPoint codepath)
(yield gen.Callback("k1"))("v1")
responses = yield [gen.Wait("k1"),
gen.Task(self.delay_callback, 3, arg="v2")]
self.assertEqual(responses, ["v1", "v2"])
@gen_test
def test_future(self):
result = yield self.async_future(1)
self.assertEqual(result, 1)
@gen_test
def test_multi_future(self):
results = yield [self.async_future(1), self.async_future(2)]
self.assertEqual(results, [1, 2])
@gen_test
def test_multi_dict_future(self):
results = yield dict(foo=self.async_future(1), bar=self.async_future(2))
self.assertEqual(results, dict(foo=1, bar=2))
def test_arguments(self):
@gen.engine
def f():
(yield gen.Callback("noargs"))()
self.assertEqual((yield gen.Wait("noargs")), None)
(yield gen.Callback("1arg"))(42)
self.assertEqual((yield gen.Wait("1arg")), 42)
(yield gen.Callback("kwargs"))(value=42)
result = yield gen.Wait("kwargs")
self.assertTrue(isinstance(result, gen.Arguments))
self.assertEqual(((), dict(value=42)), result)
self.assertEqual(dict(value=42), result.kwargs)
(yield gen.Callback("2args"))(42, 43)
result = yield gen.Wait("2args")
self.assertTrue(isinstance(result, gen.Arguments))
self.assertEqual(((42, 43), {}), result)
self.assertEqual((42, 43), result.args)
def task_func(callback):
callback(None, error="foo")
result = yield gen.Task(task_func)
self.assertTrue(isinstance(result, gen.Arguments))
self.assertEqual(((None,), dict(error="foo")), result)
self.stop()
self.run_gen(f)
def test_stack_context_leak(self):
# regression test: repeated invocations of a gen-based
# function should not result in accumulated stack_contexts
def _stack_depth():
head = stack_context._state.contexts[1]
length = 0
while head is not None:
length += 1
head = head.old_contexts[1]
return length
@gen.engine
def inner(callback):
yield gen.Task(self.io_loop.add_callback)
callback()
@gen.engine
def outer():
for i in range(10):
yield gen.Task(inner)
stack_increase = _stack_depth() - initial_stack_depth
self.assertTrue(stack_increase <= 2)
self.stop()
initial_stack_depth = _stack_depth()
self.run_gen(outer)
def test_stack_context_leak_exception(self):
# same as previous, but with a function that exits with an exception
@gen.engine
def inner(callback):
yield gen.Task(self.io_loop.add_callback)
1 / 0
@gen.engine
def outer():
for i in range(10):
try:
yield gen.Task(inner)
except ZeroDivisionError:
pass
stack_increase = len(stack_context._state.contexts) - initial_stack_depth
self.assertTrue(stack_increase <= 2)
self.stop()
initial_stack_depth = len(stack_context._state.contexts)
self.run_gen(outer)
<|fim▁hole|> # Technically this function should stack_context.wrap its callback
# upon entry. However, it is very common for this step to be
# omitted.
def step2():
self.assertEqual(self.named_contexts, ['a'])
self.io_loop.add_callback(callback)
with stack_context.StackContext(self.named_context('a')):
self.io_loop.add_callback(step2)
@gen_test
def test_wait_transfer_stack_context(self):
# Wait should not pick up contexts from where callback was invoked,
# even if that function improperly fails to wrap its callback.
cb = yield gen.Callback('k1')
self.function_with_stack_context(cb)
self.assertEqual(self.named_contexts, [])
yield gen.Wait('k1')
self.assertEqual(self.named_contexts, [])
@gen_test
def test_task_transfer_stack_context(self):
yield gen.Task(self.function_with_stack_context)
self.assertEqual(self.named_contexts, [])
def test_raise_after_stop(self):
# This pattern will be used in the following tests so make sure
# the exception propagates as expected.
@gen.engine
def f():
self.stop()
1 / 0
with self.assertRaises(ZeroDivisionError):
self.run_gen(f)
def test_sync_raise_return(self):
# gen.Return is allowed in @gen.engine, but it may not be used
# to return a value.
@gen.engine
def f():
self.stop(42)
raise gen.Return()
result = self.run_gen(f)
self.assertEqual(result, 42)
def test_async_raise_return(self):
@gen.engine
def f():
yield gen.Task(self.io_loop.add_callback)
self.stop(42)
raise gen.Return()
result = self.run_gen(f)
self.assertEqual(result, 42)
def test_sync_raise_return_value(self):
@gen.engine
def f():
raise gen.Return(42)
with self.assertRaises(gen.ReturnValueIgnoredError):
self.run_gen(f)
def test_sync_raise_return_value_tuple(self):
@gen.engine
def f():
raise gen.Return((1, 2))
with self.assertRaises(gen.ReturnValueIgnoredError):
self.run_gen(f)
def test_async_raise_return_value(self):
@gen.engine
def f():
yield gen.Task(self.io_loop.add_callback)
raise gen.Return(42)
with self.assertRaises(gen.ReturnValueIgnoredError):
self.run_gen(f)
def test_async_raise_return_value_tuple(self):
@gen.engine
def f():
yield gen.Task(self.io_loop.add_callback)
raise gen.Return((1, 2))
with self.assertRaises(gen.ReturnValueIgnoredError):
self.run_gen(f)
def test_return_value(self):
# It is an error to apply @gen.engine to a function that returns
# a value.
@gen.engine
def f():
return 42
with self.assertRaises(gen.ReturnValueIgnoredError):
self.run_gen(f)
def test_return_value_tuple(self):
# It is an error to apply @gen.engine to a function that returns
# a value.
@gen.engine
def f():
return (1, 2)
with self.assertRaises(gen.ReturnValueIgnoredError):
self.run_gen(f)
@skipNotCPython
def test_task_refcounting(self):
# On CPython, tasks and their arguments should be released immediately
# without waiting for garbage collection.
@gen.engine
def f():
class Foo(object):
pass
arg = Foo()
self.arg_ref = weakref.ref(arg)
task = gen.Task(self.io_loop.add_callback, arg=arg)
self.task_ref = weakref.ref(task)
yield task
self.stop()
self.run_gen(f)
self.assertIs(self.arg_ref(), None)
self.assertIs(self.task_ref(), None)
class GenCoroutineTest(AsyncTestCase):
def setUp(self):
# Stray StopIteration exceptions can lead to tests exiting prematurely,
# so we need explicit checks here to make sure the tests run all
# the way through.
self.finished = False
super(GenCoroutineTest, self).setUp()
def tearDown(self):
super(GenCoroutineTest, self).tearDown()
assert self.finished
@gen_test
def test_sync_gen_return(self):
@gen.coroutine
def f():
raise gen.Return(42)
result = yield f()
self.assertEqual(result, 42)
self.finished = True
@gen_test
def test_async_gen_return(self):
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_callback)
raise gen.Return(42)
result = yield f()
self.assertEqual(result, 42)
self.finished = True
@gen_test
def test_sync_return(self):
@gen.coroutine
def f():
return 42
result = yield f()
self.assertEqual(result, 42)
self.finished = True
@skipBefore33
@gen_test
def test_async_return(self):
# It is a compile-time error to return a value in a generator
# before Python 3.3, so we must test this with exec.
# Flatten the real global and local namespace into our fake globals:
# it's all global from the perspective of f().
global_namespace = dict(globals(), **locals())
local_namespace = {}
exec(textwrap.dedent("""
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_callback)
return 42
"""), global_namespace, local_namespace)
result = yield local_namespace['f']()
self.assertEqual(result, 42)
self.finished = True
@skipBefore33
@gen_test
def test_async_early_return(self):
# A yield statement exists but is not executed, which means
# this function "returns" via an exception. This exception
# doesn't happen before the exception handling is set up.
global_namespace = dict(globals(), **locals())
local_namespace = {}
exec(textwrap.dedent("""
@gen.coroutine
def f():
if True:
return 42
yield gen.Task(self.io_loop.add_callback)
"""), global_namespace, local_namespace)
result = yield local_namespace['f']()
self.assertEqual(result, 42)
self.finished = True
@gen_test
def test_sync_return_no_value(self):
@gen.coroutine
def f():
return
result = yield f()
self.assertEqual(result, None)
self.finished = True
@gen_test
def test_async_return_no_value(self):
# Without a return value we don't need python 3.3.
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_callback)
return
result = yield f()
self.assertEqual(result, None)
self.finished = True
@gen_test
def test_sync_raise(self):
@gen.coroutine
def f():
1 / 0
# The exception is raised when the future is yielded
# (or equivalently when its result method is called),
# not when the function itself is called).
future = f()
with self.assertRaises(ZeroDivisionError):
yield future
self.finished = True
@gen_test
def test_async_raise(self):
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_callback)
1 / 0
future = f()
with self.assertRaises(ZeroDivisionError):
yield future
self.finished = True
@gen_test
def test_pass_callback(self):
@gen.coroutine
def f():
raise gen.Return(42)
result = yield gen.Task(f)
self.assertEqual(result, 42)
self.finished = True
@gen_test
def test_replace_yieldpoint_exception(self):
# Test exception handling: a coroutine can catch one exception
# raised by a yield point and raise a different one.
@gen.coroutine
def f1():
1 / 0
@gen.coroutine
def f2():
try:
yield f1()
except ZeroDivisionError:
raise KeyError()
future = f2()
with self.assertRaises(KeyError):
yield future
self.finished = True
@gen_test
def test_swallow_yieldpoint_exception(self):
# Test exception handling: a coroutine can catch an exception
# raised by a yield point and not raise a different one.
@gen.coroutine
def f1():
1 / 0
@gen.coroutine
def f2():
try:
yield f1()
except ZeroDivisionError:
raise gen.Return(42)
result = yield f2()
self.assertEqual(result, 42)
self.finished = True
@gen_test
def test_replace_context_exception(self):
# Test exception handling: exceptions thrown into the stack context
# can be caught and replaced.
# Note that this test and the following are for behavior that is
# not really supported any more: coroutines no longer create a
# stack context automatically; but one is created after the first
# YieldPoint (i.e. not a Future).
@gen.coroutine
def f2():
(yield gen.Callback(1))()
yield gen.Wait(1)
self.io_loop.add_callback(lambda: 1 / 0)
try:
yield gen.Task(self.io_loop.add_timeout,
self.io_loop.time() + 10)
except ZeroDivisionError:
raise KeyError()
future = f2()
with self.assertRaises(KeyError):
yield future
self.finished = True
@gen_test
def test_swallow_context_exception(self):
# Test exception handling: exceptions thrown into the stack context
# can be caught and ignored.
@gen.coroutine
def f2():
(yield gen.Callback(1))()
yield gen.Wait(1)
self.io_loop.add_callback(lambda: 1 / 0)
try:
yield gen.Task(self.io_loop.add_timeout,
self.io_loop.time() + 10)
except ZeroDivisionError:
raise gen.Return(42)
result = yield f2()
self.assertEqual(result, 42)
self.finished = True
@gen_test
def test_moment(self):
calls = []
@gen.coroutine
def f(name, yieldable):
for i in range(5):
calls.append(name)
yield yieldable
# First, confirm the behavior without moment: each coroutine
# monopolizes the event loop until it finishes.
immediate = Future()
immediate.set_result(None)
yield [f('a', immediate), f('b', immediate)]
self.assertEqual(''.join(calls), 'aaaaabbbbb')
# With moment, they take turns.
calls = []
yield [f('a', gen.moment), f('b', gen.moment)]
self.assertEqual(''.join(calls), 'ababababab')
self.finished = True
calls = []
yield [f('a', gen.moment), f('b', immediate)]
self.assertEqual(''.join(calls), 'abbbbbaaaa')
@gen_test
def test_sleep(self):
yield gen.sleep(0.01)
self.finished = True
class GenSequenceHandler(RequestHandler):
@asynchronous
@gen.engine
def get(self):
self.io_loop = self.request.connection.stream.io_loop
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.write("1")
self.io_loop.add_callback((yield gen.Callback("k2")))
yield gen.Wait("k2")
self.write("2")
# reuse an old key
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.finish("3")
class GenCoroutineSequenceHandler(RequestHandler):
@gen.coroutine
def get(self):
self.io_loop = self.request.connection.stream.io_loop
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.write("1")
self.io_loop.add_callback((yield gen.Callback("k2")))
yield gen.Wait("k2")
self.write("2")
# reuse an old key
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.finish("3")
class GenCoroutineUnfinishedSequenceHandler(RequestHandler):
@asynchronous
@gen.coroutine
def get(self):
self.io_loop = self.request.connection.stream.io_loop
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.write("1")
self.io_loop.add_callback((yield gen.Callback("k2")))
yield gen.Wait("k2")
self.write("2")
# reuse an old key
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
# just write, don't finish
self.write("3")
class GenTaskHandler(RequestHandler):
@asynchronous
@gen.engine
def get(self):
io_loop = self.request.connection.stream.io_loop
client = AsyncHTTPClient(io_loop=io_loop)
response = yield gen.Task(client.fetch, self.get_argument('url'))
response.rethrow()
self.finish(b"got response: " + response.body)
class GenExceptionHandler(RequestHandler):
@asynchronous
@gen.engine
def get(self):
# This test depends on the order of the two decorators.
io_loop = self.request.connection.stream.io_loop
yield gen.Task(io_loop.add_callback)
raise Exception("oops")
class GenCoroutineExceptionHandler(RequestHandler):
@gen.coroutine
def get(self):
# This test depends on the order of the two decorators.
io_loop = self.request.connection.stream.io_loop
yield gen.Task(io_loop.add_callback)
raise Exception("oops")
class GenYieldExceptionHandler(RequestHandler):
@asynchronous
@gen.engine
def get(self):
io_loop = self.request.connection.stream.io_loop
# Test the interaction of the two stack_contexts.
def fail_task(callback):
io_loop.add_callback(lambda: 1 / 0)
try:
yield gen.Task(fail_task)
raise Exception("did not get expected exception")
except ZeroDivisionError:
self.finish('ok')
class UndecoratedCoroutinesHandler(RequestHandler):
@gen.coroutine
def prepare(self):
self.chunks = []
yield gen.Task(IOLoop.current().add_callback)
self.chunks.append('1')
@gen.coroutine
def get(self):
self.chunks.append('2')
yield gen.Task(IOLoop.current().add_callback)
self.chunks.append('3')
yield gen.Task(IOLoop.current().add_callback)
self.write(''.join(self.chunks))
class AsyncPrepareErrorHandler(RequestHandler):
@gen.coroutine
def prepare(self):
yield gen.Task(IOLoop.current().add_callback)
raise HTTPError(403)
def get(self):
self.finish('ok')
class GenWebTest(AsyncHTTPTestCase):
def get_app(self):
return Application([
('/sequence', GenSequenceHandler),
('/coroutine_sequence', GenCoroutineSequenceHandler),
('/coroutine_unfinished_sequence',
GenCoroutineUnfinishedSequenceHandler),
('/task', GenTaskHandler),
('/exception', GenExceptionHandler),
('/coroutine_exception', GenCoroutineExceptionHandler),
('/yield_exception', GenYieldExceptionHandler),
('/undecorated_coroutine', UndecoratedCoroutinesHandler),
('/async_prepare_error', AsyncPrepareErrorHandler),
])
def test_sequence_handler(self):
response = self.fetch('/sequence')
self.assertEqual(response.body, b"123")
def test_coroutine_sequence_handler(self):
response = self.fetch('/coroutine_sequence')
self.assertEqual(response.body, b"123")
def test_coroutine_unfinished_sequence_handler(self):
response = self.fetch('/coroutine_unfinished_sequence')
self.assertEqual(response.body, b"123")
def test_task_handler(self):
response = self.fetch('/task?url=%s' % url_escape(self.get_url('/sequence')))
self.assertEqual(response.body, b"got response: 123")
def test_exception_handler(self):
# Make sure we get an error and not a timeout
with ExpectLog(app_log, "Uncaught exception GET /exception"):
response = self.fetch('/exception')
self.assertEqual(500, response.code)
def test_coroutine_exception_handler(self):
# Make sure we get an error and not a timeout
with ExpectLog(app_log, "Uncaught exception GET /coroutine_exception"):
response = self.fetch('/coroutine_exception')
self.assertEqual(500, response.code)
def test_yield_exception_handler(self):
response = self.fetch('/yield_exception')
self.assertEqual(response.body, b'ok')
def test_undecorated_coroutines(self):
response = self.fetch('/undecorated_coroutine')
self.assertEqual(response.body, b'123')
def test_async_prepare_error_handler(self):
response = self.fetch('/async_prepare_error')
self.assertEqual(response.code, 403)
class WithTimeoutTest(AsyncTestCase):
@gen_test
def test_timeout(self):
with self.assertRaises(gen.TimeoutError):
yield gen.with_timeout(datetime.timedelta(seconds=0.1),
Future())
@gen_test
def test_completes_before_timeout(self):
future = Future()
self.io_loop.add_timeout(datetime.timedelta(seconds=0.1),
lambda: future.set_result('asdf'))
result = yield gen.with_timeout(datetime.timedelta(seconds=3600),
future, io_loop=self.io_loop)
self.assertEqual(result, 'asdf')
@gen_test
def test_fails_before_timeout(self):
future = Future()
self.io_loop.add_timeout(
datetime.timedelta(seconds=0.1),
lambda: future.set_exception(ZeroDivisionError()))
with self.assertRaises(ZeroDivisionError):
yield gen.with_timeout(datetime.timedelta(seconds=3600),
future, io_loop=self.io_loop)
@gen_test
def test_already_resolved(self):
future = Future()
future.set_result('asdf')
result = yield gen.with_timeout(datetime.timedelta(seconds=3600),
future, io_loop=self.io_loop)
self.assertEqual(result, 'asdf')
@unittest.skipIf(futures is None, 'futures module not present')
@gen_test
def test_timeout_concurrent_future(self):
with futures.ThreadPoolExecutor(1) as executor:
with self.assertRaises(gen.TimeoutError):
yield gen.with_timeout(self.io_loop.time(),
executor.submit(time.sleep, 0.1))
@unittest.skipIf(futures is None, 'futures module not present')
@gen_test
def test_completed_concurrent_future(self):
with futures.ThreadPoolExecutor(1) as executor:
yield gen.with_timeout(datetime.timedelta(seconds=3600),
executor.submit(lambda: None))
class WaitIteratorTest(AsyncTestCase):
@gen_test
def test_empty_iterator(self):
g = gen.WaitIterator()
self.assertTrue(g.done(), 'empty generator iterated')
with self.assertRaises(ValueError):
g = gen.WaitIterator(False, bar=False)
self.assertEqual(g.current_index, None, "bad nil current index")
self.assertEqual(g.current_future, None, "bad nil current future")
@gen_test
def test_already_done(self):
f1 = Future()
f2 = Future()
f3 = Future()
f1.set_result(24)
f2.set_result(42)
f3.set_result(84)
g = gen.WaitIterator(f1, f2, f3)
i = 0
while not g.done():
r = yield g.next()
# Order is not guaranteed, but the current implementation
# preserves ordering of already-done Futures.
if i == 0:
self.assertEqual(g.current_index, 0)
self.assertIs(g.current_future, f1)
self.assertEqual(r, 24)
elif i == 1:
self.assertEqual(g.current_index, 1)
self.assertIs(g.current_future, f2)
self.assertEqual(r, 42)
elif i == 2:
self.assertEqual(g.current_index, 2)
self.assertIs(g.current_future, f3)
self.assertEqual(r, 84)
i += 1
self.assertEqual(g.current_index, None, "bad nil current index")
self.assertEqual(g.current_future, None, "bad nil current future")
dg = gen.WaitIterator(f1=f1, f2=f2)
while not dg.done():
dr = yield dg.next()
if dg.current_index == "f1":
self.assertTrue(dg.current_future==f1 and dr==24,
"WaitIterator dict status incorrect")
elif dg.current_index == "f2":
self.assertTrue(dg.current_future==f2 and dr==42,
"WaitIterator dict status incorrect")
else:
self.fail("got bad WaitIterator index {}".format(
dg.current_index))
i += 1
self.assertEqual(dg.current_index, None, "bad nil current index")
self.assertEqual(dg.current_future, None, "bad nil current future")
def finish_coroutines(self, iteration, futures):
if iteration == 3:
futures[2].set_result(24)
elif iteration == 5:
futures[0].set_exception(ZeroDivisionError())
elif iteration == 8:
futures[1].set_result(42)
futures[3].set_result(84)
if iteration < 8:
self.io_loop.add_callback(self.finish_coroutines, iteration+1, futures)
@gen_test
def test_iterator(self):
futures = [Future(), Future(), Future(), Future()]
self.finish_coroutines(0, futures)
g = gen.WaitIterator(*futures)
i = 0
while not g.done():
try:
r = yield g.next()
except ZeroDivisionError:
self.assertIs(g.current_future, futures[0],
'exception future invalid')
else:
if i == 0:
self.assertEqual(r, 24, 'iterator value incorrect')
self.assertEqual(g.current_index, 2, 'wrong index')
elif i == 2:
self.assertEqual(r, 42, 'iterator value incorrect')
self.assertEqual(g.current_index, 1, 'wrong index')
elif i == 3:
self.assertEqual(r, 84, 'iterator value incorrect')
self.assertEqual(g.current_index, 3, 'wrong index')
i += 1
if __name__ == '__main__':
unittest.main()<|fim▁end|> | def function_with_stack_context(self, callback): |
<|file_name|>bound.js<|end_file_name|><|fim▁begin|>'use strict';<|fim▁hole|>// List is in format of {LB:lb,UB:ub}
var months = ['JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC'];
function getMonth(dateString) {
var m = parseInt(dateString.split('-')[1])-1;
return months[m];
}
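// Illustrative example (assumes "YYYY-MM-DD" date strings): getMonth("2016-03-15")
// reads the month field "03" and returns 'MAR'.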
module.exports = function(bounds, steps, callback) {
var steps_bound = [];
var month, month_cost = {};
var i,bound;
var bm = []; // Temporary bounds
// Start with no bounds
steps.forEach(function(step) {
// lb, ub, lb defined
steps_bound.push([0, null, false]);
});
var c = 0;
bounds.forEach(function(bound, index){
switch(bound.type) {
case 'NOB': // We are good
return;
case 'LBC':
for(i = 0; i < steps.length; i++) {
if( steps_bound[i][0] === null || steps_bound[i][0] < bound.bound ){
steps_bound[i][0] = bound.bound;
steps_bound[i][2] = true;
}
}
return;
case 'LBM':
case 'LBT':
var b;
bm = {};
bound.bound.forEach(function(b) {
bm[b[0]] = b[1];
});
for(i = 0; i < steps.length; i++) {
// Almost the same code for LBM and LBT
b = (bound.type === 'LBM') ? bm[getMonth(steps[i])] : bm[steps[i]];
if( (typeof b !== 'undefined' && b !== null) && (steps_bound[i][0] === null || steps_bound[i][0] < b) ){
steps_bound[i][0] = b;
steps_bound[i][2] = true;
}
}
return;
case 'UBC':
for( i = 0; i < steps.length; i++ ){
if( steps_bound[i][1] === null || steps_bound[i][1] > bound.bound) {
steps_bound[i][1] = bound.bound;
}
}
return;
case 'UBM':
case 'UBT':
bm = {};
bound.bound.forEach(function(b) {
bm[b[0]] = b[1];
});
for( i = 0; i < steps.length; i++ ){
// Almost the same code for BM and BT
b = (bound.type === 'UBM') ? bm[getMonth(steps[i])] : bm[steps[i]];
if( (typeof b !== 'undefined' && b !== null ) && (steps_bound[i][1] === null || steps_bound[i][1] > b) ) {
steps_bound[i][1] = b;
}
}
return;
case 'EQT':
var b;
bm = {};
bound.bound.forEach(function(b) {
bm[b[0]] = b[1];
});
for( i = 0; i < steps.length; i++ ){
b = bm[steps[i]];
if( typeof b !=='undefined' && b !== null) {
if( steps_bound[i][0] === null || steps_bound[i][0] < b ) {
steps_bound[i][0] = b;
steps_bound[i][2] = true;
}
if( steps_bound[i][1] === null || steps_bound[i][1] > b ) {
steps_bound[i][1] = b;
}
}
}
return;
default :
throw new Error('Bad Bound Type: '+bound.type);
}
});
return steps_bound.map((bound) => {
return {
LB : bound[0],
UB : bound[1],
LBDefined : bound[2]
}
});
};<|fim▁end|> |
// Given a bound list and the time steps,
// return the LB and UB at each timestep. |
<|file_name|>CommentElement.ts<|end_file_name|><|fim▁begin|>import runTest = require("../TestRunner");
import ExpectElementStart = require("../ExpectedCallbacks/ExpectElementStart");<|fim▁hole|>describe("Comments", () => {
runTest({
name: "Single Element with nested comment",
input: "<dimension><!-- random random --></dimension>",
expect: [
ExpectElementStart("dimension"),
ExpectComment(" random random "),
ExpectElementEnd("dimension")
]
});
});<|fim▁end|> | import ExpectElementEnd = require("../ExpectedCallbacks/ExpectElementEnd");
import ExpectComment = require("../ExpectedCallbacks/ExpectComment");
|
<|file_name|>main.component.ts<|end_file_name|><|fim▁begin|>import { Component, OnInit } from '@angular/core'
import { fromEvent as observableFromEvent } from 'rxjs'
import { Router, RouterEvent, NavigationStart, NavigationEnd, NavigationCancel, NavigationError } from '@angular/router'
import { ToastrService } from 'ngx-toastr'
import { ConfigService } from '../../services/config.service'
import { BModule } from '../../models/bmodules'
import { Profile } from '../../models/profile'
@Component({
selector: 'common-main',
templateUrl: './main.component.html',
styleUrls: ['./main.component.scss']
})
export class MainComponent implements OnInit {
profile: Profile
profileName: string
username: string
version: string
networkStatus = navigator.onLine
databaseStatus = false
isOnline = false
pageLoading = false
bmodules: BModule[] = []
constructor (
public configService: ConfigService,
private router: Router,
private toastr: ToastrService
) {
this.router.events.subscribe((event: RouterEvent) => {
this._navigationInterceptor(event)
})
}
menu: any[] = []
ngOnInit () {
if (!this.configService.profiles.length) {
this.router.navigate(['/setup'])
return
}
this.version = this.configService.version
this.profile = this.configService.getProfile()
if (this.profile) {
this.profileName = this.profile.name
this.username = this.configService.profile.remoteUsername
}
observableFromEvent(window, 'online')
.subscribe(e => {
this.networkStatus = true
this.setStatus()
})
observableFromEvent(window, 'offline')
.subscribe(e => {
this.networkStatus = false
this.setStatus()
})
<|fim▁hole|> this.setStatus()
}
setStatus () {
this.isOnline = this.networkStatus && this.databaseStatus
}
toggleSidemenu () {
this.configService.sidebarClosed = !this.configService.sidebarClosed
}
private _navigationInterceptor (event: RouterEvent): void {
if (event instanceof NavigationStart) {
this.pageLoading = true
}
if (event instanceof NavigationEnd) {
this._hideSpinner()
}
if (event instanceof NavigationCancel) {
this._hideSpinner()
}
if (event instanceof NavigationError) {
this._hideSpinner()
this.toastr.error('Could not load module. Check your internet connection', 'Load Failed')
}
}
private _hideSpinner (): void {
this.pageLoading = false
}
}<|fim▁end|> | |
<|file_name|>linear.py<|end_file_name|><|fim▁begin|>from sklearn import datasets
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
loaded_data = datasets.load_boston()
data_X = loaded_data.data
data_y = loaded_data.target
model = LinearRegression()
model.fit(data_X, data_y)
print(model.predict(data_X[:4,:]))
print(data_y[:4])
print(model.coef_)
print(model.intercept_)
print(model.score(data_X, data_y))
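# Note: for LinearRegression, score() returns the coefficient of determination
# (R^2) on the given data, so values close to 1.0 indicate a good fit.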
#X, y = datasets.make_regression(n_samples=100, n_features=1, n_targets=1, noise=20)
#plt.scatter(X,y)<|fim▁hole|><|fim▁end|> | #plt.show() |
<|file_name|>line_nav_iframes_in_inline_block.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
"""Test of line navigation output of Firefox."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
# Work around some new quirk in Gecko that causes this test to fail if
# run via the test harness rather than manually.
sequence.append(KeyComboAction("<Control>r"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.AssertPresentationAction(
"1. Top of file",
["BRAILLE LINE: 'Line 1'",
" VISIBLE: 'Line 1', cursor=1",
"SPEECH OUTPUT: 'Line 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"2. Line Down",
["BRAILLE LINE: 'Line 3'",
" VISIBLE: 'Line 3', cursor=1",
"SPEECH OUTPUT: 'Line 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"3. Line Up",
["BRAILLE LINE: 'Line 1'",<|fim▁hole|> "SPEECH OUTPUT: 'Line 1'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()<|fim▁end|> | " VISIBLE: 'Line 1', cursor=1", |
<|file_name|>login.service.ts<|end_file_name|><|fim▁begin|>import { HttpClient } from '@angular/common/http';
import { Injectable } from '@angular/core';
import { UserRoutes } from '@shared/routes/user.routes';
import { LoginViewModel } from '@shared/view-models/create-user/login.view-model';
import { TokenViewModel } from '@shared/view-models/create-user/token.view-model';
@Injectable({
providedIn: 'root'
})
export class LoginService {
<|fim▁hole|> return this.http.post<TokenViewModel>(UserRoutes.login().client(), viewModel).toPromise();
}
}<|fim▁end|> | constructor(private http: HttpClient) { }
login(viewModel: LoginViewModel): Promise<TokenViewModel | null> { |
<|file_name|>config.rs<|end_file_name|><|fim▁begin|>use rocket::config::ConfigError;
use rocket::Config;
use std::path::{Path, PathBuf};
#[derive(Debug)]
pub struct MoMMIConfig {
/// Address, Password
commloop: Option<(String, String)>,
github_key: Option<String>,
changelog_repo_path: Option<PathBuf>,
verify_github: bool,
changelog_delay: u64,
ssh_key: Option<PathBuf>,
changelog_repo_name: Option<String>,
}
impl MoMMIConfig {
pub fn new(config: &Config) -> Result<MoMMIConfig, String> {
//println!("{:?}", config.extras);
let commloop_address = match config.get_str("commloop-address") {
Ok(x) => Some(x.to_owned()),
Err(ConfigError::Missing(_)) => None,
Err(x) => return Err(format!("Unable to fetch commloop address config: {}", x)),
};
let commloop_password = match config.get_str("commloop-password") {
Ok(x) => Some(x.to_owned()),
Err(ConfigError::Missing(_)) => None,
Err(x) => return Err(format!("Unable to fetch commloop password config: {}", x)),
};
let commloop =
match (commloop_address, commloop_password) {
(Some(addr), Some(pass)) => Some((addr, pass)),
(None, None) => None,
_ => return Err(
"commloop-address and commloop-password must either both or neither be set."
.to_owned(),
),
};
let github_key = match config.get_str("github-key") {
Ok(x) => Some(x.to_owned()),
Err(ConfigError::Missing(_)) => None,
Err(x) => return Err(format!("Unable to fetch github key config: {}", x)),
};
let changelog_repo_path = match config.get_str("changelog-repo-path") {
Ok(x) => Some(x.into()),
Err(ConfigError::Missing(_)) => None,
Err(x) => return Err(format!("Unable to fetch changelog repo path config: {}", x)),
};
let changelog_repo_name = match config.get_str("changelog-repo-name") {
Ok(x) => Some(x.into()),
Err(ConfigError::Missing(_)) => None,
Err(x) => return Err(format!("Unable to fetch changelog repo name config: {}", x)),
};
let verify_github = match config.get_bool("verify-github") {
Ok(x) => x,
Err(ConfigError::Missing(_)) => true,
Err(x) => return Err(format!("Unable to fetch verify_github config: {}", x)),
};
<|fim▁hole|> Ok(x) => Some(x.into()),
Err(ConfigError::Missing(_)) => None,
Err(x) => return Err(format!("Unable to fetch ssh key config: {}", x)),
};
let changelog_delay = match config.get_int("changelog-delay") {
Ok(x) => x as u64,
Err(ConfigError::Missing(_)) => 5,
Err(x) => return Err(format!("Unable to fetch ssh key config: {}", x)),
};
Ok(MoMMIConfig {
commloop,
github_key,
changelog_repo_path,
verify_github,
ssh_key,
changelog_delay,
changelog_repo_name,
})
}
// Order of the tuple is address, password.
pub fn get_commloop(&self) -> Option<(&str, &str)> {
match self.commloop {
None => None,
Some((ref addr, ref pass)) => Some((addr.as_str(), pass.as_str())),
}
}
pub fn has_commloop(&self) -> bool {
self.commloop.is_some()
}
pub fn has_github_key(&self) -> bool {
self.github_key.is_some()
}
pub fn get_github_key(&self) -> Option<&str> {
self.github_key.as_ref().map(String::as_ref)
}
pub fn has_changelog_repo_path(&self) -> bool {
self.changelog_repo_path.is_some()
}
pub fn get_changelog_repo_path(&self) -> Option<&Path> {
self.changelog_repo_path.as_ref().map(|p| &**p)
}
pub fn verify_github(&self) -> bool {
self.verify_github
}
pub fn get_changelog_delay(&self) -> u64 {
self.changelog_delay
}
pub fn get_ssh_key(&self) -> Option<&Path> {
self.ssh_key.as_ref().map(|p| &**p)
}
pub fn get_changelog_repo_name(&self) -> Option<&str> {
self.changelog_repo_name.as_ref().map(|p| &**p)
}
}<|fim▁end|> | let ssh_key = match config.get_str("ssh-key") { |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>require('dotenv').config({ silent: true });<|fim▁hole|>var Express = require('express');
var path = require('path');
var fs = require('fs');
var merge = require('lodash/merge');
var proxy = require('proxy-middleware');
var ejs = require('ejs');
var config = require('./config');
var server = new Express();
server.set('port', config.PORT);
server.engine('html', require('ejs').renderFile);
server.set('view engine', 'ejs');
server.set('views', path.resolve(__dirname, '../www'));
server.locals.CONFIG = escape(JSON.stringify(config));
server.use(config.API_PROXY_PATH, proxy(config.API_ENDPOINT));
server.get('/', function (req, res) {
res.render('index.html');
});
server.use(Express.static(path.resolve(__dirname, '../www')));
server.get('/404', function (req, res) {
res.render('404.html');
});
server.listen(server.get('port'), function (err) {
if (err) {
console.log('error while starting server', err);
}
console.log('Gandalf has started listening at localhost:' + server.get('port'));
});<|fim▁end|> | |
<|file_name|>angle.rs<|end_file_name|><|fim▁begin|>/*
Copyright (c) 2015, 2016 Saurav Sachidanand
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
//! Angles for astronomy
use std;
pub const TWO_PI: f64 = 2.0 * std::f64::consts::PI;
/**
Computes the angular separation between two angular points
# Arguments
* `p1a1`: Angle 1 of point 1 *| in radians*
* `p1a2`: Angle 2 of point 1 *| in radians*
* `p2a1`: Angle 1 of point 2 *| in radians*
* `p2a2`: Angle 2 of point 2 *| in radians*
Angle 1 may be right ascension or longitude.
Angle 2 may be declination or latitude.
**/
#[inline]
pub fn anglr_sepr(p1a1: f64, p1a2: f64, p2a1: f64, p2a2: f64) -> f64
{
(
p1a2.sin() * p2a2.sin()
+ p1a2.cos() * p2a2.cos() * (p1a1 - p2a1).cos()
).acos()
}
/**
Computes an angle in degrees with decimals, from an angle
expressed in degrees, arcminutes and arcseconds
# Returns
* `deg`: Angle in degrees with decimals
# Arguments
* `deg`: Degrees
* `min`: Arcminutes
* `sec`: Arcseconds
**/
#[inline]
pub fn deg_frm_dms(deg: i64, min: i64, sec: f64) -> f64
{
let (M, S) =
if deg < 0 { (-min.abs(), -sec.abs()) }
else { (min, sec) };
(deg as f64) + (M as f64)/60.0 + S/3600.0
}
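// Illustrative value (not from the original crate): deg_frm_dms(-23, 26, 49.0)
// forces the minutes and seconds negative as well, giving roughly -23.4469 degrees.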
/**
Computes an angle expressed in degrees, arcminutes and
arcseconds, from an angle in degrees with decimals
# Returns
`(deg, min, sec)`
* `deg`: Degrees
* `min`: Arcminutes
* `sec`: Arcseconds
# Arguments
* `deg`: Angle in degrees with decimals
**/
#[inline]
pub fn dms_frm_deg(deg: f64) -> (i64, i64, f64)
{<|fim▁hole|> let minute = minutes as i64;
let seconds = (minutes - (minute as f64)) * 60.0;
(degree, minute, seconds)
}
/**
Computes an angle in degrees with decimals, from an angle
expressed in hours, minutes and seconds
# Arguments
* `hour`: Hours
* `min`: Minutes
* `sec`: Seconds
**/
#[inline]
pub fn deg_frm_hms(hour: i64, min: i64, sec: f64) -> f64
{
15.0 * ((hour as f64) + (min as f64)/60.0 + sec/3600.0)
}
/**
Computes an angle expressed in hours, minutes and
seconds, from an angle in degrees with decimals
# Returns
`(hour, min, sec)`
* `hour`: Hours
* `min`: Minutes
* `sec`: Seconds
# Arguments
* `deg`: Angle in degrees with decimals
**/
#[inline]
pub fn hms_frm_deg(deg: f64) -> (i64, i64, f64)
{
let hours = deg / 15.0;
let hour = hours as i64;
let minutes = (hours - (hour as f64)) * 60.0;
let minute = minutes as i64;
let seconds = (minutes - (minute as f64)) * 60.0;
(hour, minute, seconds)
}
/**
Computes the equivalent angle in [0, 360] degree range
# Arguments
* `angl`: Angle *| in degrees*
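Illustrative values (not from the original crate): `limit_to_360(750.0)` gives
`30.0`, and `limit_to_360(-90.0)` gives `270.0`.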
**/
#[inline]
pub fn limit_to_360(angl: f64) -> f64
{
let n = (angl / 360.0) as i64;
let limited_angl = angl - (360.0 * (n as f64));
if limited_angl < 0.0 { limited_angl + 360.0 }
else { limited_angl }
}
/**
Computes the equivalent angle in [0, 2π] radian range
# Arguments
* `angl`: Angle *| in radians*
**/
#[inline]
pub fn limit_to_two_PI(angl: f64) -> f64
{
let n = (angl / TWO_PI) as i64;
let limited_angl = angl - (TWO_PI * (n as f64));
if limited_angl < 0.0 { limited_angl + TWO_PI }
else { limited_angl }
}<|fim▁end|> | let degree = deg as i64;
let minutes = (deg - (degree as f64)) * 60.0; |
<|file_name|>attributes.py<|end_file_name|><|fim▁begin|># Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from opencensus.common import utils
def _format_attribute_value(value):
if isinstance(value, bool):
value_type = 'bool_value'
elif isinstance(value, int):
value_type = 'int_value'
elif isinstance(value, six.string_types):
value_type = 'string_value'
value = utils.get_truncatable_str(value)
elif isinstance(value, float):
value_type = 'double_value'
else:
return None
return {value_type: value}
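# Illustrative mappings (assumed values, not part of the original module):
#   _format_attribute_value(True) -> {'bool_value': True}
#   _format_attribute_value(42)   -> {'int_value': 42}
#   _format_attribute_value(2.5)  -> {'double_value': 2.5}
# bool is tested before int because bool is a subclass of int; unsupported
# types fall through and return None.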
class Attributes(object):
"""A set of attributes, each in the format [KEY]:[VALUE].
:type attributes: dict
:param attributes: The set of attributes. Each attribute's key can be up
to 128 bytes long. The value can be a string up to 256
bytes, an integer, a floating-point number, or the
Boolean values true and false.
"""
def __init__(self, attributes=None):
self.attributes = attributes or {}
def set_attribute(self, key, value):
"""Set a key value pair."""
self.attributes[key] = value
def delete_attribute(self, key):
"""Delete an attribute given a key if existed."""
self.attributes.pop(key, None)
def get_attribute(self, key):
"""Get a attribute value."""
return self.attributes.get(key, None)
def format_attributes_json(self):
"""Convert the Attributes object to json format."""
attributes_json = {}
for key, value in self.attributes.items():
key = utils.check_str_length(key)[0]
value = _format_attribute_value(value)
if value is not None:<|fim▁hole|> 'attributeMap': attributes_json
}
return result<|fim▁end|> | attributes_json[key] = value
result = { |
<|file_name|>browse.py<|end_file_name|><|fim▁begin|>#
# Copyright (c) 2009-2012 Joshua Hughes <[email protected]>
#
import webbrowser<|fim▁hole|>class BrowseCommand(qmk.Command):
'''Open the supplied URL in the default web browser.'''
def __init__(self):
self._name = 'browse'
self._help = self.__doc__
@qmk.Command.actionRequiresArgument
def action(self, arg):
webbrowser.open_new_tab(arg)
def commands(): return [ BrowseCommand() ]<|fim▁end|> |
import qmk
|
<|file_name|>engine.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# This file is part of the Horus Project
__author__ = 'Jesús Arroyo Torrens <[email protected]>'
__copyright__ = 'Copyright (C) 2014-2016 Mundo Reader S.L.'
__license__ = 'GNU General Public License v2 http://www.gnu.org/licenses/gpl2.html'
from horus.engine.driver.driver import Driver
from horus.engine.scan.ciclop_scan import CiclopScan
from horus.engine.scan.current_video import CurrentVideo
from horus.engine.calibration.pattern import Pattern
from horus.engine.calibration.calibration_data import CalibrationData
from horus.engine.calibration.camera_intrinsics import CameraIntrinsics
from horus.engine.calibration.autocheck import Autocheck
from horus.engine.calibration.laser_triangulation import LaserTriangulation
from horus.engine.calibration.platform_extrinsics import PlatformExtrinsics
from horus.engine.calibration.combo_calibration import ComboCalibration
from horus.engine.algorithms.image_capture import ImageCapture
from horus.engine.algorithms.image_detection import ImageDetection
from horus.engine.algorithms.laser_segmentation import LaserSegmentation
from horus.engine.algorithms.point_cloud_generation import PointCloudGeneration
from horus.engine.algorithms.point_cloud_roi import PointCloudROI
# Instances of engine modules
driver = Driver()
ciclop_scan = CiclopScan()
current_video = CurrentVideo()<|fim▁hole|>pattern = Pattern()
calibration_data = CalibrationData()
camera_intrinsics = CameraIntrinsics()
scanner_autocheck = Autocheck()
laser_triangulation = LaserTriangulation()
platform_extrinsics = PlatformExtrinsics()
combo_calibration = ComboCalibration()
image_capture = ImageCapture()
image_detection = ImageDetection()
laser_segmentation = LaserSegmentation()
point_cloud_generation = PointCloudGeneration()
point_cloud_roi = PointCloudROI()<|fim▁end|> | |
<|file_name|>__main__.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import logging
import click
import socket
from mkdocs import __version__
from mkdocs import utils
from mkdocs import exceptions
from mkdocs import config
from mkdocs.commands import build, gh_deploy, new, serve
log = logging.getLogger(__name__)
# Disable the warning that Click displays (as of Click version 5.0) when users
# use unicode_literals in Python 2.
# See http://click.pocoo.org/dev/python3/#unicode-literals for more details.
click.disable_unicode_literals_warning = True
class State(object):
''' Maintain logging level.'''
def __init__(self, log_name='mkdocs', level=logging.INFO):
self.logger = logging.getLogger(log_name)
self.logger.propagate = False
stream = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)-7s - %(message)s ")
stream.setFormatter(formatter)
self.logger.addHandler(stream)
self.logger.setLevel(level)
pass_state = click.make_pass_decorator(State, ensure=True)
def verbose_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
if value:
state.logger.setLevel(logging.DEBUG)
return click.option('-v', '--verbose',
is_flag=True,
expose_value=False,
help='Enable verbose output',
callback=callback)(f)
def quiet_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
if value:
state.logger.setLevel(logging.ERROR)
return click.option('-q', '--quiet',
is_flag=True,
expose_value=False,
help='Silence warnings',
callback=callback)(f)
def common_options(f):
f = verbose_option(f)
f = quiet_option(f)
return f
clean_help = "Remove old files from the site_dir before building (the default)."
config_help = "Provide a specific MkDocs config"
dev_addr_help = ("IP address and port to serve documentation locally (default: "
"localhost:8000)")
strict_help = ("Enable strict mode. This will cause MkDocs to abort the build "
"on any warnings.")
theme_dir_help = "The theme directory to use when building your documentation."
theme_help = "The theme to use when building your documentation."
theme_choices = utils.get_theme_names()
site_dir_help = "The directory to output the result of the documentation build."
reload_help = "Enable the live reloading in the development server (this is the default)"
no_reload_help = "Disable the live reloading in the development server."
dirty_reload_help = "Enable the live reloading in the development server, but only re-build files that have changed"<|fim▁hole|>remote_branch_help = ("The remote branch to commit to for Github Pages. This "
"overrides the value specified in config")
remote_name_help = ("The remote name to commit to for Github Pages. This "
"overrides the value specified in config")
force_help = "Force the push to the repository."
@click.group(context_settings={'help_option_names': ['-h', '--help']})
@click.version_option(__version__, '-V', '--version')
@common_options
def cli():
"""
MkDocs - Project documentation with Markdown.
"""
@cli.command(name="serve")
@click.option('-f', '--config-file', type=click.File('rb'), help=config_help)
@click.option('-a', '--dev-addr', help=dev_addr_help, metavar='<IP:PORT>')
@click.option('-s', '--strict', is_flag=True, help=strict_help)
@click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)
@click.option('-e', '--theme-dir', type=click.Path(), help=theme_dir_help)
@click.option('--livereload', 'livereload', flag_value='livereload', help=reload_help, default=True)
@click.option('--no-livereload', 'livereload', flag_value='no-livereload', help=no_reload_help)
@click.option('--dirtyreload', 'livereload', flag_value='dirty', help=dirty_reload_help)
@common_options
def serve_command(dev_addr, config_file, strict, theme, theme_dir, livereload):
"""Run the builtin development server"""
logging.getLogger('tornado').setLevel(logging.WARNING)
# Don't override config value if user did not specify --strict flag
# Conveniently, load_config drops None values
strict = strict or None
try:
serve.serve(
config_file=config_file,
dev_addr=dev_addr,
strict=strict,
theme=theme,
theme_dir=theme_dir,
livereload=livereload
)
except (exceptions.ConfigurationError, socket.error) as e: # pragma: no cover
# Avoid ugly, unhelpful traceback
raise SystemExit('\n' + str(e))
@cli.command(name="build")
@click.option('-c', '--clean/--dirty', is_flag=True, default=True, help=clean_help)
@click.option('-f', '--config-file', type=click.File('rb'), help=config_help)
@click.option('-s', '--strict', is_flag=True, help=strict_help)
@click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)
@click.option('-e', '--theme-dir', type=click.Path(), help=theme_dir_help)
@click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help)
@common_options
def build_command(clean, config_file, strict, theme, theme_dir, site_dir):
"""Build the MkDocs documentation"""
# Don't override config value if user did not specify --strict flag
# Conveniently, load_config drops None values
strict = strict or None
try:
build.build(config.load_config(
config_file=config_file,
strict=strict,
theme=theme,
theme_dir=theme_dir,
site_dir=site_dir
), dirty=not clean)
except exceptions.ConfigurationError as e: # pragma: no cover
# Avoid ugly, unhelpful traceback
raise SystemExit('\n' + str(e))
@cli.command(name="json")
@click.option('-c', '--clean/--dirty', is_flag=True, default=True, help=clean_help)
@click.option('-f', '--config-file', type=click.File('rb'), help=config_help)
@click.option('-s', '--strict', is_flag=True, help=strict_help)
@click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help)
@common_options
def json_command(clean, config_file, strict, site_dir):
"""Build the MkDocs documentation to JSON files
Rather than building your documentation to HTML pages, this
outputs each page in a simple JSON format. This command is
useful if you want to index your documentation in an external
search engine.
"""
log.warning("The json command is deprecated and will be removed in a "
"future MkDocs release. For details on updating: "
"http://www.mkdocs.org/about/release-notes/")
# Don't override config value if user did not specify --strict flag
# Conveniently, load_config drops None values
strict = strict or None
try:
build.build(config.load_config(
config_file=config_file,
strict=strict,
site_dir=site_dir
), dump_json=True, dirty=not clean)
except exceptions.ConfigurationError as e: # pragma: no cover
# Avoid ugly, unhelpful traceback
raise SystemExit('\n' + str(e))
@cli.command(name="gh-deploy")
@click.option('-c', '--clean/--dirty', is_flag=True, default=True, help=clean_help)
@click.option('-f', '--config-file', type=click.File('rb'), help=config_help)
@click.option('-m', '--message', help=commit_message_help)
@click.option('-b', '--remote-branch', help=remote_branch_help)
@click.option('-r', '--remote-name', help=remote_name_help)
@click.option('--force', is_flag=True, help=force_help)
@common_options
def gh_deploy_command(config_file, clean, message, remote_branch, remote_name, force):
"""Deploy your documentation to GitHub Pages"""
try:
cfg = config.load_config(
config_file=config_file,
remote_branch=remote_branch,
remote_name=remote_name
)
build.build(cfg, dirty=not clean)
gh_deploy.gh_deploy(cfg, message=message, force=force)
except exceptions.ConfigurationError as e: # pragma: no cover
# Avoid ugly, unhelpful traceback
raise SystemExit('\n' + str(e))
@cli.command(name="new")
@click.argument("project_directory")
@common_options
def new_command(project_directory):
"""Create a new MkDocs project"""
new.new(project_directory)
if __name__ == '__main__': # pragma: no cover
cli()<|fim▁end|> | commit_message_help = ("A commit message to use when commiting to the "
"Github Pages remote branch") |
<|file_name|>service.rs<|end_file_name|><|fim▁begin|>// This file is generated. Do not edit
// @generated
#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(unused_imports)]
use protobuf::Message as Message_imported_for_functions;
use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions;
#[derive(Clone,Default)]
pub struct Service {
// message fields
name: ::protobuf::SingularField<::std::string::String>,
// special fields
unknown_fields: ::protobuf::UnknownFields,
cached_size: ::std::cell::Cell<u32>,
}
impl Service {
pub fn new() -> Service {
::std::default::Default::default()
}
pub fn default_instance() -> &'static Service {
static mut instance: ::protobuf::lazy::Lazy<Service> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const Service,
};
unsafe {
instance.get(|| {
Service {
name: ::protobuf::SingularField::none(),
unknown_fields: ::protobuf::UnknownFields::new(),
cached_size: ::std::cell::Cell::new(0),
}
})
}
}
// optional string name = 1;
pub fn clear_name(&mut self) {
self.name.clear();
}
pub fn has_name(&self) -> bool {
self.name.is_some()
}
// Param is passed by value, moved
pub fn set_name(&mut self, v: ::std::string::String) {
self.name = ::protobuf::SingularField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_name<'a>(&'a mut self) -> &'a mut ::std::string::String {
if self.name.is_none() {
self.name.set_default();
};
self.name.as_mut().unwrap()<|fim▁hole|> }
// Take field
pub fn take_name(&mut self) -> ::std::string::String {
self.name.take().unwrap_or_else(|| ::std::string::String::new())
}
pub fn get_name<'a>(&'a self) -> &'a str {
match self.name.as_ref() {
Some(v) => &v,
None => "",
}
}
}
impl ::protobuf::Message for Service {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> {
while !try!(is.eof()) {
let (field_number, wire_type) = try!(is.read_tag_unpack());
match field_number {
1 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::ProtobufError::WireError("unexpected wire type".to_string()));
};
let tmp = self.name.set_default();
try!(is.read_string_into(tmp))
},
_ => {
let unknown = try!(is.read_unknown(wire_type));
self.mut_unknown_fields().add_value(field_number, unknown);
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in self.name.iter() {
my_size += ::protobuf::rt::string_size(1, &value);
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> {
if let Some(v) = self.name.as_ref() {
try!(os.write_string(1, &v));
};
try!(os.write_unknown_fields(self.get_unknown_fields()));
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields<'s>(&'s self) -> &'s ::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields<'s>(&'s mut self) -> &'s mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn type_id(&self) -> ::std::any::TypeId {
::std::any::TypeId::of::<Service>()
}
fn as_any(&self) -> &::std::any::Any {
self as &::std::any::Any
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
::protobuf::MessageStatic::descriptor_static(None::<Self>)
}
}
impl ::protobuf::MessageStatic for Service {
fn new() -> Service {
Service::new()
}
fn descriptor_static(_: ::std::option::Option<Service>) -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_singular_string_accessor(
"name",
Service::has_name,
Service::get_name,
));
::protobuf::reflect::MessageDescriptor::new::<Service>(
"Service",
fields,
file_descriptor_proto()
)
})
}
}
}
impl ::protobuf::Clear for Service {
fn clear(&mut self) {
self.clear_name();
self.unknown_fields.clear();
}
}
impl ::std::cmp::PartialEq for Service {
fn eq(&self, other: &Service) -> bool {
self.name == other.name &&
self.unknown_fields == other.unknown_fields
}
}
impl ::std::fmt::Debug for Service {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
static file_descriptor_proto_data: &'static [u8] = &[
0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x17, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x12, 0x0c, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x4a, 0x9d, 0x01, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x05, 0x01, 0x0a, 0x08, 0x0a, 0x01,
0x02, 0x12, 0x03, 0x00, 0x08, 0x0f, 0x0a, 0x38, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x03, 0x00,
0x05, 0x01, 0x1a, 0x2c, 0x20, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, 0x63, 0x6f, 0x6e,
0x74, 0x61, 0x69, 0x6e, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x69, 0x6e, 0x66, 0x6f, 0x20, 0x61,
0x62, 0x6f, 0x75, 0x74, 0x20, 0x61, 0x20, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x0a,
0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x03, 0x08, 0x0f, 0x0a, 0x0b, 0x0a, 0x04,
0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x04, 0x02, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02,
0x00, 0x04, 0x12, 0x03, 0x04, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x05,
0x12, 0x03, 0x04, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03,
0x04, 0x12, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x04, 0x19,
0x1a,
];
static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::descriptor::FileDescriptorProto,
};
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
}
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
unsafe {
file_descriptor_proto_lazy.get(|| {
parse_descriptor_proto()
})
}
}<|fim▁end|> | |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>#![ warn( missing_docs ) ]
#![ warn( missing_debug_implementations ) ]
// #![ feature( type_name_of_val ) ]
// #![ feature( trace_macros ) ]
//!
//! Former - variation of builder pattern.
//!
//! # Sample
//! ```
//! use former::Former;
//!
//! #[derive( Debug, PartialEq, Former )]
//! pub struct Structure1
//! {
//! int_1 : i32,
//! string_1 : String,
//! vec_1 : Vec< i32 >,
//! hashmap_strings_1 : std::collections::HashMap< String, String >,<|fim▁hole|>//! string_optional_1 : Option< String >,
//! }
//!
//! fn main()
//! {
//!
//! let struct1 = Structure1::former()
//! .int_1( 13 )
//! .string_1( "Abcd".to_string() )
//! .vec_1().replace( vec![ 1, 3 ] ).end()
//! .hashmap_strings_1().insert( "k1", "v1" ).insert( "k2", "v2" ).end()
//! .string_optional_1( "dir1" )
//! .form();
//! dbg!( &struct1 );
//!
//! // < &struct1 = Structure1 {
//! // < int_1: 13,
//! // < string_1: "Abcd",
//! // < vec_1: [
//! // < 1,
//! // < 3,
//! // < ],
//! // < hashmap_strings_1: {
//! // < "k1": "v1",
//! // < "k2": "v2",
//! // < },
//! // < int_optional_1: None,
//! // < string_optional_1: Some(
//! // < "dir1",
//! // < ),
//! // < }
//!
//! }
//! ```
pub use former_runtime as runtime;
pub use former_meta as derive;
pub use derive::Former as Former;<|fim▁end|> | //! int_optional_1 : core::option::Option< i32 >, |
<|file_name|>bitcoin_ca.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="ca" version="2.0">
<context>
<name>AboutDialog</name>
<message>
<source>About Latinumcoin Core</source>
<translation type="unfinished"/>
</message>
<message>
<source><b>Latinumcoin Core</b> version</source>
<translation type="unfinished"/>
</message>
<message>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copyright</source>
<translation type="unfinished"/>
</message>
<message>
<source>The Latinumcoin Core developers</source>
<translation type="unfinished"/>
</message>
<message>
<source>(%1-bit)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<source>Double-click to edit address or label</source>
<translation>Doble click per editar l'adreça o l'etiqueta</translation>
</message>
<message>
<source>Create a new address</source>
        <translation>Crear una nova adreça</translation>
</message>
<message>
<source>&New</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Copia la selecció actual al porta-retalls del sistema</translation>
</message>
<message>
<source>&Copy</source>
<translation type="unfinished"/>
</message>
<message>
<source>C&lose</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Copy Address</source>
<translation type="unfinished"/>
</message>
<message>
<source>Delete the currently selected address from the list</source>
<translation type="unfinished"/>
</message>
<message>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Delete</source>
<translation>&Eliminar</translation>
</message>
<message>
<source>Choose the address to send coins to</source>
<translation type="unfinished"/>
</message>
<message>
<source>Choose the address to receive coins with</source>
<translation type="unfinished"/>
</message>
<message>
<source>C&hoose</source>
<translation type="unfinished"/>
</message>
<message>
<source>Sending addresses</source>
<translation type="unfinished"/>
</message>
<message>
<source>Receiving addresses</source>
<translation type="unfinished"/>
</message>
<message>
<source>These are your Latinumcoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<source>These are your Latinumcoin addresses for receiving payments. It is recommended to use a new receiving address for each transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy &Label</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Edit</source>
<translation type="unfinished"/>
</message>
<message>
<source>Export Address List</source>
<translation type="unfinished"/>
</message>
<message>
<source>Comma separated file (*.csv)</source>
<translation>Fitxer separat per comes (*.csv)</translation>
</message>
<message>
<source>Exporting Failed</source>
<translation type="unfinished"/>
</message>
<message>
<source>There was an error trying to save the address list to %1.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<source>Address</source>
<translation>Adreça</translation>
</message>
<message>
<source>(no label)</source>
<translation>(sense etiqueta)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<source>Passphrase Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<source>Enter passphrase</source>
<translation>Introduïu la frase-contrasenya</translation>
</message>
<message>
<source>New passphrase</source>
<translation>Nova frase-contrasenya</translation>
</message>
<message>
<source>Repeat new passphrase</source>
<translation>Repetiu la nova frase-contrasenya</translation>
</message>
<message>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
        <translation>Introduïu la nova frase-contrasenya per a la cartera.<br/>Empreu una frase-contrasenya de <b>10 o més caràcters aleatoris</b>, o <b>vuit o més paraules</b>.</translation>
</message>
<message>
<source>Encrypt wallet</source>
<translation>Encriptar cartera</translation>
</message>
<message>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Cal que introduïu la frase-contrasenya de la cartera per a desbloquejar-la.</translation>
</message>
<message>
<source>Unlock wallet</source>
<translation>Desbloquejar cartera</translation>
</message>
<message>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Cal que introduïu la frase-contrasenya de la cartera per a desencriptar-la.</translation>
</message>
<message>
<source>Decrypt wallet</source>
<translation>Desencriptar cartera</translation>
</message>
<message>
<source>Change passphrase</source>
<translation>Canviar frase-contrasenya</translation>
</message>
<message>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Introduïu l'antiga i la nova frase-contrasenya per a la cartera.</translation>
</message>
<message>
<source>Confirm wallet encryption</source>
<translation>Confirmeu l'encriptació de cartera</translation>
</message>
<message>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR LATINUMCOINS</b>!</source>
<translation type="unfinished"/>
</message>
<message>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation type="unfinished"/>
</message>
<message>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Warning: The Caps Lock key is on!</source>
<translation type="unfinished"/>
</message>
<message>
<source>Wallet encrypted</source>
<translation>Cartera encriptada</translation>
</message>
<message>
<source>Latinumcoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your bitcoins from being stolen by malware infecting your computer.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Wallet encryption failed</source>
<translation>L'encriptació de cartera ha fallat</translation>
</message>
<message>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>L'encriptació de cartera ha fallat degut a un error intern. La vostra cartera no ha estat encriptada.</translation>
</message>
<message>
<source>The supplied passphrases do not match.</source>
<translation>Les frases-contrasenya no concorden.</translation>
</message>
<message>
<source>Wallet unlock failed</source>
<translation>El desbloqueig de cartera ha fallat</translation>
</message>
<message>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>La frase-contrasenya per a la desencriptació de cartera és incorrecta.</translation>
</message>
<message>
<source>Wallet decryption failed</source>
<translation>La desencriptació de cartera ha fallat</translation>
</message>
<message>
<source>Wallet passphrase was successfully changed.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>LatinumcoinGUI</name>
<message>
<source>Sign &message...</source>
<translation type="unfinished"/>
</message>
<message>
<source>Synchronizing with network...</source>
<translation>Sincronitzant amb la xarxa...</translation>
</message>
<message>
<source>&Overview</source>
<translation>&Visió general</translation>
</message>
<message>
<source>Node</source>
<translation type="unfinished"/>
</message>
<message>
<source>Show general overview of wallet</source>
<translation>Mostrar visió general de la cartera</translation>
</message>
<message>
<source>&Transactions</source>
<translation>&Transaccions</translation>
</message>
<message>
<source>Browse transaction history</source>
<translation>Exploreu l'historial de transaccions</translation>
</message>
<message>
<source>E&xit</source>
<translation type="unfinished"/>
</message>
<message>
<source>Quit application</source>
<translation>Sortir de l'aplicació</translation>
</message>
<message>
<source>Show information about Latinumcoin</source>
<translation>Informació sobre Latinumcoin</translation>
</message>
<message>
<source>About &Qt</source>
<translation type="unfinished"/>
</message>
<message>
<source>Show information about Qt</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Options...</source>
<translation>&Opcions...</translation>
</message>
<message>
<source>&Encrypt Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Backup Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Change Passphrase...</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Sending addresses...</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Receiving addresses...</source>
<translation type="unfinished"/>
</message>
<message>
<source>Open &URI...</source>
<translation type="unfinished"/>
</message>
<message>
<source>Importing blocks from disk...</source>
<translation type="unfinished"/>
</message>
<message>
<source>Reindexing blocks on disk...</source>
<translation type="unfinished"/>
</message>
<message>
<source>Send coins to a Latinumcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<source>Modify configuration options for Latinumcoin</source>
<translation type="unfinished"/>
</message>
<message>
<source>Backup wallet to another location</source>
<translation type="unfinished"/>
</message>
<message>
<source>Change the passphrase used for wallet encryption</source>
        <translation>Canviar frase-contrasenya per a l'encriptació de la cartera</translation>
</message>
<message>
<source>&Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<source>Open debugging and diagnostic console</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Verify message...</source>
<translation type="unfinished"/>
</message>
<message>
<source>Latinumcoin</source>
<translation type="unfinished"/>
</message>
<message>
<source>Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Send</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Receive</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Show / Hide</source>
<translation type="unfinished"/>
</message>
<message>
<source>Show or hide the main Window</source>
<translation type="unfinished"/>
</message>
<message>
<source>Encrypt the private keys that belong to your wallet</source>
<translation type="unfinished"/>
</message>
<message>
<source>Sign messages with your Latinumcoin addresses to prove you own them</source>
<translation type="unfinished"/>
</message>
<message>
<source>Verify messages to ensure they were signed with specified Latinumcoin addresses</source>
<translation type="unfinished"/>
</message>
<message>
<source>&File</source>
<translation>&Fitxer</translation>
</message>
<message>
<source>&Settings</source>
<translation>&Configuració</translation>
</message>
<message>
<source>&Help</source>
<translation>&Ajuda</translation>
</message>
<message>
<source>Tabs toolbar</source>
<translation>Barra d'eines</translation>
</message>
<message>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<source>Latinumcoin Core</source>
<translation type="unfinished"/>
</message>
<message>
<source>Request payments (generates QR codes and bitcoin: URIs)</source>
<translation type="unfinished"/>
</message>
<message>
<source>&About Latinumcoin Core</source>
<translation type="unfinished"/>
</message>
<message>
<source>Show the list of used sending addresses and labels</source>
<translation type="unfinished"/>
</message>
<message>
<source>Show the list of used receiving addresses and labels</source>
<translation type="unfinished"/>
</message>
<message>
<source>Open a bitcoin: URI or payment request</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<source>Show the Latinumcoin Core help message to get a list with possible Latinumcoin command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<source>Latinumcoin client</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<source>%n active connection(s) to Latinumcoin network</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<source>No block source available...</source>
<translation type="unfinished"/>
</message>
<message>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Processed %1 blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<source>%n hour(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<source>%n day(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<source>%n week(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<source>%1 and %2</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<source>%n year(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<source>%1 behind</source>
<translation type="unfinished"/>
</message>
<message>
<source>Last received block was generated %1 ago.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Transactions after this will not yet be visible.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<source>Up to date</source>
<translation type="unfinished"/>
</message>
<message>
<source>Catching up...</source>
<translation>Actualitzant...</translation>
</message>
<message>
<source>Sent transaction</source>
<translation>Transacció enviada</translation>
</message>
<message>
<source>Incoming transaction</source>
<translation>Transacció entrant</translation>
</message>
<message>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation type="unfinished"/>
</message>
<message>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
        <translation>La cartera està <b>encriptada</b> i <b>desbloquejada</b></translation>
</message>
<message>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
        <translation>La cartera està <b>encriptada</b> i <b>bloquejada</b></translation>
</message>
<message>
<source>A fatal error occurred. Latinumcoin can no longer continue safely and will quit.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<source>Network Alert</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<source>Coin Control Address Selection</source>
<translation type="unfinished"/>
</message>
<message>
<source>Quantity:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Bytes:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Priority:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Low Output:</source>
<translation type="unfinished"/>
</message>
<message>
<source>After Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Change:</source>
<translation type="unfinished"/>
</message>
<message>
<source>(un)select all</source>
<translation type="unfinished"/>
</message>
<message>
<source>Tree mode</source>
<translation type="unfinished"/>
</message>
<message>
<source>List mode</source>
<translation type="unfinished"/>
</message>
<message>
<source>Amount</source>
<translation type="unfinished"/>
</message>
<message>
<source>Address</source>
<translation>Adreça</translation>
</message>
<message>
<source>Date</source>
<translation type="unfinished"/>
</message>
<message>
<source>Confirmations</source>
<translation type="unfinished"/>
</message>
<message>
<source>Confirmed</source>
<translation type="unfinished"/>
</message>
<message>
<source>Priority</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy address</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy label</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy amount</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<source>Lock unspent</source>
<translation type="unfinished"/>
</message>
<message>
<source>Unlock unspent</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy quantity</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy fee</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy after fee</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy bytes</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy priority</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy low output</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy change</source>
<translation type="unfinished"/>
</message>
<message>
<source>highest</source>
<translation type="unfinished"/>
</message>
<message>
<source>higher</source>
<translation type="unfinished"/>
</message>
<message>
<source>high</source>
<translation type="unfinished"/>
</message>
<message>
<source>medium-high</source>
<translation type="unfinished"/>
</message>
<message>
<source>medium</source>
<translation type="unfinished"/>
</message>
<message>
<source>low-medium</source>
<translation type="unfinished"/>
</message>
<message>
<source>low</source>
<translation type="unfinished"/>
</message>
<message>
<source>lower</source>
<translation type="unfinished"/>
</message>
<message>
<source>lowest</source>
<translation type="unfinished"/>
</message>
<message>
<source>(%1 locked)</source>
<translation type="unfinished"/>
</message>
<message>
<source>none</source>
<translation type="unfinished"/>
</message>
<message>
<source>Dust</source>
<translation type="unfinished"/>
</message>
<message>
<source>yes</source>
<translation type="unfinished"/>
</message>
<message>
<source>no</source>
<translation type="unfinished"/>
</message>
<message>
<source>This label turns red, if the transaction size is greater than 1000 bytes.</source>
<translation type="unfinished"/>
</message>
<message>
<source>This means a fee of at least %1 per kB is required.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Can vary +/- 1 byte per input.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Transactions with higher priority are more likely to get included into a block.</source>
<translation type="unfinished"/>
</message>
<message>
<source>This label turns red, if the priority is smaller than "medium".</source>
<translation type="unfinished"/>
</message>
<message>
<source>This label turns red, if any recipient receives an amount smaller than %1.</source>
<translation type="unfinished"/>
</message>
<message>
<source>This means a fee of at least %1 is required.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Amounts below 0.546 times the minimum relay fee are shown as dust.</source>
<translation type="unfinished"/>
</message>
<message>
<source>This label turns red, if the change is smaller than %1.</source>
<translation type="unfinished"/>
</message>
<message>
<source>(no label)</source>
<translation>(sense etiqueta)</translation>
</message>
<message>
<source>change from %1 (%2)</source>
<translation type="unfinished"/>
</message>
<message>
<source>(change)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<source>Edit Address</source>
<translation>Editar adreça</translation>
</message>
<message>
<source>&Label</source>
<translation>&Etiqueta</translation>
</message><|fim▁hole|> <message>
<source>The label associated with this address list entry</source>
<translation type="unfinished"/>
</message>
<message>
<source>The address associated with this address list entry. This can only be modified for sending addresses.</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Address</source>
<translation>&Adreça</translation>
</message>
<message>
<source>New receiving address</source>
<translation type="unfinished"/>
</message>
<message>
<source>New sending address</source>
<translation type="unfinished"/>
</message>
<message>
<source>Edit receiving address</source>
<translation type="unfinished"/>
</message>
<message>
<source>Edit sending address</source>
<translation type="unfinished"/>
</message>
<message>
<source>The entered address "%1" is already in the address book.</source>
<translation type="unfinished"/>
</message>
<message>
<source>The entered address "%1" is not a valid Latinumcoin address.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Could not unlock wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<source>New key generation failed.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>FreespaceChecker</name>
<message>
<source>A new data directory will be created.</source>
<translation type="unfinished"/>
</message>
<message>
<source>name</source>
<translation type="unfinished"/>
</message>
<message>
<source>Directory already exists. Add %1 if you intend to create a new directory here.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Path already exists, and is not a directory.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Cannot create data directory here.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>HelpMessageDialog</name>
<message>
<source>Latinumcoin Core - Command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<source>Latinumcoin Core</source>
<translation type="unfinished"/>
</message>
<message>
<source>version</source>
<translation type="unfinished"/>
</message>
<message>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<source>command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<source>UI options</source>
<translation type="unfinished"/>
</message>
<message>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Start minimized</source>
<translation type="unfinished"/>
</message>
<message>
<source>Set SSL root certificates for payment request (default: -system-)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Choose data directory on startup (default: 0)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>Intro</name>
<message>
<source>Welcome</source>
<translation type="unfinished"/>
</message>
<message>
<source>Welcome to Latinumcoin Core.</source>
<translation type="unfinished"/>
</message>
<message>
<source>As this is the first time the program is launched, you can choose where Latinumcoin Core will store its data.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Latinumcoin Core will download and store a copy of the Latinumcoin block chain. At least %1GB of data will be stored in this directory, and it will grow over time. The wallet will also be stored in this directory.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Use the default data directory</source>
<translation type="unfinished"/>
</message>
<message>
<source>Use a custom data directory:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Latinumcoin</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error: Specified data directory "%1" can not be created.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<source>GB of free space available</source>
<translation type="unfinished"/>
</message>
<message>
<source>(of %1GB needed)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OpenURIDialog</name>
<message>
<source>Open URI</source>
<translation type="unfinished"/>
</message>
<message>
<source>Open payment request from URI or file</source>
<translation type="unfinished"/>
</message>
<message>
<source>URI:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Select payment request file</source>
<translation type="unfinished"/>
</message>
<message>
<source>Select payment request file to open</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<source>Options</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Main</source>
<translation type="unfinished"/>
</message>
<message>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Pay transaction &fee</source>
<translation type="unfinished"/>
</message>
<message>
<source>Automatically start Latinumcoin after logging in to the system.</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Start Latinumcoin on system login</source>
<translation type="unfinished"/>
</message>
<message>
<source>Size of &database cache</source>
<translation type="unfinished"/>
</message>
<message>
<source>MB</source>
<translation type="unfinished"/>
</message>
<message>
<source>Number of script &verification threads</source>
<translation type="unfinished"/>
</message>
<message>
<source>Connect to the Latinumcoin network through a SOCKS proxy.</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Connect through SOCKS proxy (default proxy):</source>
<translation type="unfinished"/>
</message>
<message>
<source>IP address of the proxy (e.g. IPv4: 127.0.0.1 / IPv6: ::1)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Active command-line options that override above options:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Reset all client options to default.</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Reset Options</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Network</source>
<translation type="unfinished"/>
</message>
<message>
<source>(0 = auto, <0 = leave that many cores free)</source>
<translation type="unfinished"/>
</message>
<message>
<source>W&allet</source>
<translation type="unfinished"/>
</message>
<message>
<source>Expert</source>
<translation type="unfinished"/>
</message>
<message>
<source>Enable coin &control features</source>
<translation type="unfinished"/>
</message>
<message>
<source>If you disable the spending of unconfirmed change, the change from a transaction cannot be used until that transaction has at least one confirmation. This also affects how your balance is computed.</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Spend unconfirmed change</source>
<translation type="unfinished"/>
</message>
<message>
<source>Automatically open the Latinumcoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Map port using &UPnP</source>
<translation type="unfinished"/>
</message>
<message>
<source>Proxy &IP:</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Port:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Port of the proxy (e.g. 9050)</source>
<translation type="unfinished"/>
</message>
<message>
<source>SOCKS &Version:</source>
<translation type="unfinished"/>
</message>
<message>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Window</source>
<translation type="unfinished"/>
</message>
<message>
<source>Show only a tray icon after minimizing the window.</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Minimize to the tray instead of the taskbar</source>
<translation type="unfinished"/>
</message>
<message>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation type="unfinished"/>
</message>
<message>
<source>M&inimize on close</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Display</source>
<translation type="unfinished"/>
</message>
<message>
<source>User Interface &language:</source>
<translation type="unfinished"/>
</message>
<message>
<source>The user interface language can be set here. This setting will take effect after restarting Latinumcoin.</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Unit to show amounts in:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Whether to show Latinumcoin addresses in the transaction list or not.</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Display addresses in transaction list</source>
<translation type="unfinished"/>
</message>
<message>
<source>Whether to show coin control features or not.</source>
<translation type="unfinished"/>
</message>
<message>
<source>&OK</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Cancel</source>
<translation type="unfinished"/>
</message>
<message>
<source>default</source>
<translation type="unfinished"/>
</message>
<message>
<source>none</source>
<translation type="unfinished"/>
</message>
<message>
<source>Confirm options reset</source>
<translation type="unfinished"/>
</message>
<message>
<source>Client restart required to activate changes.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Client will be shutdown, do you want to proceed?</source>
<translation type="unfinished"/>
</message>
<message>
<source>This change would require a client restart.</source>
<translation type="unfinished"/>
</message>
<message>
<source>The supplied proxy address is invalid.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Latinumcoin network after a connection is established, but this process has not completed yet.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<source>Available:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Your current spendable balance</source>
<translation type="unfinished"/>
</message>
<message>
<source>Pending:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the spendable balance</source>
<translation type="unfinished"/>
</message>
<message>
<source>Immature:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Mined balance that has not yet matured</source>
<translation type="unfinished"/>
</message>
<message>
<source>Total:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Your current total balance</source>
<translation type="unfinished"/>
</message>
<message>
<source><b>Recent transactions</b></source>
<translation type="unfinished"/>
</message>
<message>
<source>out of sync</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<source>URI handling</source>
<translation type="unfinished"/>
</message>
<message>
<source>URI can not be parsed! This can be caused by an invalid Latinumcoin address or malformed URI parameters.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Requested payment amount of %1 is too small (considered dust).</source>
<translation type="unfinished"/>
</message>
<message>
<source>Payment request error</source>
<translation type="unfinished"/>
</message>
<message>
<source>Cannot start bitcoin: click-to-pay handler</source>
<translation type="unfinished"/>
</message>
<message>
<source>Net manager warning</source>
<translation type="unfinished"/>
</message>
<message>
<source>Your active proxy doesn't support SOCKS5, which is required for payment requests via proxy.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Payment request fetch URL is invalid: %1</source>
<translation type="unfinished"/>
</message>
<message>
<source>Payment request file handling</source>
<translation type="unfinished"/>
</message>
<message>
<source>Payment request file can not be read or processed! This can be caused by an invalid payment request file.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Unverified payment requests to custom payment scripts are unsupported.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Refund from %1</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error communicating with %1: %2</source>
<translation type="unfinished"/>
</message>
<message>
<source>Payment request can not be parsed or processed!</source>
<translation type="unfinished"/>
</message>
<message>
<source>Bad response from server %1</source>
<translation type="unfinished"/>
</message>
<message>
<source>Payment acknowledged</source>
<translation type="unfinished"/>
</message>
<message>
<source>Network request error</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QObject</name>
<message>
<source>Latinumcoin</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error: Specified data directory "%1" does not exist.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error: Cannot parse configuration file: %1. Only use key=value syntax.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error: Invalid combination of -regtest and -testnet.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Enter a Latinumcoin address (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QRImageWidget</name>
<message>
<source>&Save Image...</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Copy Image</source>
<translation type="unfinished"/>
</message>
<message>
<source>Save QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<source>PNG Image (*.png)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<source>Client name</source>
<translation type="unfinished"/>
</message>
<message>
<source>N/A</source>
<translation type="unfinished"/>
</message>
<message>
<source>Client version</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Information</source>
<translation type="unfinished"/>
</message>
<message>
<source>Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<source>General</source>
<translation type="unfinished"/>
</message>
<message>
<source>Using OpenSSL version</source>
<translation type="unfinished"/>
</message>
<message>
<source>Startup time</source>
<translation type="unfinished"/>
</message>
<message>
<source>Network</source>
<translation type="unfinished"/>
</message>
<message>
<source>Name</source>
<translation type="unfinished"/>
</message>
<message>
<source>Number of connections</source>
<translation type="unfinished"/>
</message>
<message>
<source>Block chain</source>
<translation type="unfinished"/>
</message>
<message>
<source>Current number of blocks</source>
<translation type="unfinished"/>
</message>
<message>
<source>Estimated total blocks</source>
<translation type="unfinished"/>
</message>
<message>
<source>Last block time</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Open</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Console</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Network Traffic</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Clear</source>
<translation type="unfinished"/>
</message>
<message>
<source>Totals</source>
<translation type="unfinished"/>
</message>
<message>
<source>In:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Out:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Build date</source>
<translation type="unfinished"/>
</message>
<message>
<source>Debug log file</source>
<translation type="unfinished"/>
</message>
<message>
<source>Open the Latinumcoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Clear console</source>
<translation type="unfinished"/>
</message>
<message>
<source>Welcome to the Latinumcoin RPC console.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation type="unfinished"/>
</message>
<message>
<source>%1 B</source>
<translation type="unfinished"/>
</message>
<message>
<source>%1 KB</source>
<translation type="unfinished"/>
</message>
<message>
<source>%1 MB</source>
<translation type="unfinished"/>
</message>
<message>
<source>%1 GB</source>
<translation type="unfinished"/>
</message>
<message>
<source>%1 m</source>
<translation type="unfinished"/>
</message>
<message>
<source>%1 h</source>
<translation type="unfinished"/>
</message>
<message>
<source>%1 h %2 m</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ReceiveCoinsDialog</name>
<message>
<source>&Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Label:</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Message:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Reuse one of the previously used receiving addresses. Reusing addresses has security and privacy issues. Do not use this unless re-generating a payment request made before.</source>
<translation type="unfinished"/>
</message>
<message>
<source>R&euse an existing receiving address (not recommended)</source>
<translation type="unfinished"/>
</message>
<message>
<source>An optional message to attach to the payment request, which will be displayed when the request is opened. Note: The message will not be sent with the payment over the Latinumcoin network.</source>
<translation type="unfinished"/>
</message>
<message>
<source>An optional label to associate with the new receiving address.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Use this form to request payments. All fields are <b>optional</b>.</source>
<translation type="unfinished"/>
</message>
<message>
<source>An optional amount to request. Leave this empty or zero to not request a specific amount.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Clear all fields of the form.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Clear</source>
<translation type="unfinished"/>
</message>
<message>
<source>Requested payments history</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Request payment</source>
<translation type="unfinished"/>
</message>
<message>
<source>Show the selected request (does the same as double clicking an entry)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Show</source>
<translation type="unfinished"/>
</message>
<message>
<source>Remove the selected entries from the list</source>
<translation type="unfinished"/>
</message>
<message>
<source>Remove</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy label</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy message</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy amount</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ReceiveRequestDialog</name>
<message>
<source>QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy &URI</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy &Address</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Save Image...</source>
<translation type="unfinished"/>
</message>
<message>
<source>Request payment to %1</source>
<translation type="unfinished"/>
</message>
<message>
<source>Payment information</source>
<translation type="unfinished"/>
</message>
<message>
<source>URI</source>
<translation type="unfinished"/>
</message>
<message>
<source>Address</source>
<translation>Adreça</translation>
</message>
<message>
<source>Amount</source>
<translation type="unfinished"/>
</message>
<message>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<source>Message</source>
<translation type="unfinished"/>
</message>
<message>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>RecentRequestsTableModel</name>
<message>
<source>Date</source>
<translation type="unfinished"/>
</message>
<message>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<source>Message</source>
<translation type="unfinished"/>
</message>
<message>
<source>Amount</source>
<translation type="unfinished"/>
</message>
<message>
<source>(no label)</source>
<translation>(sense etiqueta)</translation>
</message>
<message>
<source>(no message)</source>
<translation type="unfinished"/>
</message>
<message>
<source>(no amount)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<source>Send Coins</source>
<translation type="unfinished"/>
</message>
<message>
<source>Coin Control Features</source>
<translation type="unfinished"/>
</message>
<message>
<source>Inputs...</source>
<translation type="unfinished"/>
</message>
<message>
<source>automatically selected</source>
<translation type="unfinished"/>
</message>
<message>
<source>Insufficient funds!</source>
<translation type="unfinished"/>
</message>
<message>
<source>Quantity:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Bytes:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Priority:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Low Output:</source>
<translation type="unfinished"/>
</message>
<message>
<source>After Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Change:</source>
<translation type="unfinished"/>
</message>
<message>
<source>If this is activated, but the change address is empty or invalid, change will be sent to a newly generated address.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Custom change address</source>
<translation type="unfinished"/>
</message>
<message>
<source>Send to multiple recipients at once</source>
<translation type="unfinished"/>
</message>
<message>
<source>Add &Recipient</source>
<translation type="unfinished"/>
</message>
<message>
<source>Clear all fields of the form.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<source>Balance:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Confirm the send action</source>
<translation type="unfinished"/>
</message>
<message>
<source>S&end</source>
<translation type="unfinished"/>
</message>
<message>
<source>Confirm send coins</source>
<translation type="unfinished"/>
</message>
<message>
<source>%1 to %2</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy quantity</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy amount</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy fee</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy after fee</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy bytes</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy priority</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy low output</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy change</source>
<translation type="unfinished"/>
</message>
<message>
<source>Total Amount %1 (= %2)</source>
<translation type="unfinished"/>
</message>
<message>
<source>or</source>
<translation type="unfinished"/>
</message>
<message>
<source>The recipient address is not valid, please recheck.</source>
<translation type="unfinished"/>
</message>
<message>
<source>The amount to pay must be larger than 0.</source>
<translation type="unfinished"/>
</message>
<message>
<source>The amount exceeds your balance.</source>
<translation type="unfinished"/>
</message>
<message>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Transaction creation failed!</source>
<translation type="unfinished"/>
</message>
<message>
<source>The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Warning: Invalid Latinumcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<source>(no label)</source>
<translation>(sense etiqueta)</translation>
</message>
<message>
<source>Warning: Unknown change address</source>
<translation type="unfinished"/>
</message>
<message>
<source>Are you sure you want to send?</source>
<translation type="unfinished"/>
</message>
<message>
<source>added as transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<source>Payment request expired</source>
<translation type="unfinished"/>
</message>
<message>
<source>Invalid payment address %1</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<source>A&mount:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Pay &To:</source>
<translation type="unfinished"/>
</message>
<message>
<source>The address to send the payment to (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Enter a label for this address to add it to your address book</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Label:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Choose previously used address</source>
<translation type="unfinished"/>
</message>
<message>
<source>This is a normal payment.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Alt+A</source>
<translation type="unfinished"/>
</message>
<message>
<source>Paste address from clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<source>Alt+P</source>
<translation type="unfinished"/>
</message>
<message>
<source>Remove this entry</source>
<translation type="unfinished"/>
</message>
<message>
<source>Message:</source>
<translation type="unfinished"/>
</message>
<message>
<source>This is a verified payment request.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Enter a label for this address to add it to the list of used addresses</source>
<translation type="unfinished"/>
</message>
<message>
<source>A message that was attached to the bitcoin: URI which will be stored with the transaction for your reference. Note: This message will not be sent over the Latinumcoin network.</source>
<translation type="unfinished"/>
</message>
<message>
<source>This is an unverified payment request.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Pay To:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Memo:</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ShutdownWindow</name>
<message>
<source>Latinumcoin Core is shutting down...</source>
<translation type="unfinished"/>
</message>
<message>
<source>Do not shut down the computer until this window disappears.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<source>Signatures - Sign / Verify a Message</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Sign Message</source>
<translation type="unfinished"/>
</message>
<message>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation type="unfinished"/>
</message>
<message>
<source>The address to sign the message with (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Choose previously used address</source>
<translation type="unfinished"/>
</message>
<message>
<source>Alt+A</source>
<translation type="unfinished"/>
</message>
<message>
<source>Paste address from clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<source>Alt+P</source>
<translation type="unfinished"/>
</message>
<message>
<source>Enter the message you want to sign here</source>
<translation type="unfinished"/>
</message>
<message>
<source>Signature</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy the current signature to the system clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<source>Sign the message to prove you own this Latinumcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<source>Reset all sign message fields</source>
<translation type="unfinished"/>
</message>
<message>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation type="unfinished"/>
</message>
<message>
<source>The address the message was signed with (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Verify the message to ensure it was signed with the specified Latinumcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<source>Verify &Message</source>
<translation type="unfinished"/>
</message>
<message>
<source>Reset all verify message fields</source>
<translation type="unfinished"/>
</message>
<message>
<source>Enter a Latinumcoin address (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Click "Sign Message" to generate signature</source>
<translation type="unfinished"/>
</message>
<message>
<source>The entered address is invalid.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Please check the address and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<source>The entered address does not refer to a key.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Wallet unlock was cancelled.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Private key for the entered address is not available.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Message signing failed.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Message signed.</source>
<translation type="unfinished"/>
</message>
<message>
<source>The signature could not be decoded.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Please check the signature and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<source>The signature did not match the message digest.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Message verification failed.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Message verified.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<source>Latinumcoin Core</source>
<translation type="unfinished"/>
</message>
<message>
<source>The Latinumcoin Core developers</source>
<translation type="unfinished"/>
</message>
<message>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
</context>
<context>
<name>TrafficGraphWidget</name>
<message>
<source>KB/s</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<source>Open until %1</source>
<translation type="unfinished"/>
</message>
<message>
<source>conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<source>%1/offline</source>
<translation type="unfinished"/>
</message>
<message>
<source>%1/unconfirmed</source>
<translation type="unfinished"/>
</message>
<message>
<source>%1 confirmations</source>
<translation type="unfinished"/>
</message>
<message>
<source>Status</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<source>, broadcast through %n node(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<source>Date</source>
<translation type="unfinished"/>
</message>
<message>
<source>Source</source>
<translation type="unfinished"/>
</message>
<message>
<source>Generated</source>
<translation type="unfinished"/>
</message>
<message>
<source>From</source>
<translation type="unfinished"/>
</message>
<message>
<source>To</source>
<translation type="unfinished"/>
</message>
<message>
<source>own address</source>
<translation type="unfinished"/>
</message>
<message>
<source>label</source>
<translation type="unfinished"/>
</message>
<message>
<source>Credit</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<source>matures in %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<source>not accepted</source>
<translation type="unfinished"/>
</message>
<message>
<source>Debit</source>
<translation type="unfinished"/>
</message>
<message>
<source>Transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<source>Net amount</source>
<translation type="unfinished"/>
</message>
<message>
<source>Message</source>
<translation type="unfinished"/>
</message>
<message>
<source>Comment</source>
<translation type="unfinished"/>
</message>
<message>
<source>Transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<source>Merchant</source>
<translation type="unfinished"/>
</message>
<message>
<source>Generated coins must mature %1 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Debug information</source>
<translation type="unfinished"/>
</message>
<message>
<source>Transaction</source>
<translation type="unfinished"/>
</message>
<message>
<source>Inputs</source>
<translation type="unfinished"/>
</message>
<message>
<source>Amount</source>
<translation type="unfinished"/>
</message>
<message>
<source>true</source>
<translation type="unfinished"/>
</message>
<message>
<source>false</source>
<translation type="unfinished"/>
</message>
<message>
<source>, has not been successfully broadcast yet</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<source>unknown</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<source>Transaction details</source>
<translation type="unfinished"/>
</message>
<message>
<source>This pane shows a detailed description of the transaction</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<source>Date</source>
<translation type="unfinished"/>
</message>
<message>
<source>Type</source>
<translation type="unfinished"/>
</message>
<message>
<source>Address</source>
<translation>Adreça</translation>
</message>
<message>
<source>Amount</source>
<translation type="unfinished"/>
</message>
<message>
<source>Immature (%1 confirmations, will be available after %2)</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<source>Open until %1</source>
<translation type="unfinished"/>
</message>
<message>
<source>Confirmed (%1 confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation type="unfinished"/>
</message>
<message>
<source>Generated but not accepted</source>
<translation type="unfinished"/>
</message>
<message>
<source>Offline</source>
<translation type="unfinished"/>
</message>
<message>
<source>Unconfirmed</source>
<translation type="unfinished"/>
</message>
<message>
<source>Confirming (%1 of %2 recommended confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<source>Received with</source>
<translation type="unfinished"/>
</message>
<message>
<source>Received from</source>
<translation type="unfinished"/>
</message>
<message>
<source>Sent to</source>
<translation type="unfinished"/>
</message>
<message>
<source>Payment to yourself</source>
<translation type="unfinished"/>
</message>
<message>
<source>Mined</source>
<translation type="unfinished"/>
</message>
<message>
<source>(n/a)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Date and time that the transaction was received.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Type of transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Destination address of transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Amount removed from or added to balance.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<source>All</source>
<translation type="unfinished"/>
</message>
<message>
<source>Today</source>
<translation type="unfinished"/>
</message>
<message>
<source>This week</source>
<translation type="unfinished"/>
</message>
<message>
<source>This month</source>
<translation type="unfinished"/>
</message>
<message>
<source>Last month</source>
<translation type="unfinished"/>
</message>
<message>
<source>This year</source>
<translation type="unfinished"/>
</message>
<message>
<source>Range...</source>
<translation type="unfinished"/>
</message>
<message>
<source>Received with</source>
<translation type="unfinished"/>
</message>
<message>
<source>Sent to</source>
<translation type="unfinished"/>
</message>
<message>
<source>To yourself</source>
<translation type="unfinished"/>
</message>
<message>
<source>Mined</source>
<translation type="unfinished"/>
</message>
<message>
<source>Other</source>
<translation type="unfinished"/>
</message>
<message>
<source>Enter address or label to search</source>
<translation type="unfinished"/>
</message>
<message>
<source>Min amount</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy address</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy label</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy amount</source>
<translation type="unfinished"/>
</message>
<message>
<source>Copy transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<source>Edit label</source>
<translation type="unfinished"/>
</message>
<message>
<source>Show transaction details</source>
<translation type="unfinished"/>
</message>
<message>
<source>Export Transaction History</source>
<translation type="unfinished"/>
</message>
<message>
<source>Exporting Failed</source>
<translation type="unfinished"/>
</message>
<message>
<source>There was an error trying to save the transaction history to %1.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Exporting Successful</source>
<translation type="unfinished"/>
</message>
<message>
<source>The transaction history was successfully saved to %1.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Comma separated file (*.csv)</source>
<translation>Fitxer separat per comes (*.csv)</translation>
</message>
<message>
<source>Confirmed</source>
<translation type="unfinished"/>
</message>
<message>
<source>Date</source>
<translation type="unfinished"/>
</message>
<message>
<source>Type</source>
<translation type="unfinished"/>
</message>
<message>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<source>Address</source>
<translation>Adreça</translation>
</message>
<message>
<source>Amount</source>
<translation type="unfinished"/>
</message>
<message>
<source>ID</source>
<translation type="unfinished"/>
</message>
<message>
<source>Range:</source>
<translation type="unfinished"/>
</message>
<message>
<source>to</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>WalletFrame</name>
<message>
<source>No wallet has been loaded.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<source>Send Coins</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<source>There was an error trying to save the wallet data to %1.</source>
<translation type="unfinished"/>
</message>
<message>
<source>The wallet data was successfully saved to %1.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Backup Successful</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<source>List commands</source>
<translation type="unfinished"/>
</message>
<message>
<source>Get help for a command</source>
<translation type="unfinished"/>
</message>
<message>
<source>Options:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Specify configuration file (default: bitcoin.conf)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Specify pid file (default: bitcoind.pid)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Specify data directory</source>
<translation type="unfinished"/>
</message>
<message>
<source>Listen for connections on <port> (default: 8333 or testnet: 18333)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation type="unfinished"/>
</message>
<message>
<source>Specify your own public address</source>
<translation type="unfinished"/>
</message>
<message>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation type="unfinished"/>
</message>
<message>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<source>Listen for JSON-RPC connections on <port> (default: 8332 or testnet: 18332)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Accept command line and JSON-RPC commands</source>
<translation type="unfinished"/>
</message>
<message>
<source>Latinumcoin Core RPC client version</source>
<translation type="unfinished"/>
</message>
<message>
<source>Run in the background as a daemon and accept commands</source>
<translation type="unfinished"/>
</message>
<message>
<source>Use the test network</source>
<translation type="unfinished"/>
</message>
<message>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=bitcoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Latinumcoin Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<source>Acceptable ciphers (default: TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!3DES:@STRENGTH)</source>
<translation type="unfinished"/>
</message>
<message>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message>
<message>
<source>Continuously rate-limit free transactions to <n>*1000 bytes per minute (default:15)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Enter regression test mode, which uses a special chain in which blocks can be solved instantly. This is intended for regression testing tools and app development.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Enter regression test mode, which uses a special chain in which blocks can be solved instantly.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error: Listening for incoming connections failed (listen returned error %d)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation type="unfinished"/>
</message>
<message>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Fees smaller than this are considered zero fee (for transaction creation) (default:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Flush database activity from memory pool to disk log every <n> megabytes (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<source>How thorough the block verification of -checkblocks is (0-4, default: 3)</source>
<translation type="unfinished"/>
</message>
<message>
<source>In this mode -genproclimit controls how many blocks are generated immediately.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Set the number of script verification threads (%u to %d, 0 = auto, <0 = leave that many cores free, default: %d)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Set the processor limit for when generation is on (-1 = unlimited, default: -1)</source>
<translation type="unfinished"/>
</message>
<message>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation type="unfinished"/>
</message>
<message>
<source>Unable to bind to %s on this computer. Latinumcoin Core is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Use separate SOCKS5 proxy to reach peers via Tor hidden services (default: -proxy)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Latinumcoin will not work properly.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation type="unfinished"/>
</message>
<message>
<source>(default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<source>(default: wallet.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<source><category> can be:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<source>Latinumcoin Core Daemon</source>
<translation type="unfinished"/>
</message>
<message>
<source>Block creation options:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Clear list of wallet transactions (diagnostic tool; implies -rescan)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Connect only to the specified node(s)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Connect through SOCKS proxy</source>
<translation type="unfinished"/>
</message>
<message>
<source>Connect to JSON-RPC on <port> (default: 8332 or testnet: 18332)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Connection options:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Corrupted block database detected</source>
<translation type="unfinished"/>
</message>
<message>
<source>Debugging/Testing options:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Disable safemode, override a real safe mode event (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Do not load the wallet and disable wallet RPC calls</source>
<translation type="unfinished"/>
</message>
<message>
<source>Do you want to rebuild the block database now?</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error initializing block database</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error initializing wallet database environment %s!</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error loading block database</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error opening block database</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error: Disk space is low!</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error: system error: </source>
<translation type="unfinished"/>
</message>
<message>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Failed to read block info</source>
<translation type="unfinished"/>
</message>
<message>
<source>Failed to read block</source>
<translation type="unfinished"/>
</message>
<message>
<source>Failed to sync block index</source>
<translation type="unfinished"/>
</message>
<message>
<source>Failed to write block index</source>
<translation type="unfinished"/>
</message>
<message>
<source>Failed to write block info</source>
<translation type="unfinished"/>
</message>
<message>
<source>Failed to write block</source>
<translation type="unfinished"/>
</message>
<message>
<source>Failed to write file info</source>
<translation type="unfinished"/>
</message>
<message>
<source>Failed to write to coin database</source>
<translation type="unfinished"/>
</message>
<message>
<source>Failed to write transaction index</source>
<translation type="unfinished"/>
</message>
<message>
<source>Failed to write undo data</source>
<translation type="unfinished"/>
</message>
<message>
<source>Fee per kB to add to transactions you send</source>
<translation type="unfinished"/>
</message>
<message>
<source>Fees smaller than this are considered zero fee (for relaying) (default:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Force safe mode (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Generate coins (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<source>If <category> is not supplied, output all debugging information.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Incorrect or no genesis block found. Wrong datadir for network?</source>
<translation type="unfinished"/>
</message>
<message>
<source>Invalid -onion address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<source>Not enough file descriptors available.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Prepend debug output with timestamp (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<source>RPC client options:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation type="unfinished"/>
</message>
<message>
<source>Select SOCKS version for -proxy (4 or 5, default: 5)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Set database cache size in megabytes (%d to %d, default: %d)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Set maximum block size in bytes (default: %d)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Specify wallet file (within data directory)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Spend unconfirmed change when sending transactions (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<source>This is intended for regression testing tools and app development.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Usage (deprecated, use bitcoin-cli):</source>
<translation type="unfinished"/>
</message>
<message>
<source>Verifying blocks...</source>
<translation type="unfinished"/>
</message>
<message>
<source>Verifying wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<source>Wait for RPC server to start</source>
<translation type="unfinished"/>
</message>
<message>
<source>Wallet %s resides outside data directory %s</source>
<translation type="unfinished"/>
</message>
<message>
<source>Wallet options:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Warning: Deprecated argument -debugnet ignored, use -debug=net</source>
<translation type="unfinished"/>
</message>
<message>
<source>You need to rebuild the database using -reindex to change -txindex</source>
<translation type="unfinished"/>
</message>
<message>
<source>Imports blocks from external blk000??.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<source>Cannot obtain a lock on data directory %s. Latinumcoin Core is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<source>Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Output debugging information (default: 0, supplying <category> is optional)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: %d)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<source>Limit size of signature cache to <n> entries (default: 50000)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Log transaction priority and fee per kB when mining blocks (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Maintain a full transaction index (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Print block on startup, if found in block index</source>
<translation type="unfinished"/>
</message>
<message>
<source>Print block tree on startup (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<source>RPC SSL options: (see the Latinumcoin Wiki for SSL setup instructions)</source>
<translation type="unfinished"/>
</message>
<message>
<source>RPC server options:</source>
<translation type="unfinished"/>
</message>
<message>
<source>Randomly drop 1 of every <n> network messages</source>
<translation type="unfinished"/>
</message>
<message>
<source>Randomly fuzz 1 of every <n> network messages</source>
<translation type="unfinished"/>
</message>
<message>
<source>Run a thread to flush wallet periodically (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<source>SSL options: (see the Latinumcoin Wiki for SSL setup instructions)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Send command to Latinumcoin Core</source>
<translation type="unfinished"/>
</message>
<message>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation type="unfinished"/>
</message>
<message>
<source>Set minimum block size in bytes (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Sets the DB_PRIVATE flag in the wallet db environment (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Show all debugging options (usage: --help -help-debug)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Show benchmark information (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Signing transaction failed</source>
<translation type="unfinished"/>
</message>
<message>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Start Latinumcoin Core Daemon</source>
<translation type="unfinished"/>
</message>
<message>
<source>System error: </source>
<translation type="unfinished"/>
</message>
<message>
<source>Transaction amount too small</source>
<translation type="unfinished"/>
</message>
<message>
<source>Transaction amounts must be positive</source>
<translation type="unfinished"/>
</message>
<message>
<source>Transaction too large</source>
<translation type="unfinished"/>
</message>
<message>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Username for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation type="unfinished"/>
</message>
<message>
<source>Zapping all transactions from wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<source>on startup</source>
<translation type="unfinished"/>
</message>
<message>
<source>version</source>
<translation type="unfinished"/>
</message>
<message>
<source>wallet.dat corrupt, salvage failed</source>
<translation type="unfinished"/>
</message>
<message>
<source>Password for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation type="unfinished"/>
</message>
<message>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Upgrade wallet to latest format</source>
<translation type="unfinished"/>
</message>
<message>
<source>Set key pool size to <n> (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Rescan the block chain for missing wallet transactions</source>
<translation type="unfinished"/>
</message>
<message>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<source>Server certificate file (default: server.cert)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Server private key (default: server.pem)</source>
<translation type="unfinished"/>
</message>
<message>
<source>This help message</source>
<translation type="unfinished"/>
</message>
<message>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation type="unfinished"/>
</message>
<message>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation type="unfinished"/>
</message>
<message>
<source>Loading addresses...</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error loading wallet.dat: Wallet requires newer version of Latinumcoin</source>
<translation type="unfinished"/>
</message>
<message>
<source>Wallet needed to be rewritten: restart Latinumcoin to complete</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error loading wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<source>Invalid -proxy address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<source>Unknown -socks proxy version requested: %i</source>
<translation type="unfinished"/>
</message>
<message>
<source>Cannot resolve -bind address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<source>Cannot resolve -externalip address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<source>Invalid amount</source>
<translation type="unfinished"/>
</message>
<message>
<source>Insufficient funds</source>
<translation type="unfinished"/>
</message>
<message>
<source>Loading block index...</source>
<translation type="unfinished"/>
</message>
<message>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation type="unfinished"/>
</message>
<message>
<source>Loading wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<source>Cannot downgrade wallet</source>
<translation type="unfinished"/>
</message>
<message>
<source>Cannot write default address</source>
<translation type="unfinished"/>
</message>
<message>
<source>Rescanning...</source>
<translation type="unfinished"/>
</message>
<message>
<source>Done loading</source>
<translation type="unfinished"/>
</message>
<message>
<source>To use the %s option</source>
<translation type="unfinished"/>
</message>
<message>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation type="unfinished"/>
</message>
</context>
</TS><|fim▁end|> | |
<|file_name|>test_basic.py<|end_file_name|><|fim▁begin|>import warnings
from sqlalchemy.testing import eq_, is_, assert_raises, assert_raises_message
from sqlalchemy import *
from sqlalchemy import exc as sa_exc, util, event
from sqlalchemy.orm import *
from sqlalchemy.orm.util import instance_str
from sqlalchemy.orm import exc as orm_exc, attributes
from sqlalchemy.testing.assertsql import AllOf, CompiledSQL, RegexSQL, Or
from sqlalchemy.sql import table, column
from sqlalchemy import testing
from sqlalchemy.testing import engines
from sqlalchemy.testing import fixtures
from test.orm import _fixtures
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy import inspect
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.testing.util import gc_collect
class O2MTest(fixtures.MappedTest):
"""deals with inheritance and one-to-many relationships"""
@classmethod
def define_tables(cls, metadata):
global foo, bar, blub
foo = Table('foo', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(20)))
bar = Table('bar', metadata,
Column('id', Integer, ForeignKey('foo.id'), primary_key=True),
Column('bar_data', String(20)))
blub = Table('blub', metadata,
Column('id', Integer, ForeignKey('bar.id'), primary_key=True),
Column('foo_id', Integer, ForeignKey('foo.id'), nullable=False),
Column('blub_data', String(20)))
def test_basic(self):
class Foo(object):
def __init__(self, data=None):
self.data = data
def __repr__(self):
return "Foo id %d, data %s" % (self.id, self.data)
mapper(Foo, foo)
class Bar(Foo):
def __repr__(self):
return "Bar id %d, data %s" % (self.id, self.data)
mapper(Bar, bar, inherits=Foo)
class Blub(Bar):
def __repr__(self):
return "Blub id %d, data %s" % (self.id, self.data)
mapper(Blub, blub, inherits=Bar, properties={
'parent_foo':relationship(Foo)
})
sess = create_session()
b1 = Blub("blub #1")
b2 = Blub("blub #2")
f = Foo("foo #1")
sess.add(b1)
sess.add(b2)
sess.add(f)
b1.parent_foo = f
b2.parent_foo = f
sess.flush()
compare = ','.join([repr(b1), repr(b2), repr(b1.parent_foo),
repr(b2.parent_foo)])
sess.expunge_all()
l = sess.query(Blub).all()
result = ','.join([repr(l[0]), repr(l[1]),
repr(l[0].parent_foo), repr(l[1].parent_foo)])
eq_(compare, result)
eq_(l[0].parent_foo.data, 'foo #1')
eq_(l[1].parent_foo.data, 'foo #1')
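# joined eager loading against a self-referential mapping whose polymorphic_on is a SQL CASE expression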
class PolyExpressionEagerLoad(fixtures.DeclarativeMappedTest):
run_setup_mappers = 'once'
__dialect__ = 'default'
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(fixtures.ComparableEntity, Base):
__tablename__ = 'a'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
discriminator = Column(String(50), nullable=False)
child_id = Column(Integer, ForeignKey('a.id'))
child = relationship('A')
p_a = case([
(discriminator == "a", "a"),
], else_="b")
__mapper_args__ = {
'polymorphic_identity': 'a',
"polymorphic_on": p_a,
}
class B(A):
__mapper_args__ = {
'polymorphic_identity': 'b'
}
@classmethod
def insert_data(cls):
A = cls.classes.A
session = Session(testing.db)
session.add_all([
A(id=1, discriminator='a'),
A(id=2, discriminator='b', child_id=1),
A(id=3, discriminator='c', child_id=1),
])
session.commit()
def test_joinedload(self):
A = self.classes.A
B = self.classes.B
session = Session(testing.db)
result = session.query(A).filter_by(child_id=None).\
options(joinedload('child')).one()
eq_(
result,
A(id=1, discriminator='a', child=[B(id=2), B(id=3)]),
)
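# resolution of with_polymorphic specs into ordered mapper lists and joined selectables across a multi-level hierarchy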
class PolymorphicResolutionMultiLevel(fixtures.DeclarativeMappedTest,
testing.AssertsCompiledSQL):
run_setup_mappers = 'once'
__dialect__ = 'default'
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = 'a'
id = Column(Integer, primary_key=True)
class B(A):
__tablename__ = 'b'
id = Column(Integer, ForeignKey('a.id'), primary_key=True)
class C(A):
__tablename__ = 'c'
id = Column(Integer, ForeignKey('a.id'), primary_key=True)
class D(B):
__tablename__ = 'd'
id = Column(Integer, ForeignKey('b.id'), primary_key=True)
def test_ordered_b_d(self):
a_mapper = inspect(self.classes.A)
eq_(
a_mapper._mappers_from_spec(
[self.classes.B, self.classes.D], None),
[a_mapper, inspect(self.classes.B), inspect(self.classes.D)]
)
def test_a(self):
a_mapper = inspect(self.classes.A)
eq_(
a_mapper._mappers_from_spec(
[self.classes.A], None),
[a_mapper]
)
def test_b_d_selectable(self):
a_mapper = inspect(self.classes.A)
spec = [self.classes.D, self.classes.B]
eq_(
a_mapper._mappers_from_spec(
spec,
self.classes.B.__table__.join(self.classes.D.__table__)
),
[inspect(self.classes.B), inspect(self.classes.D)]
)
def test_d_selectable(self):
a_mapper = inspect(self.classes.A)
spec = [self.classes.D]
eq_(
a_mapper._mappers_from_spec(
spec,
self.classes.B.__table__.join(self.classes.D.__table__)
),
[inspect(self.classes.D)]
)
def test_reverse_d_b(self):
a_mapper = inspect(self.classes.A)
spec = [self.classes.D, self.classes.B]
eq_(
a_mapper._mappers_from_spec(
spec, None),
[a_mapper, inspect(self.classes.B), inspect(self.classes.D)]
)
mappers, selectable = a_mapper._with_polymorphic_args(spec=spec)
self.assert_compile(selectable,
"a LEFT OUTER JOIN b ON a.id = b.id "
"LEFT OUTER JOIN d ON b.id = d.id")
def test_d_b_missing(self):
a_mapper = inspect(self.classes.A)
spec = [self.classes.D]
eq_(
a_mapper._mappers_from_spec(
spec, None),
[a_mapper, inspect(self.classes.B), inspect(self.classes.D)]
)
mappers, selectable = a_mapper._with_polymorphic_args(spec=spec)
self.assert_compile(selectable,
"a LEFT OUTER JOIN b ON a.id = b.id "
"LEFT OUTER JOIN d ON b.id = d.id")
def test_d_c_b(self):
a_mapper = inspect(self.classes.A)
spec = [self.classes.D, self.classes.C, self.classes.B]
ms = a_mapper._mappers_from_spec(spec, None)
eq_(
ms[-1], inspect(self.classes.D)
)
eq_(ms[0], a_mapper)
eq_(
set(ms[1:3]), set(a_mapper._inheriting_mappers)
)
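# behavior and error messages when polymorphic_on is a SQL expression or a column not local to the mapped table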
class PolymorphicOnNotLocalTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
t1 = Table('t1', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('x', String(10)),
Column('q', String(10)))
t2 = Table('t2', metadata,
Column('t2id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('y', String(10)),
Column('xid', ForeignKey('t1.id')))
@classmethod
def setup_classes(cls):
class Parent(cls.Comparable):
pass
class Child(Parent):
pass
def test_non_col_polymorphic_on(self):
Parent = self.classes.Parent
t2 = self.tables.t2
assert_raises_message(
sa_exc.ArgumentError,
"Can't determine polymorphic_on "
"value 'im not a column' - no "
"attribute is mapped to this name.",
mapper,
Parent, t2, polymorphic_on="im not a column"
)
def test_polymorphic_on_non_expr_prop(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent = self.classes.Parent
t1t2_join = select([t1.c.x], from_obj=[t1.join(t2)]).alias()
def go():
interface_m = mapper(Parent, t2,
polymorphic_on=lambda:"hi",
polymorphic_identity=0)
assert_raises_message(
sa_exc.ArgumentError,
"Only direct column-mapped property or "
"SQL expression can be passed for polymorphic_on",
go
)
def test_polymorphic_on_not_present_col(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent = self.classes.Parent
t1t2_join = select([t1.c.x], from_obj=[t1.join(t2)]).alias()
def go():
t1t2_join_2 = select([t1.c.q], from_obj=[t1.join(t2)]).alias()
interface_m = mapper(Parent, t2,
polymorphic_on=t1t2_join.c.x,
with_polymorphic=('*', t1t2_join_2),
polymorphic_identity=0)
assert_raises_message(
sa_exc.InvalidRequestError,
"Could not map polymorphic_on column 'x' to the mapped table - "
"polymorphic loads will not function properly",
go
)
def test_polymorphic_on_only_in_with_poly(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent = self.classes.Parent
t1t2_join = select([t1.c.x], from_obj=[t1.join(t2)]).alias()
        # if it's in the with_polymorphic, then it's OK
mapper(Parent, t2,
polymorphic_on=t1t2_join.c.x,
with_polymorphic=('*', t1t2_join),
polymorphic_identity=0)
    def test_polymorphic_on_not_in_with_poly(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent = self.classes.Parent
t1t2_join = select([t1.c.x], from_obj=[t1.join(t2)]).alias()
        # if with_polymorphic is given but the column is not present in it, not OK
def go():
t1t2_join_2 = select([t1.c.q], from_obj=[t1.join(t2)]).alias()
interface_m = mapper(Parent, t2,
polymorphic_on=t1t2_join.c.x,
with_polymorphic=('*', t1t2_join_2),
polymorphic_identity=0)
assert_raises_message(
sa_exc.InvalidRequestError,
"Could not map polymorphic_on column 'x' "
"to the mapped table - "
"polymorphic loads will not function properly",
go
)
def test_polymorphic_on_expr_explicit_map(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
expr = case([
(t1.c.x=="p", "parent"),
(t1.c.x=="c", "child"),
])
mapper(Parent, t1, properties={
"discriminator":column_property(expr)
}, polymorphic_identity="parent",
polymorphic_on=expr)
mapper(Child, t2, inherits=Parent,
polymorphic_identity="child")
self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_expr_implicit_map_no_label_joined(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
expr = case([
(t1.c.x=="p", "parent"),
(t1.c.x=="c", "child"),
])
mapper(Parent, t1, polymorphic_identity="parent",
polymorphic_on=expr)
mapper(Child, t2, inherits=Parent, polymorphic_identity="child")
self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_expr_implicit_map_w_label_joined(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
expr = case([
(t1.c.x=="p", "parent"),
(t1.c.x=="c", "child"),
]).label(None)
mapper(Parent, t1, polymorphic_identity="parent",
polymorphic_on=expr)
mapper(Child, t2, inherits=Parent, polymorphic_identity="child")
self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_expr_implicit_map_no_label_single(self):
"""test that single_table_criterion is propagated
with a standalone expr"""
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
expr = case([
(t1.c.x=="p", "parent"),
(t1.c.x=="c", "child"),
])
mapper(Parent, t1, polymorphic_identity="parent",
polymorphic_on=expr)
mapper(Child, inherits=Parent, polymorphic_identity="child")
self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_expr_implicit_map_w_label_single(self):
"""test that single_table_criterion is propagated
with a standalone expr"""
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
expr = case([
(t1.c.x=="p", "parent"),
(t1.c.x=="c", "child"),
]).label(None)
mapper(Parent, t1, polymorphic_identity="parent",
polymorphic_on=expr)
mapper(Child, inherits=Parent, polymorphic_identity="child")
self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_column_prop(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
expr = case([
(t1.c.x=="p", "parent"),
(t1.c.x=="c", "child"),
])
cprop = column_property(expr)
mapper(Parent, t1, properties={
"discriminator":cprop
}, polymorphic_identity="parent",
polymorphic_on=cprop)
mapper(Child, t2, inherits=Parent,
polymorphic_identity="child")
self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_column_str_prop(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
expr = case([
(t1.c.x=="p", "parent"),
(t1.c.x=="c", "child"),
])
cprop = column_property(expr)
mapper(Parent, t1, properties={
"discriminator":cprop
}, polymorphic_identity="parent",
polymorphic_on="discriminator")
mapper(Child, t2, inherits=Parent,
polymorphic_identity="child")
self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_synonym(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
cprop = column_property(t1.c.x)
assert_raises_message(
sa_exc.ArgumentError,
"Only direct column-mapped property or "
"SQL expression can be passed for polymorphic_on",
mapper, Parent, t1, properties={
"discriminator":cprop,
"discrim_syn":synonym(cprop)
}, polymorphic_identity="parent",
polymorphic_on="discrim_syn")
def _roundtrip(self, set_event=True, parent_ident='parent', child_ident='child'):
Parent, Child = self.classes.Parent, self.classes.Child
# locate the "polymorphic_on" ColumnProperty. This isn't
# "officially" stored at the moment so do some heuristics to find it.
parent_mapper = inspect(Parent)
for prop in parent_mapper.column_attrs:
if not prop.instrument:
break
else:
prop = parent_mapper._columntoproperty[
parent_mapper.polymorphic_on]
# then make sure the column we will query on matches.
is_(
parent_mapper.polymorphic_on,
prop.columns[0]
)
if set_event:
@event.listens_for(Parent, "init", propagate=True)
def set_identity(instance, *arg, **kw):
ident = object_mapper(instance).polymorphic_identity
if ident == 'parent':
instance.x = parent_ident
elif ident == 'child':
instance.x = child_ident
else:
assert False, "Got unexpected identity %r" % ident
s = Session(testing.db)
s.add_all([
Parent(q="p1"),
Child(q="c1", y="c1"),
Parent(q="p2"),
])
s.commit()
s.close()
eq_(
[type(t) for t in s.query(Parent).order_by(Parent.id)],
[Parent, Child, Parent]
)
eq_(
[type(t) for t in s.query(Child).all()],
[Child]
)
class SortOnlyOnImportantFKsTest(fixtures.MappedTest):
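    """Test that flush succeeds when an extra use_alter FK cycle exists
    between the base and subclass tables; only the inheritance FK
    (b.id -> a.id) should drive the dependency sort."""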
@classmethod
def define_tables(cls, metadata):
Table('a', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('b_id', Integer,
ForeignKey('b.id', use_alter=True, name='b'))
)
Table('b', metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True)
)
@classmethod
def setup_classes(cls):
Base = declarative_base()
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
b_id = Column(Integer, ForeignKey('b.id'))
class B(A):
__tablename__ = "b"
id = Column(Integer, ForeignKey('a.id'), primary_key=True)
__mapper_args__ = {'inherit_condition': id == A.id}
cls.classes.A = A
cls.classes.B = B
def test_flush(self):
s = Session(testing.db)
s.add(self.classes.B())
s.flush()
class FalseDiscriminatorTest(fixtures.MappedTest):
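    """Test that False is usable as a polymorphic_identity value."""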
@classmethod
def define_tables(cls, metadata):
global t1
t1 = Table('t1', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('type', Boolean, nullable=False))
def test_false_on_sub(self):
class Foo(object):
pass
class Bar(Foo):
pass
mapper(Foo, t1, polymorphic_on=t1.c.type, polymorphic_identity=True)
mapper(Bar, inherits=Foo, polymorphic_identity=False)
sess = create_session()
b1 = Bar()
sess.add(b1)
sess.flush()
assert b1.type is False
sess.expunge_all()
assert isinstance(sess.query(Foo).one(), Bar)
def test_false_on_base(self):
class Ding(object):pass
class Bat(Ding):pass
mapper(Ding, t1, polymorphic_on=t1.c.type, polymorphic_identity=False)
mapper(Bat, inherits=Ding, polymorphic_identity=True)
sess = create_session()
d1 = Ding()
sess.add(d1)
sess.flush()
assert d1.type is False
sess.expunge_all()
assert sess.query(Ding).one() is not None
class PolymorphicSynonymTest(fixtures.MappedTest):
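    """Test that a synonym-mapped column works across a polymorphic
    joined-inheritance hierarchy."""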
@classmethod
def define_tables(cls, metadata):
global t1, t2
t1 = Table('t1', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('type', String(10), nullable=False),
Column('info', String(255)))
t2 = Table('t2', metadata,
Column('id', Integer, ForeignKey('t1.id'),
primary_key=True),
Column('data', String(10), nullable=False))
def test_polymorphic_synonym(self):
class T1(fixtures.ComparableEntity):
def info(self):
return "THE INFO IS:" + self._info
def _set_info(self, x):
self._info = x
info = property(info, _set_info)
class T2(T1):pass
mapper(T1, t1, polymorphic_on=t1.c.type, polymorphic_identity='t1',
properties={
'info':synonym('_info', map_column=True)
})
mapper(T2, t2, inherits=T1, polymorphic_identity='t2')
sess = create_session()
at1 = T1(info='at1')
at2 = T2(info='at2', data='t2 data')
sess.add(at1)
sess.add(at2)
sess.flush()
sess.expunge_all()
eq_(sess.query(T2).filter(T2.info=='at2').one(), at2)
eq_(at2.info, "THE INFO IS:at2")
class PolymorphicAttributeManagementTest(fixtures.MappedTest):
"""Test polymorphic_on can be assigned, can be mirrored, etc."""
run_setup_mappers = 'once'
@classmethod
def define_tables(cls, metadata):
Table('table_a', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('class_name', String(50))
)
Table('table_b', metadata,
Column('id', Integer, ForeignKey('table_a.id'),
primary_key=True),
Column('class_name', String(50)),
)
Table('table_c', metadata,
Column('id', Integer, ForeignKey('table_b.id'),
primary_key=True),
Column('data', String(10))
)
@classmethod
def setup_classes(cls):
table_b, table_c, table_a = (cls.tables.table_b,
cls.tables.table_c,
cls.tables.table_a)
class A(cls.Basic):
pass
class B(A):
pass
class C(B):
pass
class D(B):
pass
mapper(A, table_a,
polymorphic_on=table_a.c.class_name,
polymorphic_identity='a')
mapper(B, table_b, inherits=A,
polymorphic_on=table_b.c.class_name,
polymorphic_identity='b',
properties=dict(class_name=[table_a.c.class_name, table_b.c.class_name]))
mapper(C, table_c, inherits=B,
polymorphic_identity='c')
mapper(D, inherits=B,
polymorphic_identity='d')
def test_poly_configured_immediate(self):
A, C, B = (self.classes.A,
self.classes.C,
self.classes.B)
a = A()
b = B()
c = C()
eq_(a.class_name, 'a')
eq_(b.class_name, 'b')
eq_(c.class_name, 'c')
def test_base_class(self):
A, C, B = (self.classes.A,
self.classes.C,
self.classes.B)
sess = Session()
c1 = C()
sess.add(c1)
sess.commit()
assert isinstance(sess.query(B).first(), C)
sess.close()
assert isinstance(sess.query(A).first(), C)
def test_valid_assignment_upwards(self):
"""test that we can assign 'd' to a B, since B/D
both involve the same set of tables.
"""
D, B = self.classes.D, self.classes.B
sess = Session()
b1 = B()
b1.class_name = 'd'
sess.add(b1)
sess.commit()
sess.close()
assert isinstance(sess.query(B).first(), D)
def test_invalid_assignment_downwards(self):
"""test that we warn on assign of 'b' to a C, since this adds
a row to the C table we'd never load.
"""
C = self.classes.C
sess = Session()
c1 = C()
c1.class_name = 'b'
sess.add(c1)
assert_raises_message(
sa_exc.SAWarning,
"Flushing object %s with incompatible "
"polymorphic identity 'b'; the object may not "
"refresh and/or load correctly" % instance_str(c1),
sess.flush
)
def test_invalid_assignment_upwards(self):
"""test that we warn on assign of 'c' to a B, since we will have a
"C" row that has no joined row, which will cause object
deleted errors.
"""
B = self.classes.B
sess = Session()
b1 = B()
b1.class_name = 'c'
sess.add(b1)
assert_raises_message(
sa_exc.SAWarning,
"Flushing object %s with incompatible "
"polymorphic identity 'c'; the object may not "
"refresh and/or load correctly" % instance_str(b1),
sess.flush
)
def test_entirely_oob_assignment(self):
"""test warn on an unknown polymorphic identity.
"""
B = self.classes.B
sess = Session()
b1 = B()
b1.class_name = 'xyz'
sess.add(b1)
assert_raises_message(
sa_exc.SAWarning,
"Flushing object %s with incompatible "
"polymorphic identity 'xyz'; the object may not "
"refresh and/or load correctly" % instance_str(b1),
sess.flush
)
    def test_not_set_on_update(self):
C = self.classes.C
sess = Session()
c1 = C()
sess.add(c1)
sess.commit()
sess.expire(c1)
c1.data = 'foo'
sess.flush()
    def test_validate_on_update(self):
C = self.classes.C
sess = Session()
c1 = C()
sess.add(c1)
sess.commit()
sess.expire(c1)
c1.class_name = 'b'
assert_raises_message(
sa_exc.SAWarning,
"Flushing object %s with incompatible "
"polymorphic identity 'b'; the object may not "
"refresh and/or load correctly" % instance_str(c1),
sess.flush
)
class CascadeTest(fixtures.MappedTest):
"""that cascades on polymorphic relationships continue
cascading along the path of the instance's mapper, not
the base mapper."""
@classmethod
def define_tables(cls, metadata):
global t1, t2, t3, t4
t1= Table('t1', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(30))
)
t2 = Table('t2', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('t1id', Integer, ForeignKey('t1.id')),
Column('type', String(30)),
Column('data', String(30))
)
t3 = Table('t3', metadata,
Column('id', Integer, ForeignKey('t2.id'),
primary_key=True),
Column('moredata', String(30)))
t4 = Table('t4', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('t3id', Integer, ForeignKey('t3.id')),
Column('data', String(30)))
def test_cascade(self):
class T1(fixtures.BasicEntity):
pass
class T2(fixtures.BasicEntity):
pass
class T3(T2):
pass
class T4(fixtures.BasicEntity):
pass
mapper(T1, t1, properties={
't2s':relationship(T2, cascade="all")
})
mapper(T2, t2, polymorphic_on=t2.c.type, polymorphic_identity='t2')
mapper(T3, t3, inherits=T2, polymorphic_identity='t3', properties={
't4s':relationship(T4, cascade="all")
})
mapper(T4, t4)
sess = create_session()
t1_1 = T1(data='t1')
t3_1 = T3(data ='t3', moredata='t3')
t2_1 = T2(data='t2')
t1_1.t2s.append(t2_1)
t1_1.t2s.append(t3_1)
t4_1 = T4(data='t4')
t3_1.t4s.append(t4_1)
sess.add(t1_1)
assert t4_1 in sess.new
sess.flush()
sess.delete(t1_1)
assert t4_1 in sess.deleted
sess.flush()
class M2OUseGetTest(fixtures.MappedTest):
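    """Test that a many-to-one relationship targeting a joined-inheritance
    subclass can still use the identity-map "get" optimization, emitting
    no SQL for the lazy load."""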
@classmethod
def define_tables(cls, metadata):
Table('base', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('type', String(30))
)
Table('sub', metadata,
Column('id', Integer, ForeignKey('base.id'), primary_key=True),
)
Table('related', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('sub_id', Integer, ForeignKey('sub.id')),
)
def test_use_get(self):
base, sub, related = (self.tables.base,
self.tables.sub,
self.tables.related)
# test [ticket:1186]
class Base(fixtures.BasicEntity):
pass
class Sub(Base):
pass
class Related(Base):
pass
mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='b')
mapper(Sub, sub, inherits=Base, polymorphic_identity='s')
mapper(Related, related, properties={
# previously, this was needed for the comparison to occur:
# the 'primaryjoin' looks just like "Sub"'s "get" clause (based on the Base id),
# and foreign_keys since that join condition doesn't actually have any fks in it
#'sub':relationship(Sub, primaryjoin=base.c.id==related.c.sub_id, foreign_keys=related.c.sub_id)
# now we can use this:
'sub':relationship(Sub)
})
assert class_mapper(Related).get_property('sub').strategy.use_get
sess = create_session()
s1 = Sub()
r1 = Related(sub=s1)
sess.add(r1)
sess.flush()
sess.expunge_all()
r1 = sess.query(Related).first()
s1 = sess.query(Sub).first()
def go():
assert r1.sub
self.assert_sql_count(testing.db, go, 0)
class GetTest(fixtures.MappedTest):
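    """Test Query.get() against polymorphic and non-polymorphic
    joined-inheritance mappings."""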
@classmethod
def define_tables(cls, metadata):
global foo, bar, blub
foo = Table('foo', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('type', String(30)),
Column('data', String(20)))
bar = Table('bar', metadata,
Column('id', Integer, ForeignKey('foo.id'), primary_key=True),
Column('bar_data', String(20)))
blub = Table('blub', metadata,
Column('blub_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('foo_id', Integer, ForeignKey('foo.id')),
Column('bar_id', Integer, ForeignKey('bar.id')),
Column('blub_data', String(20)))
@classmethod
def setup_classes(cls):
class Foo(cls.Basic):
pass
class Bar(Foo):
pass
class Blub(Bar):
pass
def test_get_polymorphic(self):
self._do_get_test(True)
def test_get_nonpolymorphic(self):
self._do_get_test(False)
def _do_get_test(self, polymorphic):
foo, Bar, Blub, blub, bar, Foo = (self.tables.foo,
self.classes.Bar,
self.classes.Blub,
self.tables.blub,
self.tables.bar,
self.classes.Foo)
if polymorphic:
mapper(Foo, foo, polymorphic_on=foo.c.type, polymorphic_identity='foo')
mapper(Bar, bar, inherits=Foo, polymorphic_identity='bar')
mapper(Blub, blub, inherits=Bar, polymorphic_identity='blub')
else:
mapper(Foo, foo)
mapper(Bar, bar, inherits=Foo)
mapper(Blub, blub, inherits=Bar)
sess = create_session()
f = Foo()
b = Bar()
bl = Blub()
sess.add(f)
sess.add(b)
sess.add(bl)
sess.flush()
if polymorphic:
def go():
assert sess.query(Foo).get(f.id) is f
assert sess.query(Foo).get(b.id) is b
assert sess.query(Foo).get(bl.id) is bl
assert sess.query(Bar).get(b.id) is b
assert sess.query(Bar).get(bl.id) is bl
assert sess.query(Blub).get(bl.id) is bl
# test class mismatches - item is present
# in the identity map but we requested a subclass
assert sess.query(Blub).get(f.id) is None
assert sess.query(Blub).get(b.id) is None
assert sess.query(Bar).get(f.id) is None
self.assert_sql_count(testing.db, go, 0)
else:
# this is testing the 'wrong' behavior of using get()
# polymorphically with mappers that are not configured to be
# polymorphic. the important part being that get() always
# returns an instance of the query's type.
def go():
assert sess.query(Foo).get(f.id) is f
bb = sess.query(Foo).get(b.id)
                assert isinstance(bb, Foo) and bb.id==b.id
bll = sess.query(Foo).get(bl.id)
assert isinstance(bll, Foo) and bll.id==bl.id
assert sess.query(Bar).get(b.id) is b
bll = sess.query(Bar).get(bl.id)
assert isinstance(bll, Bar) and bll.id == bl.id
assert sess.query(Blub).get(bl.id) is bl
self.assert_sql_count(testing.db, go, 3)
class EagerLazyTest(fixtures.MappedTest):
"""tests eager load/lazy load of child items off inheritance mappers, tests that
LazyLoader constructs the right query condition."""
@classmethod
def define_tables(cls, metadata):
global foo, bar, bar_foo
foo = Table('foo', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(30)))
bar = Table('bar', metadata,
Column('id', Integer, ForeignKey('foo.id'), primary_key=True),
Column('bar_data', String(30)))
bar_foo = Table('bar_foo', metadata,
Column('bar_id', Integer, ForeignKey('bar.id')),
Column('foo_id', Integer, ForeignKey('foo.id'))
)
def test_basic(self):
class Foo(object): pass
class Bar(Foo): pass
foos = mapper(Foo, foo)
bars = mapper(Bar, bar, inherits=foos)
bars.add_property('lazy', relationship(foos, bar_foo, lazy='select'))
bars.add_property('eager', relationship(foos, bar_foo, lazy='joined'))
foo.insert().execute(data='foo1')
        bar.insert().execute(id=1, bar_data='bar1')
foo.insert().execute(data='foo2')
        bar.insert().execute(id=2, bar_data='bar2')
foo.insert().execute(data='foo3') #3
foo.insert().execute(data='foo4') #4
bar_foo.insert().execute(bar_id=1, foo_id=3)
bar_foo.insert().execute(bar_id=2, foo_id=4)
sess = create_session()
q = sess.query(Bar)
self.assert_(len(q.first().lazy) == 1)
self.assert_(len(q.first().eager) == 1)
class EagerTargetingTest(fixtures.MappedTest):
"""test a scenario where joined table inheritance might be
confused as an eagerly loaded joined table."""
@classmethod
def define_tables(cls, metadata):
Table('a_table', metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50)),
Column('type', String(30), nullable=False),
Column('parent_id', Integer, ForeignKey('a_table.id'))
)
Table('b_table', metadata,
Column('id', Integer, ForeignKey('a_table.id'), primary_key=True),
Column('b_data', String(50)),
)
def test_adapt_stringency(self):
b_table, a_table = self.tables.b_table, self.tables.a_table
class A(fixtures.ComparableEntity):
pass
class B(A):
pass
mapper(A, a_table, polymorphic_on=a_table.c.type, polymorphic_identity='A',
properties={
'children': relationship(A, order_by=a_table.c.name)
})
mapper(B, b_table, inherits=A, polymorphic_identity='B', properties={
'b_derived':column_property(b_table.c.b_data + "DATA")
})
sess=create_session()
b1=B(id=1, name='b1',b_data='i')
sess.add(b1)
sess.flush()
b2=B(id=2, name='b2', b_data='l', parent_id=1)
sess.add(b2)
sess.flush()
bid=b1.id
sess.expunge_all()
node = sess.query(B).filter(B.id==bid).all()[0]
eq_(node, B(id=1, name='b1',b_data='i'))
eq_(node.children[0], B(id=2, name='b2',b_data='l'))
sess.expunge_all()
node = sess.query(B).options(joinedload(B.children)).filter(B.id==bid).all()[0]
eq_(node, B(id=1, name='b1',b_data='i'))
eq_(node.children[0], B(id=2, name='b2',b_data='l'))
class FlushTest(fixtures.MappedTest):
"""test dependency sorting among inheriting mappers"""
@classmethod
def define_tables(cls, metadata):
Table('users', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('email', String(128)),
Column('password', String(16)),
)
Table('roles', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('description', String(32))
)
Table('user_roles', metadata,
Column('user_id', Integer, ForeignKey('users.id'), primary_key=True),
Column('role_id', Integer, ForeignKey('roles.id'), primary_key=True)
)
Table('admins', metadata,
Column('admin_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('user_id', Integer, ForeignKey('users.id'))
)
def test_one(self):
admins, users, roles, user_roles = (self.tables.admins,
self.tables.users,
self.tables.roles,
self.tables.user_roles)
class User(object):pass
class Role(object):pass
class Admin(User):pass
role_mapper = mapper(Role, roles)
user_mapper = mapper(User, users, properties = {
'roles' : relationship(Role, secondary=user_roles, lazy='joined')
}
)
admin_mapper = mapper(Admin, admins, inherits=user_mapper)
sess = create_session()
adminrole = Role()
sess.add(adminrole)
sess.flush()
# create an Admin, and append a Role. the dependency processors
# corresponding to the "roles" attribute for the Admin mapper and the User mapper
# have to ensure that two dependency processors don't fire off and insert the
# many to many row twice.
a = Admin()
a.roles.append(adminrole)
a.password = 'admin'
sess.add(a)
sess.flush()
eq_(select([func.count('*')]).select_from(user_roles).scalar(), 1)
def test_two(self):
admins, users, roles, user_roles = (self.tables.admins,
self.tables.users,
self.tables.roles,
self.tables.user_roles)
class User(object):
def __init__(self, email=None, password=None):
self.email = email
self.password = password
class Role(object):
def __init__(self, description=None):
self.description = description
class Admin(User):pass
role_mapper = mapper(Role, roles)
user_mapper = mapper(User, users, properties = {
'roles' : relationship(Role, secondary=user_roles, lazy='joined')
}
)
admin_mapper = mapper(Admin, admins, inherits=user_mapper)
# create roles
adminrole = Role('admin')
sess = create_session()
sess.add(adminrole)
sess.flush()
# create admin user
a = Admin(email='tim', password='admin')
a.roles.append(adminrole)
sess.add(a)
sess.flush()
a.password = 'sadmin'
sess.flush()
eq_(select([func.count('*')]).select_from(user_roles).scalar(), 1)
class PassiveDeletesTest(fixtures.MappedTest):
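    """Test the passive_deletes flag at each level of a three-level
    joined-inheritance hierarchy, asserting which SELECT and DELETE
    statements are emitted on flush."""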
__requires__ = ('foreign_keys',)
@classmethod
def define_tables(cls, metadata):
Table(
"a", metadata,
Column('id', Integer, primary_key=True),
Column('type', String(30))
)
Table(
"b", metadata,
Column(
'id', Integer, ForeignKey('a.id', ondelete="CASCADE"),
primary_key=True),
Column('data', String(10))
)
Table(
"c", metadata,
Column('cid', Integer, primary_key=True),
Column('bid', ForeignKey('b.id', ondelete="CASCADE"))
)
@classmethod
def setup_classes(cls):
class A(cls.Basic):
pass
class B(A):
pass
class C(B):
pass
def _fixture(self, a_p=False, b_p=False, c_p=False):
A, B, C = self.classes("A", "B", "C")
a, b, c = self.tables("a", "b", "c")
mapper(
A, a, passive_deletes=a_p,
polymorphic_on=a.c.type, polymorphic_identity='a')
mapper(
B, b, inherits=A, passive_deletes=b_p, polymorphic_identity='b')
mapper(
C, c, inherits=B, passive_deletes=c_p, polymorphic_identity='c')
def test_none(self):
A, B, C = self.classes("A", "B", "C")
self._fixture()
s = Session()
a1, b1, c1 = A(id=1), B(id=2), C(cid=1, id=3)
s.add_all([a1, b1, c1])
s.commit()
# want to see if the 'C' table loads even though
# a and b are loaded
c1 = s.query(B).filter_by(id=3).first()
s.delete(c1)
with self.sql_execution_asserter(testing.db) as asserter:
s.flush()
asserter.assert_(
RegexSQL(
"SELECT .* "
"FROM c WHERE :param_1 = c.bid",
[{'param_1': 3}]
),
CompiledSQL(
"DELETE FROM c WHERE c.cid = :cid",
[{'cid': 1}]
),
CompiledSQL(
"DELETE FROM b WHERE b.id = :id",
[{'id': 3}]
),
CompiledSQL(
"DELETE FROM a WHERE a.id = :id",
[{'id': 3}]
)
)
def test_c_only(self):
A, B, C = self.classes("A", "B", "C")
self._fixture(c_p=True)
        s = Session()
        a1, b1, c1 = A(id=1), B(id=2), C(cid=1, id=3)
        s.add_all([a1, b1, c1])
        s.commit()
s.delete(a1)
with self.sql_execution_asserter(testing.db) as asserter:
s.flush()
asserter.assert_(
CompiledSQL(
"SELECT a.id AS a_id, a.type AS a_type "
"FROM a WHERE a.id = :param_1",
[{'param_1': 1}]
),
CompiledSQL(
"DELETE FROM a WHERE a.id = :id",
[{'id': 1}]
)
)
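        # touch b1.id so the expired instance has its primary key loaded
        # before the delete below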
b1.id
s.delete(b1)
with self.sql_execution_asserter(testing.db) as asserter:
s.flush()
asserter.assert_(
CompiledSQL(
"DELETE FROM b WHERE b.id = :id",
[{'id': 2}]
),
CompiledSQL(
"DELETE FROM a WHERE a.id = :id",
[{'id': 2}]
)
)
# want to see if the 'C' table loads even though
# a and b are loaded
c1 = s.query(A).filter_by(id=3).first()
s.delete(c1)
with self.sql_execution_asserter(testing.db) as asserter:
s.flush()
asserter.assert_(
CompiledSQL(
"DELETE FROM b WHERE b.id = :id",
[{'id': 3}]
),
CompiledSQL(
"DELETE FROM a WHERE a.id = :id",
[{'id': 3}]
)
)
def test_b_only(self):
A, B, C = self.classes("A", "B", "C")
self._fixture(b_p=True)
s = Session()
a1, b1, c1 = A(id=1), B(id=2), C(cid=1, id=3)
s.add_all([a1, b1, c1])
s.commit()
s.delete(a1)
with self.sql_execution_asserter(testing.db) as asserter:
s.flush()
asserter.assert_(
CompiledSQL(
"SELECT a.id AS a_id, a.type AS a_type "
"FROM a WHERE a.id = :param_1",
[{'param_1': 1}]
),
CompiledSQL(
"DELETE FROM a WHERE a.id = :id",
[{'id': 1}]
)
)
b1.id
s.delete(b1)
with self.sql_execution_asserter(testing.db) as asserter:
s.flush()
asserter.assert_(
CompiledSQL(
"DELETE FROM a WHERE a.id = :id",
[{'id': 2}]
)
)
c1.id
s.delete(c1)
with self.sql_execution_asserter(testing.db) as asserter:
s.flush()
asserter.assert_(
CompiledSQL(
"DELETE FROM a WHERE a.id = :id",
[{'id': 3}]
)
)
def test_a_only(self):
A, B, C = self.classes("A", "B", "C")
self._fixture(a_p=True)
s = Session()
a1, b1, c1 = A(id=1), B(id=2), C(cid=1, id=3)
s.add_all([a1, b1, c1])
s.commit()
s.delete(a1)
with self.sql_execution_asserter(testing.db) as asserter:
s.flush()
asserter.assert_(
CompiledSQL(
"SELECT a.id AS a_id, a.type AS a_type "
"FROM a WHERE a.id = :param_1",
[{'param_1': 1}]
),
CompiledSQL(
"DELETE FROM a WHERE a.id = :id",
[{'id': 1}]
)
)
b1.id
s.delete(b1)
with self.sql_execution_asserter(testing.db) as asserter:
s.flush()
asserter.assert_(
CompiledSQL(
"DELETE FROM a WHERE a.id = :id",
[{'id': 2}]
)
)
# want to see if the 'C' table loads even though
# a and b are loaded
c1 = s.query(A).filter_by(id=3).first()
s.delete(c1)
with self.sql_execution_asserter(testing.db) as asserter:
s.flush()
asserter.assert_(
CompiledSQL(
"DELETE FROM a WHERE a.id = :id",
[{'id': 3}]
)
)
class OptimizedGetOnDeferredTest(fixtures.MappedTest):
"""test that the 'optimized get' path accommodates deferred columns."""
@classmethod
def define_tables(cls, metadata):
Table(
"a", metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
)
Table(
"b", metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True),
Column('data', String(10))
)
@classmethod
def setup_classes(cls):
class A(cls.Basic):
pass
class B(A):
pass
@classmethod
def setup_mappers(cls):
A, B = cls.classes("A", "B")
a, b = cls.tables("a", "b")
mapper(A, a)
mapper(B, b, inherits=A, properties={
'data': deferred(b.c.data),
'expr': column_property(b.c.data + 'q', deferred=True)
})
def test_column_property(self):
A, B = self.classes("A", "B")
sess = Session()
b1 = B(data='x')
sess.add(b1)
sess.flush()
eq_(b1.expr, 'xq')
def test_expired_column(self):
A, B = self.classes("A", "B")
sess = Session()
b1 = B(data='x')
sess.add(b1)
sess.flush()
sess.expire(b1, ['data'])
eq_(b1.data, 'x')
class JoinedNoFKSortingTest(fixtures.MappedTest):
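    """Test INSERT ordering for joined inheritance configured with an
    explicit inherit_condition / inherit_foreign_keys and no actual
    ForeignKey constraints on the subclass tables."""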
@classmethod
def define_tables(cls, metadata):
Table("a", metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
)
Table("b", metadata,
Column('id', Integer, primary_key=True)
)
Table("c", metadata,
Column('id', Integer, primary_key=True)
)
@classmethod
def setup_classes(cls):
class A(cls.Basic):
pass
class B(A):
pass
class C(A):
pass
@classmethod
def setup_mappers(cls):
A, B, C = cls.classes.A, cls.classes.B, cls.classes.C
mapper(A, cls.tables.a)
mapper(B, cls.tables.b, inherits=A,
inherit_condition=cls.tables.a.c.id == cls.tables.b.c.id,
inherit_foreign_keys=cls.tables.b.c.id)
mapper(C, cls.tables.c, inherits=A,
inherit_condition=cls.tables.a.c.id == cls.tables.c.c.id,
inherit_foreign_keys=cls.tables.c.c.id)
def test_ordering(self):
B, C = self.classes.B, self.classes.C
sess = Session()
sess.add_all([B(), C(), B(), C()])
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO a () VALUES ()",
{}
),
CompiledSQL(
"INSERT INTO a () VALUES ()",
{}
),
CompiledSQL(
"INSERT INTO a () VALUES ()",
{}
),
CompiledSQL(
"INSERT INTO a () VALUES ()",
{}
),
AllOf(
CompiledSQL(
"INSERT INTO b (id) VALUES (:id)",
[{"id": 1}, {"id": 3}]
),
CompiledSQL(
"INSERT INTO c (id) VALUES (:id)",
[{"id": 2}, {"id": 4}]
)
)
)
class VersioningTest(fixtures.MappedTest):
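    """Test version_id_col behavior with joined-table inheritance,
    including stale-data detection on UPDATE and DELETE."""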
@classmethod
def define_tables(cls, metadata):
Table('base', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('version_id', Integer, nullable=False),
Column('value', String(40)),
Column('discriminator', Integer, nullable=False)
)
Table('subtable', metadata,
Column('id', None, ForeignKey('base.id'), primary_key=True),
Column('subdata', String(50))
)
Table('stuff', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('parent', Integer, ForeignKey('base.id'))
)
@testing.emits_warning(r".*updated rowcount")
@engines.close_open_connections
def test_save_update(self):
subtable, base, stuff = (self.tables.subtable,
self.tables.base,
self.tables.stuff)
class Base(fixtures.BasicEntity):
pass
class Sub(Base):
pass
class Stuff(Base):
pass
mapper(Stuff, stuff)
mapper(Base, base,
polymorphic_on=base.c.discriminator,
version_id_col=base.c.version_id,
polymorphic_identity=1, properties={
'stuff':relationship(Stuff)
})
mapper(Sub, subtable, inherits=Base, polymorphic_identity=2)
sess = create_session()
b1 = Base(value='b1')
s1 = Sub(value='sub1', subdata='some subdata')
sess.add(b1)
sess.add(s1)
sess.flush()
sess2 = create_session()
s2 = sess2.query(Base).get(s1.id)
s2.subdata = 'sess2 subdata'
s1.subdata = 'sess1 subdata'
sess.flush()
assert_raises(orm_exc.StaleDataError,
sess2.query(Base).with_lockmode('read').get,
s1.id)
if not testing.db.dialect.supports_sane_rowcount:
sess2.flush()
else:
assert_raises(orm_exc.StaleDataError, sess2.flush)
sess2.refresh(s2)
if testing.db.dialect.supports_sane_rowcount:
assert s2.subdata == 'sess1 subdata'
s2.subdata = 'sess2 subdata'
sess2.flush()
@testing.emits_warning(r".*(update|delete)d rowcount")
def test_delete(self):
subtable, base = self.tables.subtable, self.tables.base
class Base(fixtures.BasicEntity):
pass
class Sub(Base):
pass
mapper(Base, base,
polymorphic_on=base.c.discriminator,
version_id_col=base.c.version_id, polymorphic_identity=1)
mapper(Sub, subtable, inherits=Base, polymorphic_identity=2)
sess = create_session()
b1 = Base(value='b1')
s1 = Sub(value='sub1', subdata='some subdata')
s2 = Sub(value='sub2', subdata='some other subdata')
sess.add(b1)
sess.add(s1)
sess.add(s2)
sess.flush()
sess2 = create_session()
s3 = sess2.query(Base).get(s1.id)
sess2.delete(s3)
sess2.flush()
s2.subdata = 'some new subdata'
sess.flush()
s1.subdata = 'some new subdata'
if testing.db.dialect.supports_sane_rowcount:
assert_raises(
orm_exc.StaleDataError,
sess.flush
)
else:
sess.flush()
class DistinctPKTest(fixtures.MappedTest):
"""test the construction of mapper.primary_key when an inheriting relationship
joins on a column other than primary key column."""
run_inserts = 'once'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
global person_table, employee_table, Person, Employee
person_table = Table("persons", metadata,
Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
Column("name", String(80)),
)
employee_table = Table("employees", metadata,
Column("eid", Integer, primary_key=True, test_needs_autoincrement=True),
Column("salary", Integer),
Column("person_id", Integer, ForeignKey("persons.id")),
)
class Person(object):
def __init__(self, name):
self.name = name
class Employee(Person): pass
@classmethod
def insert_data(cls):
person_insert = person_table.insert()
person_insert.execute(id=1, name='alice')
person_insert.execute(id=2, name='bob')
employee_insert = employee_table.insert()
        employee_insert.execute(eid=2, salary=250, person_id=1) # alice
        employee_insert.execute(eid=3, salary=200, person_id=2) # bob
def test_implicit(self):
person_mapper = mapper(Person, person_table)
mapper(Employee, employee_table, inherits=person_mapper)
assert list(class_mapper(Employee).primary_key) == [person_table.c.id]
def test_explicit_props(self):
person_mapper = mapper(Person, person_table)
mapper(Employee, employee_table, inherits=person_mapper,
properties={'pid':person_table.c.id,
'eid':employee_table.c.eid})
self._do_test(False)
def test_explicit_composite_pk(self):
person_mapper = mapper(Person, person_table)
mapper(Employee, employee_table,
inherits=person_mapper,
properties=dict(id=[employee_table.c.eid, person_table.c.id]),
primary_key=[person_table.c.id, employee_table.c.eid])
assert_raises_message(sa_exc.SAWarning,
r"On mapper Mapper\|Employee\|employees, "
"primary key column 'persons.id' is being "
"combined with distinct primary key column 'employees.eid' "
"in attribute 'id'. Use explicit properties to give "
"each column its own mapped attribute name.",
self._do_test, True
)
def test_explicit_pk(self):
person_mapper = mapper(Person, person_table)
mapper(Employee, employee_table, inherits=person_mapper, primary_key=[person_table.c.id])
self._do_test(False)
def _do_test(self, composite):
session = create_session()
query = session.query(Employee)
if composite:
alice1 = query.get([1,2])
bob = query.get([2,3])
alice2 = query.get([1,2])
else:
alice1 = query.get(1)
bob = query.get(2)
alice2 = query.get(1)
assert alice1.name == alice2.name == 'alice'
assert bob.name == 'bob'
class SyncCompileTest(fixtures.MappedTest):
"""test that syncrules compile properly on custom inherit conds"""
@classmethod
def define_tables(cls, metadata):
global _a_table, _b_table, _c_table
_a_table = Table('a', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data1', String(128))
)
_b_table = Table('b', metadata,
Column('a_id', Integer, ForeignKey('a.id'), primary_key=True),
Column('data2', String(128))
)
_c_table = Table('c', metadata,
# Column('a_id', Integer, ForeignKey('b.a_id'), primary_key=True), #works
Column('b_a_id', Integer, ForeignKey('b.a_id'), primary_key=True),
Column('data3', String(128))
)
def test_joins(self):
for j1 in (None, _b_table.c.a_id==_a_table.c.id, _a_table.c.id==_b_table.c.a_id):
for j2 in (None, _b_table.c.a_id==_c_table.c.b_a_id,
_c_table.c.b_a_id==_b_table.c.a_id):
self._do_test(j1, j2)
for t in reversed(_a_table.metadata.sorted_tables):
t.delete().execute().close()
def _do_test(self, j1, j2):
class A(object):
def __init__(self, **kwargs):
for key, value in list(kwargs.items()):
setattr(self, key, value)
class B(A):
pass
class C(B):
pass
mapper(A, _a_table)
mapper(B, _b_table, inherits=A,
inherit_condition=j1
)
mapper(C, _c_table, inherits=B,
inherit_condition=j2
)
session = create_session()
a = A(data1='a1')
session.add(a)
b = B(data1='b1', data2='b2')
session.add(b)
c = C(data1='c1', data2='c2', data3='c3')
session.add(c)
session.flush()
session.expunge_all()
assert len(session.query(A).all()) == 3
assert len(session.query(B).all()) == 2
assert len(session.query(C).all()) == 1
class OverrideColKeyTest(fixtures.MappedTest):
"""test overriding of column attributes."""
@classmethod
def define_tables(cls, metadata):
global base, subtable, subtable_two
base = Table('base', metadata,
Column('base_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(255)),
Column('sqlite_fixer', String(10))
)
subtable = Table('subtable', metadata,
Column('base_id', Integer, ForeignKey('base.base_id'), primary_key=True),
Column('subdata', String(255))
)
subtable_two = Table('subtable_two', metadata,
Column('base_id', Integer, primary_key=True),
Column('fk_base_id', Integer, ForeignKey('base.base_id')),
Column('subdata', String(255))
)
def test_plain(self):
# control case
class Base(object):
pass
class Sub(Base):
pass
mapper(Base, base)
mapper(Sub, subtable, inherits=Base)
# Sub gets a "base_id" property using the "base_id"
# column of both tables.
eq_(
class_mapper(Sub).get_property('base_id').columns,
[subtable.c.base_id, base.c.base_id]
)
def test_override_explicit(self):
# this pattern is what you see when using declarative
# in particular, here we do a "manual" version of
# what we'd like the mapper to do.
class Base(object):
pass
class Sub(Base):
pass
mapper(Base, base, properties={
'id':base.c.base_id
})
mapper(Sub, subtable, inherits=Base, properties={
# this is the manual way to do it, is not really
# possible in declarative
'id':[base.c.base_id, subtable.c.base_id]
})
eq_(
class_mapper(Sub).get_property('id').columns,
[base.c.base_id, subtable.c.base_id]
)
s1 = Sub()
s1.id = 10
sess = create_session()
sess.add(s1)
sess.flush()
assert sess.query(Sub).get(10) is s1
def test_override_onlyinparent(self):
class Base(object):
pass
class Sub(Base):
pass
mapper(Base, base, properties={
'id':base.c.base_id
})
mapper(Sub, subtable, inherits=Base)
eq_(
class_mapper(Sub).get_property('id').columns,
[base.c.base_id]
)
eq_(
class_mapper(Sub).get_property('base_id').columns,
[subtable.c.base_id]
)
s1 = Sub()
s1.id = 10
s2 = Sub()
s2.base_id = 15
sess = create_session()
sess.add_all([s1, s2])
sess.flush()
# s1 gets '10'
assert sess.query(Sub).get(10) is s1
# s2 gets a new id, base_id is overwritten by the ultimate
# PK col
assert s2.id == s2.base_id != 15
def test_override_implicit(self):
# this is originally [ticket:1111].
# the pattern here is now disallowed by [ticket:1892]
class Base(object):
pass
class Sub(Base):
pass
mapper(Base, base, properties={
'id':base.c.base_id
})
def go():
mapper(Sub, subtable, inherits=Base, properties={
'id':subtable.c.base_id
})
# Sub mapper compilation needs to detect that "base.c.base_id"
# is renamed in the inherited mapper as "id", even though
# it has its own "id" property. It then generates
# an exception in 0.7 due to the implicit conflict.
assert_raises(sa_exc.InvalidRequestError, go)
def test_pk_fk_different(self):
class Base(object):
pass
class Sub(Base):
pass
mapper(Base, base)
def go():
mapper(Sub, subtable_two, inherits=Base)
assert_raises_message(
sa_exc.SAWarning,
"Implicitly combining column base.base_id with "
"column subtable_two.base_id under attribute 'base_id'",
go
)
def test_plain_descriptor(self):
"""test that descriptors prevent inheritance from propigating properties to subclasses."""
class Base(object):
pass
class Sub(Base):
@property
def data(self):
return "im the data"
mapper(Base, base)
mapper(Sub, subtable, inherits=Base)
s1 = Sub()
sess = create_session()
sess.add(s1)
sess.flush()
assert sess.query(Sub).one().data == "im the data"
def test_custom_descriptor(self):
"""test that descriptors prevent inheritance from propigating properties to subclasses."""
class MyDesc(object):
def __get__(self, instance, owner):
if instance is None:
return self
return "im the data"
class Base(object):
pass
class Sub(Base):
data = MyDesc()
mapper(Base, base)
mapper(Sub, subtable, inherits=Base)
s1 = Sub()
sess = create_session()
sess.add(s1)
sess.flush()
assert sess.query(Sub).one().data == "im the data"
def test_sub_columns_over_base_descriptors(self):
class Base(object):
@property
def subdata(self):
return "this is base"
class Sub(Base):
pass
mapper(Base, base)
mapper(Sub, subtable, inherits=Base)
sess = create_session()
b1 = Base()
assert b1.subdata == "this is base"
s1 = Sub()
s1.subdata = "this is sub"
assert s1.subdata == "this is sub"
sess.add_all([s1, b1])
sess.flush()
sess.expunge_all()
assert sess.query(Base).get(b1.base_id).subdata == "this is base"
assert sess.query(Sub).get(s1.base_id).subdata == "this is sub"
def test_base_descriptors_over_base_cols(self):
class Base(object):
@property
def data(self):
return "this is base"
class Sub(Base):
pass
mapper(Base, base)
mapper(Sub, subtable, inherits=Base)
sess = create_session()
b1 = Base()
assert b1.data == "this is base"
s1 = Sub()
assert s1.data == "this is base"
sess.add_all([s1, b1])
sess.flush()
sess.expunge_all()
assert sess.query(Base).get(b1.base_id).data == "this is base"
assert sess.query(Sub).get(s1.base_id).data == "this is base"
class OptimizedLoadTest(fixtures.MappedTest):
"""tests for the "optimized load" routine."""
@classmethod
def define_tables(cls, metadata):
Table('base', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(50)),
Column('type', String(50)),
Column('counter', Integer, server_default="1")
)
Table('sub', metadata,
Column('id', Integer, ForeignKey('base.id'), primary_key=True),
Column('sub', String(50)),
Column('subcounter', Integer, server_default="1"),
Column('subcounter2', Integer, server_default="1")
)
Table('subsub', metadata,
Column('id', Integer, ForeignKey('sub.id'), primary_key=True),
Column('subsubcounter2', Integer, server_default="1")
)
Table('with_comp', metadata,
Column('id', Integer, ForeignKey('base.id'), primary_key=True),
Column('a', String(10)),
Column('b', String(10))
)
def test_no_optimize_on_map_to_join(self):
base, sub = self.tables.base, self.tables.sub
class Base(fixtures.ComparableEntity):
pass
class JoinBase(fixtures.ComparableEntity):
pass
class SubJoinBase(JoinBase):
pass
mapper(Base, base)
mapper(JoinBase, base.outerjoin(sub), properties=util.OrderedDict(
[('id', [base.c.id, sub.c.id]),
('counter', [base.c.counter, sub.c.subcounter])])
)
mapper(SubJoinBase, inherits=JoinBase)
sess = Session()
sess.add(Base(data='data'))
sess.commit()
sjb = sess.query(SubJoinBase).one()
sjb_id = sjb.id
sess.expire(sjb)
# this should not use the optimized load,
# which assumes discrete tables
def go():
eq_(sjb.data, 'data')
self.assert_sql_execution(
testing.db,
go,
CompiledSQL(
"SELECT base.id AS base_id, sub.id AS sub_id, "
"base.counter AS base_counter, sub.subcounter AS sub_subcounter, "
"base.data AS base_data, base.type AS base_type, "
"sub.sub AS sub_sub, sub.subcounter2 AS sub_subcounter2 "
"FROM base LEFT OUTER JOIN sub ON base.id = sub.id "
"WHERE base.id = :param_1",
{'param_1': sjb_id}
),
)
def test_optimized_passes(self):
""""test that the 'optimized load' routine doesn't crash when
a column in the join condition is not available."""
base, sub = self.tables.base, self.tables.sub
class Base(fixtures.ComparableEntity):
pass
class Sub(Base):
pass
mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
# redefine Sub's "id" to favor the "id" col in the subtable.
# "id" is also part of the primary join condition
mapper(Sub, sub, inherits=Base,
polymorphic_identity='sub',
properties={'id':[sub.c.id, base.c.id]})
sess = sessionmaker()()
s1 = Sub(data='s1data', sub='s1sub')
sess.add(s1)
sess.commit()
sess.expunge_all()
# load s1 via Base. s1.id won't populate since it's relative to
# the "sub" table. The optimized load kicks in and tries to
# generate on the primary join, but cannot since "id" is itself unloaded.
# the optimized load needs to return "None" so regular full-row loading proceeds
s1 = sess.query(Base).first()
assert s1.sub == 's1sub'
def test_column_expression(self):
base, sub = self.tables.base, self.tables.sub
class Base(fixtures.ComparableEntity):
pass
class Sub(Base):
pass
mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
mapper(Sub, sub, inherits=Base, polymorphic_identity='sub', properties={
'concat': column_property(sub.c.sub + "|" + sub.c.sub)
})
sess = sessionmaker()()
s1 = Sub(data='s1data', sub='s1sub')
sess.add(s1)
sess.commit()
sess.expunge_all()
s1 = sess.query(Base).first()
assert s1.concat == 's1sub|s1sub'
def test_column_expression_joined(self):
base, sub = self.tables.base, self.tables.sub
class Base(fixtures.ComparableEntity):
pass
class Sub(Base):
pass
mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
mapper(Sub, sub, inherits=Base, polymorphic_identity='sub', properties={
'concat': column_property(base.c.data + "|" + sub.c.sub)
})
sess = sessionmaker()()
s1 = Sub(data='s1data', sub='s1sub')
s2 = Sub(data='s2data', sub='s2sub')
s3 = Sub(data='s3data', sub='s3sub')
sess.add_all([s1, s2, s3])
sess.commit()
sess.expunge_all()
# query a bunch of rows to ensure there's no cartesian
# product against "base" occurring, it is in fact
# detecting that "base" needs to be in the join
# criterion
eq_(
sess.query(Base).order_by(Base.id).all(),
[
Sub(data='s1data', sub='s1sub', concat='s1data|s1sub'),
Sub(data='s2data', sub='s2sub', concat='s2data|s2sub'),
Sub(data='s3data', sub='s3sub', concat='s3data|s3sub')
]
)
def test_composite_column_joined(self):
base, with_comp = self.tables.base, self.tables.with_comp
class Base(fixtures.BasicEntity):
pass
class WithComp(Base):
pass
class Comp(object):
def __init__(self, a, b):
self.a = a
self.b = b
def __composite_values__(self):
return self.a, self.b
def __eq__(self, other):
return (self.a == other.a) and (self.b == other.b)
mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
mapper(WithComp, with_comp, inherits=Base, polymorphic_identity='wc', properties={
'comp': composite(Comp, with_comp.c.a, with_comp.c.b)
})
sess = sessionmaker()()
s1 = WithComp(data='s1data', comp=Comp('ham', 'cheese'))
s2 = WithComp(data='s2data', comp=Comp('bacon', 'eggs'))
sess.add_all([s1, s2])
sess.commit()
sess.expunge_all()
s1test, s2test = sess.query(Base).order_by(Base.id).all()
assert s1test.comp
assert s2test.comp
eq_(s1test.comp, Comp('ham', 'cheese'))
eq_(s2test.comp, Comp('bacon', 'eggs'))
def test_load_expired_on_pending(self):
base, sub = self.tables.base, self.tables.sub
class Base(fixtures.BasicEntity):
pass
class Sub(Base):
pass
mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
mapper(Sub, sub, inherits=Base, polymorphic_identity='sub')
sess = Session()
s1 = Sub(data='s1')
sess.add(s1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO base (data, type) VALUES (:data, :type)",
[{'data':'s1','type':'sub'}]
),
CompiledSQL(
"INSERT INTO sub (id, sub) VALUES (:id, :sub)",
lambda ctx:{'id':s1.id, 'sub':None}
),
)
def go():
eq_( s1.subcounter2, 1 )
self.assert_sql_execution(
testing.db,
go,
CompiledSQL(
"SELECT base.counter AS base_counter, sub.subcounter AS sub_subcounter, "
"sub.subcounter2 AS sub_subcounter2 FROM base JOIN sub "
"ON base.id = sub.id WHERE base.id = :param_1",
lambda ctx:{'param_1': s1.id}
),
)
def test_dont_generate_on_none(self):
base, sub = self.tables.base, self.tables.sub
class Base(fixtures.BasicEntity):
pass
class Sub(Base):
pass
mapper(Base, base, polymorphic_on=base.c.type,
polymorphic_identity='base')
m = mapper(Sub, sub, inherits=Base, polymorphic_identity='sub')
s1 = Sub()
assert m._optimized_get_statement(attributes.instance_state(s1),
['subcounter2']) is None
# loads s1.id as None
eq_(s1.id, None)
# this now will come up with a value of None for id - should reject
assert m._optimized_get_statement(attributes.instance_state(s1),
['subcounter2']) is None
s1.id = 1
attributes.instance_state(s1)._commit_all(s1.__dict__, None)
assert m._optimized_get_statement(attributes.instance_state(s1),
['subcounter2']) is not None
def test_load_expired_on_pending_twolevel(self):
base, sub, subsub = (self.tables.base,
self.tables.sub,
self.tables.subsub)
class Base(fixtures.BasicEntity):
pass
class Sub(Base):
pass
class SubSub(Sub):
pass
mapper(Base, base, polymorphic_on=base.c.type,
polymorphic_identity='base')
mapper(Sub, sub, inherits=Base, polymorphic_identity='sub')
mapper(SubSub, subsub, inherits=Sub, polymorphic_identity='subsub')
sess = Session()
s1 = SubSub(data='s1', counter=1, subcounter=2)
sess.add(s1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO base (data, type, counter) VALUES "
"(:data, :type, :counter)",
[{'data':'s1','type':'subsub','counter':1}]
),
CompiledSQL(
"INSERT INTO sub (id, sub, subcounter) VALUES "
"(:id, :sub, :subcounter)",
lambda ctx:[{'subcounter': 2, 'sub': None, 'id': s1.id}]
),
CompiledSQL(
"INSERT INTO subsub (id) VALUES (:id)",
lambda ctx:{'id':s1.id}
),
)
def go():
eq_(
s1.subcounter2, 1
)
self.assert_sql_execution(
testing.db,
go,
Or(
CompiledSQL(
"SELECT subsub.subsubcounter2 AS subsub_subsubcounter2, "
"sub.subcounter2 AS sub_subcounter2 FROM subsub, sub "
"WHERE :param_1 = sub.id AND sub.id = subsub.id",
lambda ctx: {'param_1': s1.id}
),
CompiledSQL(
"SELECT sub.subcounter2 AS sub_subcounter2, "
"subsub.subsubcounter2 AS subsub_subsubcounter2 "
"FROM sub, subsub "
"WHERE :param_1 = sub.id AND sub.id = subsub.id",
lambda ctx: {'param_1': s1.id}
),
)
)
class TransientInheritingGCTest(fixtures.TestBase):
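    """Test that a subclass mapped against an existing declarative base
    can be garbage collected once all references to it are released."""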
__requires__ = ('cpython', 'no_coverage')
def _fixture(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'a'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
data = Column(String(10))
self.A = A
return Base
def setUp(self):
self.Base = self._fixture()
def tearDown(self):
self.Base.metadata.drop_all(testing.db)
#clear_mappers()
self.Base = None
def _do_test(self, go):
B = go()
self.Base.metadata.create_all(testing.db)
sess = Session(testing.db)
sess.add(B(data='some b'))
sess.commit()
b1 = sess.query(B).one()
assert isinstance(b1, B)
sess.close()
del sess
del b1
del B
gc_collect()
eq_(
len(self.A.__subclasses__()),
0)
def test_single(self):
def go():
class B(self.A):
pass
return B
self._do_test(go)
@testing.fails_if(lambda: True,
"not supported for joined inh right now.")
def test_joined(self):
def go():
class B(self.A):
__tablename__ = 'b'
id = Column(Integer, ForeignKey('a.id'),
primary_key=True)
return B
self._do_test(go)
class NoPKOnSubTableWarningTest(fixtures.TestBase):
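    """Test the warning emitted when an inheriting table defines no usable
    primary key, and that an explicit primary_key argument avoids it."""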
def _fixture(self):
metadata = MetaData()
parent = Table('parent', metadata,
Column('id', Integer, primary_key=True)
)
child = Table('child', metadata,
Column('id', Integer, ForeignKey('parent.id'))
)
return parent, child
def tearDown(self):
clear_mappers()
def test_warning_on_sub(self):
parent, child = self._fixture()
class P(object):
pass
class C(P):
pass
mapper(P, parent)
assert_raises_message(
sa_exc.SAWarning,
"Could not assemble any primary keys for locally mapped "
"table 'child' - no rows will be persisted in this Table.",
mapper, C, child, inherits=P
)
def test_no_warning_with_explicit(self):
parent, child = self._fixture()
class P(object):
pass
class C(P):
pass
mapper(P, parent)
mc = mapper(C, child, inherits=P, primary_key=[parent.c.id])
eq_(mc.primary_key, (parent.c.id,))
class InhCondTest(fixtures.TestBase):
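    """Test generation of the inherit_condition when related tables or
    columns are missing or unresolvable."""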
def test_inh_cond_nonexistent_table_unrelated(self):
metadata = MetaData()
base_table = Table("base", metadata,
Column("id", Integer, primary_key=True)
)
derived_table = Table("derived", metadata,
Column("id", Integer, ForeignKey("base.id"), primary_key=True),
Column("owner_id", Integer, ForeignKey("owner.owner_id"))
)
class Base(object):
pass
class Derived(Base):
pass
mapper(Base, base_table)
        # succeeds, despite the "owner" table not being configured yet
m2 = mapper(Derived, derived_table,
inherits=Base)
assert m2.inherit_condition.compare(
base_table.c.id==derived_table.c.id
)
def test_inh_cond_nonexistent_col_unrelated(self):
m = MetaData()
base_table = Table("base", m,
Column("id", Integer, primary_key=True)
)
derived_table = Table("derived", m,
Column("id", Integer, ForeignKey('base.id'),
primary_key=True),
Column('order_id', Integer, ForeignKey('order.foo'))
)
order_table = Table('order', m, Column('id', Integer, primary_key=True))
class Base(object):
pass
class Derived(Base):
pass
mapper(Base, base_table)
        # succeeds, even though "order.foo" does not exist
m2 = mapper(Derived, derived_table, inherits=Base)
assert m2.inherit_condition.compare(
base_table.c.id==derived_table.c.id
)
def test_inh_cond_no_fk(self):
metadata = MetaData()
base_table = Table("base", metadata,
Column("id", Integer, primary_key=True)
)
derived_table = Table("derived", metadata,
Column("id", Integer, primary_key=True),
)
class Base(object):
pass
class Derived(Base):
pass
mapper(Base, base_table)
assert_raises_message(
sa_exc.ArgumentError,
"Can't find any foreign key relationships between "
"'base' and 'derived'.",
mapper,
Derived, derived_table, inherits=Base
)
def test_inh_cond_nonexistent_table_related(self):
m1 = MetaData()
m2 = MetaData()
base_table = Table("base", m1,
Column("id", Integer, primary_key=True)
)
derived_table = Table("derived", m2,
Column("id", Integer, ForeignKey('base.id'),
primary_key=True),
)
class Base(object):
pass
class Derived(Base):
pass
mapper(Base, base_table)
# the ForeignKey def is correct but there are two
# different metadatas. Would like the traditional
# "noreferencedtable" error to raise so that the
# user is directed towards the FK definition in question.
assert_raises_message(
sa_exc.NoReferencedTableError,
"Foreign key associated with column 'derived.id' "
"could not find table 'base' with which to generate "
"a foreign key to target column 'id'",
mapper,
Derived, derived_table, inherits=Base
)
def test_inh_cond_nonexistent_col_related(self):
m = MetaData()
base_table = Table("base", m,
Column("id", Integer, primary_key=True)
)
derived_table = Table("derived", m,
Column("id", Integer, ForeignKey('base.q'),
primary_key=True),
)
class Base(object):
pass
class Derived(Base):
pass
mapper(Base, base_table)
assert_raises_message(
sa_exc.NoReferencedColumnError,
"Could not initialize target column for ForeignKey "
"'base.q' on table "
"'derived': table 'base' has no column named 'q'",
mapper,
Derived, derived_table, inherits=Base
)
class PKDiscriminatorTest(fixtures.MappedTest):
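    """Test a discriminator column that is also part of the primary key."""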
@classmethod
def define_tables(cls, metadata):
parents = Table('parents', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(60)))
children = Table('children', metadata,
Column('id', Integer, ForeignKey('parents.id'),
primary_key=True),
Column('type', Integer,primary_key=True),
Column('name', String(60)))
def test_pk_as_discriminator(self):
parents, children = self.tables.parents, self.tables.children
class Parent(object):
def __init__(self, name=None):
self.name = name
class Child(object):
def __init__(self, name=None):
self.name = name
class A(Child):
pass
mapper(Parent, parents, properties={
'children': relationship(Child, backref='parent'),
})
mapper(Child, children, polymorphic_on=children.c.type,
polymorphic_identity=1)
mapper(A, inherits=Child, polymorphic_identity=2)
s = create_session()
p = Parent('p1')
a = A('a1')
p.children.append(a)
s.add(p)
s.flush()
assert a.id
assert a.type == 2
p.name='p1new'
a.name='a1new'
s.flush()
s.expire_all()
assert a.name=='a1new'
assert p.name=='p1new'
class NoPolyIdentInMiddleTest(fixtures.MappedTest):
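    """Test a hierarchy where the intermediate class B supplies no
    polymorphic_identity of its own."""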
@classmethod
def define_tables(cls, metadata):
Table('base', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('type', String(50), nullable=False),
)
@classmethod
def setup_classes(cls):
class A(cls.Comparable):
pass
class B(A):
pass
class C(B):
pass
class D(B):
pass
class E(A):
pass
@classmethod
def setup_mappers(cls):
A, C, B, E, D, base = (cls.classes.A,
cls.classes.C,
cls.classes.B,
cls.classes.E,
cls.classes.D,
cls.tables.base)
mapper(A, base, polymorphic_on=base.c.type)
mapper(B, inherits=A, )
mapper(C, inherits=B, polymorphic_identity='c')
mapper(D, inherits=B, polymorphic_identity='d')
mapper(E, inherits=A, polymorphic_identity='e')
def test_load_from_middle(self):
C, B = self.classes.C, self.classes.B
s = Session()
s.add(C())
o = s.query(B).first()
eq_(o.type, 'c')
assert isinstance(o, C)
def test_load_from_base(self):
A, C = self.classes.A, self.classes.C
s = Session()
s.add(C())
o = s.query(A).first()
eq_(o.type, 'c')
assert isinstance(o, C)
def test_discriminator(self):
C, B, base = (self.classes.C,
self.classes.B,
self.tables.base)
assert class_mapper(B).polymorphic_on is base.c.type
assert class_mapper(C).polymorphic_on is base.c.type
def test_load_multiple_from_middle(self):
C, B, E, D, base = (self.classes.C,
self.classes.B,
self.classes.E,
self.classes.D,
self.tables.base)
s = Session()
s.add_all([C(), D(), E()])
eq_(
s.query(B).order_by(base.c.type).all(),
[C(), D()]
)
class DeleteOrphanTest(fixtures.MappedTest):
    """Test the fairly obvious, that an error is raised
    when attempting to insert an orphan.
    Previous SQLA versions would check this constraint
    in memory which is the original rationale for this test.
    """
    @classmethod
    def define_tables(cls, metadata):
        global single, parent
        single = Table('single', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('type', String(50), nullable=False),
            Column('data', String(50)),
            Column('parent_id', Integer, ForeignKey('parent.id'), nullable=False),
        )
        parent = Table('parent', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('data', String(50))
        )
    def test_orphan_message(self):
        class Base(fixtures.BasicEntity):
            pass
        class SubClass(Base):
            pass
        class Parent(fixtures.BasicEntity):
            pass
        mapper(Base, single, polymorphic_on=single.c.type, polymorphic_identity='base')
        mapper(SubClass, inherits=Base, polymorphic_identity='sub')
        mapper(Parent, parent, properties={
            'related': relationship(Base, cascade="all, delete-orphan")
        })
        sess = create_session()
        s1 = SubClass(data='s1')
        sess.add(s1)
        assert_raises(sa_exc.DBAPIError, sess.flush)
class PolymorphicUnionTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    __dialect__ = 'default'
    def _fixture(self):
        t1 = table('t1', column('c1', Integer),
                   column('c2', Integer),
                   column('c3', Integer))
        t2 = table('t2', column('c1', Integer), column('c2', Integer),
                   column('c3', Integer),
                   column('c4', Integer))
        t3 = table('t3', column('c1', Integer),
                   column('c3', Integer),
                   column('c5', Integer))
        return t1, t2, t3
    def test_type_col_present(self):
        t1, t2, t3 = self._fixture()
        self.assert_compile(
            polymorphic_union(
                util.OrderedDict([("a", t1), ("b", t2), ("c", t3)]),
                'q1'
            ),
            "SELECT t1.c1, t1.c2, t1.c3, CAST(NULL AS INTEGER) AS c4, "
            "CAST(NULL AS INTEGER) AS c5, 'a' AS q1 FROM t1 UNION ALL "
            "SELECT t2.c1, t2.c2, t2.c3, t2.c4, CAST(NULL AS INTEGER) AS c5, "
            "'b' AS q1 FROM t2 UNION ALL SELECT t3.c1, "
            "CAST(NULL AS INTEGER) AS c2, t3.c3, CAST(NULL AS INTEGER) AS c4, "
            "t3.c5, 'c' AS q1 FROM t3"
        )
    def test_type_col_non_present(self):
        t1, t2, t3 = self._fixture()
        self.assert_compile(
            polymorphic_union(
                util.OrderedDict([("a", t1), ("b", t2), ("c", t3)]),
                None
            ),
            "SELECT t1.c1, t1.c2, t1.c3, CAST(NULL AS INTEGER) AS c4, "
            "CAST(NULL AS INTEGER) AS c5 FROM t1 UNION ALL SELECT t2.c1, "
            "t2.c2, t2.c3, t2.c4, CAST(NULL AS INTEGER) AS c5 FROM t2 "
            "UNION ALL SELECT t3.c1, CAST(NULL AS INTEGER) AS c2, t3.c3, "
            "CAST(NULL AS INTEGER) AS c4, t3.c5 FROM t3"
        )
    def test_no_cast_null(self):
        t1, t2, t3 = self._fixture()
        self.assert_compile(
            polymorphic_union(
                util.OrderedDict([("a", t1), ("b", t2), ("c", t3)]),
                'q1', cast_nulls=False
            ),
            "SELECT t1.c1, t1.c2, t1.c3, NULL AS c4, NULL AS c5, 'a' AS q1 "
            "FROM t1 UNION ALL SELECT t2.c1, t2.c2, t2.c3, t2.c4, NULL AS c5, "
            "'b' AS q1 FROM t2 UNION ALL SELECT t3.c1, NULL AS c2, t3.c3, "
            "NULL AS c4, t3.c5, 'c' AS q1 FROM t3"
        )
class NameConflictTest(fixtures.MappedTest):
    @classmethod
    def define_tables(cls, metadata):
        content = Table('content', metadata,
            Column('id', Integer, primary_key=True,
                test_needs_autoincrement=True),
            Column('type', String(30))
        )
        foo = Table('foo', metadata,
            Column('id', Integer, ForeignKey('content.id'),
                primary_key=True),
            Column('content_type', String(30))
        )
    def test_name_conflict(self):
        class Content(object):
            pass
        class Foo(Content):
            pass
        mapper(Content, self.tables.content,
            polymorphic_on=self.tables.content.c.type)
        mapper(Foo, self.tables.foo, inherits=Content,
            polymorphic_identity='foo')
        sess = create_session()
        f = Foo()
        f.content_type = 'bar'
        sess.add(f)
        sess.flush()
        f_id = f.id
        sess.expunge_all()
        assert sess.query(Content).get(f_id).content_type == 'bar'<|fim▁end|> | a1, b1, c1 = A(id=1), B(id=2), C(cid=1, id=3)
s.add_all([a1, b1, c1]) |
<|file_name|>memory_stats_test.js<|end_file_name|><|fim▁begin|>'use strict';
var MemoryStats = require('../../src/models/memory_stats')<|fim▁hole|> , chaiAsPromised = require('chai-as-promised');
chai.use(chaiAsPromised);
describe('MemoryStats', function() {
  describe('.constructor', function() {
    it('returns an instantiated memory stats and its schema attributes', function() {
      expect(MemoryStats().schemaAttrs).to.include.members(['container_name', 'timestamp_day']);
    });
    it('returns an instantiated memory stats and its table name', function() {
      expect(MemoryStats().tableName).to.eql('memory_stats');
    });
  });
  after(function() {
    return SQliteAdapter.deleteDB()
      .then(null)
      .catch(function(err) {
        console.log(err.stack);
      });
  });
});<|fim▁end|> | , SQliteAdapter = require('../../src/models/sqlite_adapter')
, chai = require('chai')
, expect = chai.expect |