prompt (large_string, lengths 70 to 991k) | completion (large_string, lengths 0 to 1.02k)
---|---|
<|file_name|>userListContainer.js<|end_file_name|><|fim▁begin|>import React from 'react'
import PropTypes from 'prop-types'
import { connect } from 'react-redux'
import { bindActionCreators } from 'redux'
import { selectUser } from '../../../store/actions'
import {
PagingState,
SortingState,
} from '@devexpress/dx-react-grid'
import {
Grid,
TableView,
TableHeaderRow,
PagingPanel,
} from '@devexpress/dx-react-grid-bootstrap3'
const URL = 'https://js.devexpress.com/Demos/WidgetsGallery/data/orderItems'
class UserList extends React.Component {
constructor (props) {
super(props)
this.state = {
columns: [
{ name: 'OrderNumber', title: 'Order #', align: 'right' },
{ name: 'OrderDate', title: 'Order Date' },
{ name: 'StoreCity', title: 'Store City' },
{ name: 'StoreState', title: 'Store State' },
{ name: 'Employee', title: 'Employee' },
{ name: 'SaleAmount', title: 'Sale Amount', align: 'right' },
],
rows: [],
sorting: [{ columnName: 'StoreCity', direction: 'asc' }],
totalCount: 0,
pageSize: 10,
allowedPageSizes: [5, 10, 15],
currentPage: 0,
// loading: true,
}
this.changeSorting = this.changeSorting.bind(this)
this.changeCurrentPage = this.changeCurrentPage.bind(this)
this.changePageSize = this.changePageSize.bind(this)
}
componentDidMount () {
this.loadData()
}
componentDidUpdate () {
this.loadData()
}
changeSorting (sorting) {
this.setState({
// loading: true,
sorting,
})
}
changeCurrentPage (currentPage) {
this.setState({
// loading: true,
currentPage,
})
}
changePageSize (pageSize) {
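// Clamp the current page so it stays valid after the page size (and therefore the total page count) changes.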
const totalPages = Math.ceil(this.state.totalCount / pageSize)
const currentPage = Math.min(this.state.currentPage, totalPages - 1)
this.setState({
// loading: true,<|fim▁hole|> pageSize,
currentPage,
})
}
queryString () {
const { sorting, pageSize, currentPage } = this.state
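// Build a DevExtreme-style query string: take/skip implement paging, orderby implements sorting.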
let queryString = `${URL}?take=${pageSize}&skip=${pageSize * currentPage}`
const columnSorting = sorting[0]
if (columnSorting) {
const sortingDirectionString = columnSorting.direction === 'desc' ? ' desc' : ''
queryString = `${queryString}&orderby=${columnSorting.columnName}${sortingDirectionString}`
}
return queryString
}
loadData () {
const queryString = this.queryString()
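// componentDidUpdate() calls loadData() on every update, so skip the request when the query has not changed.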
if (queryString === this.lastQuery) {
// this.setState({ loading: false })
return
}
fetch(queryString)
.then(response => response.json())
.then(data => {
console.log(data.items.length)
this.setState({
rows: data.items,
totalCount: data.totalCount,
// loading: false,
})
})
.catch(() => this.setState({
// loading: false
}))
this.lastQuery = queryString
}
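// Builds table rows from the redux "users" prop; note that render() below does not call this helper, it renders the DevExpress Grid instead.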
createTableItems () {
return this.props.users.map((user, i) => {
return (
<tr
onClick={() => this.props.selectUser(user)}
key={i}>
<th>{user.id}</th>
<th>{user.name}</th>
<th>{user.born}</th>
<th>{user.description}</th>
<th><img src={user.image} alt='' /></th>
</tr>
)
})
}
render () {
const {
rows,
columns,
sorting,
pageSize,
allowedPageSizes,
currentPage,
totalCount,
loading,
} = this.state
return (
<div style={{ position: 'relative' }}>
<Grid
rows={rows}
columns={columns}
>
<SortingState
sorting={sorting}
onSortingChange={this.changeSorting}
/>
<PagingState
currentPage={currentPage}
onCurrentPageChange={this.changeCurrentPage}
pageSize={pageSize}
onPageSizeChange={this.changePageSize}
totalCount={totalCount}
/>
<TableView
tableCellTemplate={({ row, column }) => {
if (column.name === 'SaleAmount') {
return (
<td style={{ textAlign: 'right' }}>${row.SaleAmount}</td>
)
}
return undefined
}}
tableNoDataCellTemplate={({ colspan }) => (
<td
style={{
textAlign: 'center',
padding: '40px 0',
}}
colSpan={colspan}
>
<big className='text-muted'>{loading ? '' : 'No data'}</big>
</td>
)}
/>
<TableHeaderRow allowSorting />
<PagingPanel
allowedPageSizes={allowedPageSizes}
/>
</Grid>
</div>
)
}
}
UserList.propTypes = {
users: PropTypes.array,
selectUser: PropTypes.func
}
const mapStateToProps = (state) => ({
users: state.users
})
const matchDispatchToProps = (dispatch) => {
return bindActionCreators({ selectUser: selectUser }, dispatch)
}
export default connect(mapStateToProps, matchDispatchToProps)(UserList)<|fim▁end|> | |
<|file_name|>snmpv3.rs<|end_file_name|><|fim▁begin|>use crate::rparser::*;
use crate::snmp::parse_pdu_enveloppe_version;
use crate::{gen_get_variants, Variant};
use snmp_parser::{parse_snmp_v3, SecurityModel};
pub struct SNMPv3Builder {}
impl RBuilder for SNMPv3Builder {
fn build(&self) -> Box<dyn RParser> {
Box::new(SNMPv3Parser::new(b"SNMPv3"))
}
fn get_l4_probe(&self) -> Option<ProbeL4> {
Some(snmpv3_probe)
}
}
pub struct SNMPv3Parser<'a> {
_name: Option<&'a [u8]>,
version: u8,
req_flags: u8,
security_model: SecurityModel,
}
impl<'a> From<SecurityModel> for Variant<'a> {
fn from(input: SecurityModel) -> Self {
input.0.into()
}
}
impl<'a> SNMPv3Parser<'a> {
pub fn new(name: &'a [u8]) -> SNMPv3Parser<'a> {
SNMPv3Parser {
_name: Some(name),
version: 0,
req_flags: 0,
security_model: SecurityModel(0),
}
}
}
impl<'a> RParser for SNMPv3Parser<'a> {
fn parse_l4(&mut self, data: &[u8], _direction: Direction) -> ParseResult {
match parse_snmp_v3(data) {
Ok((_rem, r)) => {
debug!("parse_snmp_v3: {:?}", r);
self.version = r.version as u8;
self.req_flags = r.header_data.msg_flags;
self.security_model = r.header_data.msg_security_model;
ParseResult::Ok
}
e => {
warn!("parse_snmp_v3 failed: {:?}", e);<|fim▁hole|> }
}
}
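// SNMPv3 msgFlags (RFC 3412): bit 0 = authFlag, bit 1 = privFlag, bit 2 = reportableFlag,
// so testing req_flags & 0b010 below reports whether the scoped PDU is encrypted.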
gen_get_variants! {SNMPv3Parser, "snmpv3.",
version => into,
encrypted => |s| { Some(Variant::Bool(s.req_flags & 0b010 != 0)) },
security_model => into,
}
}
pub fn snmpv3_probe(i: &[u8], _l4info: &L4Info) -> ProbeResult {
if i.len() <= 2 {
return ProbeResult::NotForUs;
}
match parse_pdu_enveloppe_version(i) {
Some(3) => ProbeResult::Certain,
_ => ProbeResult::NotForUs,
}
}<|fim▁end|> | ParseResult::Error |
<|file_name|>resilient_backpropagation.py<|end_file_name|><|fim▁begin|>from ..tools import add_bias, confirm
from ..activation_functions import softmax_function
from ..cost_functions import softmax_neg_loss
import numpy as np
def resilient_backpropagation(network, trainingset, testset, cost_function, ERROR_LIMIT=1e-3, max_iterations = (), weight_step_max = 50., weight_step_min = 0., start_step = 0.5, learn_max = 1.2, learn_min = 0.5, print_rate = 1000, save_trained_network = False ):
# Implemented according to iRprop+
# http://sci2s.ugr.es/keel/pdf/algorithm/articulo/2003-Neuro-Igel-IRprop+.pdf
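# iRprop+ keeps an individual step size per weight and adapts it from the sign of dE/dW:
#   * same sign as the previous epoch -> grow the step (capped at weight_step_max)
#   * sign flip -> shrink the step, and undo the last update if the error increased
#   * zero product -> take a step with the current step size unchanged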
assert softmax_function != network.layers[-1][1] or cost_function == softmax_neg_loss,\
"When using the `softmax` activation function, the cost function MUST be `softmax_neg_loss`."
assert cost_function != softmax_neg_loss or softmax_function == network.layers[-1][1],\
"When using the `softmax_neg_loss` cost function, the activation function in the final layer MUST be `softmax`."
assert trainingset[0].features.shape[0] == network.n_inputs, \
"ERROR: input size varies from the defined input setting"
assert trainingset[0].targets.shape[0] == network.layers[-1][0], \
"ERROR: output size varies from the defined output setting"
training_data = np.array( [instance.features for instance in trainingset ] )
training_targets = np.array( [instance.targets for instance in trainingset ] )
test_data = np.array( [instance.features for instance in testset ] )
test_targets = np.array( [instance.targets for instance in testset ] )
# Storing the current / previous weight step size
weight_step = [ np.full( weight_layer.shape, start_step ) for weight_layer in network.weights ]
# Storing the current / previous weight update
dW = [ np.ones(shape=weight_layer.shape) for weight_layer in network.weights ]
# Storing the previous derivative
previous_dEdW = [ 1 ] * len( network.weights )
# Storing the previous error measurement
prev_error = ( ) # inf
input_signals, derivatives = network.update( training_data, trace=True )
out = input_signals[-1]
cost_derivative = cost_function(out, training_targets, derivative=True).T
delta = cost_derivative * derivatives[-1]
error = cost_function(network.update( test_data ), test_targets )
n_samples = float(training_data.shape[0])
layer_indexes = range( len(network.layers) )[::-1] # reversed
epoch = 0
while error > ERROR_LIMIT and epoch < max_iterations:
epoch += 1
for i in layer_indexes:
# Loop over the weight layers in reversed order to calculate the deltas
# Calculate the delta with respect to the weights
dEdW = (np.dot( delta, add_bias(input_signals[i]) )/n_samples).T
if i != 0:
"""Do not calculate the delta unnecessarily."""
# Skip the bias weight
weight_delta = np.dot( network.weights[ i ][1:,:], delta )
# Calculate the delta for the subsequent layer
delta = weight_delta * derivatives[i-1]
# Calculate sign changes and note where they have changed
diffs = np.multiply( dEdW, previous_dEdW[i] )
pos_indexes = np.where( diffs > 0 )
neg_indexes = np.where( diffs < 0 )
zero_indexes = np.where( diffs == 0 )<|fim▁hole|> if np.any(pos_indexes):
# Calculate the weight step size
weight_step[i][pos_indexes] = np.minimum( weight_step[i][pos_indexes] * learn_max, weight_step_max )
# Calculate the weight step direction
dW[i][pos_indexes] = np.multiply( -np.sign( dEdW[pos_indexes] ), weight_step[i][pos_indexes] )
# Apply the weight deltas
network.weights[i][ pos_indexes ] += dW[i][pos_indexes]
# negative
if np.any(neg_indexes):
weight_step[i][neg_indexes] = np.maximum( weight_step[i][neg_indexes] * learn_min, weight_step_min )
if error > prev_error:
# iRprop+ version of resilient backpropagation
network.weights[i][ neg_indexes ] -= dW[i][neg_indexes] # backtrack
dEdW[ neg_indexes ] = 0
# zeros
if np.any(zero_indexes):
dW[i][zero_indexes] = np.multiply( -np.sign( dEdW[zero_indexes] ), weight_step[i][zero_indexes] )
network.weights[i][ zero_indexes ] += dW[i][zero_indexes]
# Store the previous weight step
previous_dEdW[i] = dEdW
#end weight adjustment loop
prev_error = error
input_signals, derivatives = network.update( training_data, trace=True )
out = input_signals[-1]
cost_derivative = cost_function(out, training_targets, derivative=True).T
delta = cost_derivative * derivatives[-1]
error = cost_function(network.update( test_data ), test_targets )
if epoch%print_rate==0:
# Show the current training status
print "[training] Current error:", error, "\tEpoch:", epoch
print "[training] Finished:"
print "[training] Converged to error bound (%.4g) with error %.4g." % ( ERROR_LIMIT, error )
print "[training] Measured quality: %.4g" % network.measure_quality( training_data, training_targets, cost_function )
print "[training] Trained for %d epochs." % epoch
if save_trained_network and confirm( promt = "Do you wish to store the trained network?" ):
network.save_network_to_file()
# end backprop<|fim▁end|> |
# positive |
<|file_name|>build.rs<|end_file_name|><|fim▁begin|>// Copyright © 2015, Peter Atashian
// Licensed under the MIT License <LICENSE.md>
extern crate build;
fn main() {
build::link("wiautil", true)<|fim▁hole|><|fim▁end|> | } |
<|file_name|>bubble.py<|end_file_name|><|fim▁begin|>from __future__ import print_function
import numpy as np
import sys
import mesh.patch as patch
import compressible_sr.eos as eos
from util import msg
def init_data(my_data, rp):
""" initialize the bubble problem """
msg.bold("initializing the bubble problem...")
# make sure that we are passed a valid patch object
if not isinstance(my_data, patch.CellCenterData2d):
print("ERROR: patch invalid in bubble.py")
print(my_data.__class__)
sys.exit()
# get the density, momenta, and energy as separate variables
dens = my_data.get_var("density")
xmom = my_data.get_var("x-momentum")
ymom = my_data.get_var("y-momentum")
ener = my_data.get_var("energy")
gamma = rp.get_param("eos.gamma")
grav = rp.get_param("compressible.grav")
scale_height = rp.get_param("bubble.scale_height")
dens_base = rp.get_param("bubble.dens_base")
dens_cutoff = rp.get_param("bubble.dens_cutoff")
x_pert = rp.get_param("bubble.x_pert")
y_pert = rp.get_param("bubble.y_pert")
r_pert = rp.get_param("bubble.r_pert")
pert_amplitude_factor = rp.get_param("bubble.pert_amplitude_factor")
# initialize the components, remember, that ener here is
# rho*eint + 0.5*rho*v**2, where eint is the specific
# internal energy (erg/g)
xmom[:, :] = 0.0
ymom[:, :] = 0.0
dens[:, :] = dens_cutoff
# set the density to be stratified in the y-direction
myg = my_data.grid
p = myg.scratch_array()
cs2 = scale_height*abs(grav)
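# Isothermal atmosphere: p = rho*cs2 with cs2 = |g|*H, so the density falls off as
# exp(-y/H); the loop below integrates hydrostatic equilibrium, dp/dy = rho*g, cell by cell.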
for j in range(myg.jlo, myg.jhi+1):
dens[:, j] = max(dens_base*np.exp(-myg.y[j]/scale_height),<|fim▁hole|> p[:, j] = dens[:, j]*cs2
else:
p[:, j] = p[:, j-1] + 0.5*myg.dy*(dens[:, j] + dens[:, j-1])*grav
# set the energy (P = cs2*dens)
ener[:, :] = p[:, :]/(gamma - 1.0) + \
0.5*(xmom[:, :]**2 + ymom[:, :]**2)/dens[:, :]
r = np.sqrt((myg.x2d - x_pert)**2 + (myg.y2d - y_pert)**2)
idx = r <= r_pert
# boost the specific internal energy, keeping the pressure
# constant, by dropping the density
eint = (ener[idx] - 0.5*(xmom[idx]**2 - ymom[idx]**2)/dens[idx])/dens[idx]
pres = dens[idx]*eint*(gamma - 1.0)
eint = eint*pert_amplitude_factor
dens[idx] = pres/(eint*(gamma - 1.0))
ener[idx] = dens[idx]*eint + 0.5*(xmom[idx]**2 + ymom[idx]**2)/dens[idx]
# p[idx] = pres
rhoh = eos.rhoh_from_rho_p(gamma, dens, p)
W = 1 / (np.sqrt(1-(xmom**2-ymom**2)/dens))
dens[:, :] *= W
xmom[:, :] *= rhoh[:, :]/dens*W**2
ymom[:, :] *= rhoh[:, :]/dens*W**2
# HACK: didn't work but W = 1 so shall cheat
ener[:, :] = rhoh[:, :]*W**2 - p - dens[:, :]
# ener[:, :] = p / (gamma-1)
# print(ener[:,myg.jlo:myg.jhi])#*W[:,myg.jlo:myg.jhi]**2)
# exit()
def finalize():
""" print out any information to the user at the end of the run """
pass<|fim▁end|> | dens_cutoff)
if j == myg.jlo: |
<|file_name|>ios-wineglass.d.ts<|end_file_name|><|fim▁begin|><|fim▁hole|>export default class IoIosWineglass extends React.Component<IconBaseProps, any> { }<|fim▁end|> | // TypeScript Version: 2.1
import * as React from 'react';
import { IconBaseProps } from 'react-icon-base'; |
<|file_name|>dataload.py<|end_file_name|><|fim▁begin|>#*******************************************************************************
# Copyright (c) 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#******************************************************************************/
import requests
import sys
import os
import json
from helpers.dbutils import CloudantDbUtils
from helpers.acmeair_utils import AcmeAirUtils
import conftest
# get the cloudant credentials from pytest config file
test_properties = conftest.test_properties()
class DataLoader:
"""
Test data loader related functions
"""
def load_AcmeData(self, num_of_cust):
"""
Reset databases and use the AcmeAir database loader to populate initial customer,
flight and airportmapping data. Does NOT generate user data like bookings.
"""
print ("num_of_cust: ", num_of_cust)
acmeair = AcmeAirUtils()
try:
if acmeair.is_acmeair_running() != 0:
raise RuntimeError("""
AcmeAir is already running which may cause unexpected results when
resetting databases. Please shut down the app and try again.
""")
else:
cloudantUtils = CloudantDbUtils(test_properties)
cloudantUtils.reset_databases()
acmeair.start_acmeair()
acmeair.load_data(num_of_cust)
finally:
acmeair.stop_acmeair()
def remove_AcmeDb(self, num_of_cust):
"""
Drop all AcmeAir databases
"""
acmeair = AcmeAirUtils()
if acmeair.is_acmeair_running() != 0:
acmeair.stop_acmeair()
cloudantUtils = CloudantDbUtils(test_properties)
cloudantUtils.drop_all_databases()
def load_SpecCharValuePredicateData(self):
"""
Create booking data needed to test SpecCharValuePredicate
"""
try:
acmeair = AcmeAirUtils()
acmeair.start_acmeair()
# book flights AA93 and AA330
flight1 = "AA93"
flight2 = "AA330"
# Step#1 - need to find the flights generated _id required for booking
flight1_id = acmeair.get_flightId_by_number(flight1)
print ("{} id = {}".format(flight1, flight1_id))
flight2_id = acmeair.get_flightId_by_number(flight2)
print ("{} id = {}".format(flight2, flight2_id))
# Step#2 - add the boooking
acmeair.book_flights("[email protected]", flight1, flight2)
finally:
acmeair.stop_acmeair()
if __name__ =='__main__':
"""
Utility to create test databases and load data
"""
import argparse
parser = argparse.ArgumentParser(description="Utility to load AcmeAir data required for python spark-cloudant tests")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-cleanup', action='store_true', help='Drop all test databases')
group.add_argument('-load', help='Reset and Load databases with the given # of users. -load 0 to just recreate databases and indexes.', type=int)
args = parser.parse_args()
dataloader = DataLoader()
if args.load is not None:
if args.load == 0:
cloudantUtils = CloudantDbUtils(test_properties)<|fim▁hole|> cloudantUtils.reset_databases()
else:
dataloader.load_AcmeData(args.load)
dataloader.load_SpecCharValuePredicateData()
elif args.cleanup:
cloudantUtils = CloudantDbUtils(test_properties)
cloudantUtils.drop_all_databases()<|fim▁end|> | |
<|file_name|>introspection.py<|end_file_name|><|fim▁begin|>from collections import namedtuple
# Structure returned by DatabaseIntrospection.get_table_list()
TableInfo = namedtuple('TableInfo', ['name', 'type'])
# Structure returned by the DB-API cursor.description interface (PEP 249)
FieldInfo = namedtuple('FieldInfo', 'name type_code display_size internal_size precision scale null_ok default')
class BaseDatabaseIntrospection:
"""
This class encapsulates all backend-specific introspection utilities
"""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
return name
def column_name_converter(self, name):
"""
Apply a conversion to the column name for the purposes of comparison.
Uses table_name_converter() by default.
"""
return self.table_name_converter(name)
def table_names(self, cursor=None, include_views=False):
"""
Returns a list of names of all tables that exist in the database.
The returned table list is sorted by Python's default sorting. We
do NOT use the database's ORDER BY here to avoid subtle differences
in sorting order between databases.
"""
def get_names(cursor):
return sorted(ti.name for ti in self.get_table_list(cursor)
if include_views or ti.type == 't')
if cursor is None:
with self.connection.cursor() as cursor:
return get_names(cursor)
return get_names(cursor)
def get_table_list(self, cursor):
"""
Returns an unsorted list of TableInfo named tuples of all tables and
views that exist in the database.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_table_list() method')
def django_table_names(self, only_existing=False, include_views=True):
"""
Returns a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
"""
from django.apps import apps
from django.db import router
tables = set()
for app_config in apps.get_app_configs():
for model in router.get_migratable_models(app_config, self.connection.alias):
if not model._meta.managed:
continue
tables.add(model._meta.db_table)
tables.update(
f.m2m_db_table() for f in model._meta.local_many_to_many
if f.remote_field.through._meta.managed
)
tables = list(tables)
if only_existing:
existing_tables = self.table_names(include_views=include_views)
tables = [
t
for t in tables
if self.table_name_converter(t) in existing_tables
]
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
from django.apps import apps
from django.db import router
all_models = []
for app_config in apps.get_app_configs():
all_models.extend(router.get_migratable_models(app_config, self.connection.alias))
tables = list(map(self.table_name_converter, tables))
return {
m for m in all_models
if self.table_name_converter(m._meta.db_table) in tables
}
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
from django.apps import apps
from django.db import models, router
sequence_list = []
for app_config in apps.get_app_configs():
for model in router.get_migratable_models(app_config, self.connection.alias):
if not model._meta.managed:
continue
if model._meta.swapped:
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.remote_field.through is None:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_key_columns() method')
def get_primary_key_column(self, cursor, table_name):
"""
Returns the name of the primary key column for the given table.
"""
for constraint in self.get_constraints(cursor, table_name).values():
if constraint['primary_key']:
return constraint['columns'][0]
return None
def get_indexes(self, cursor, table_name):
"""
Deprecated in Django 1.11, use get_constraints instead.
Returns a dictionary of indexed fieldname -> infodict for the given
table, where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
Only single-column indexes are introspected.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_indexes() method')
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index)
across one or more columns.
Returns a dict mapping constraint names to their attributes,
where attributes is a dict with keys:
* columns: List of columns this covers
* primary_key: True if primary key, False otherwise
* unique: True if this is a unique constraint, False otherwise
* foreign_key: (table, column) of target, or None
* check: True if check constraint, False otherwise
* index: True if index, False otherwise.
* orders: The order (ASC/DESC) defined for the columns of indexes
* type: The type of the index (btree, hash, etc.)
Some backends may return special constraint names that don't exist
if they don't name constraints of a certain type (e.g. SQLite)<|fim▁hole|> raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_constraints() method')<|fim▁end|> | """ |
<|file_name|>cuehandler.cpp<|end_file_name|><|fim▁begin|>#include "cuehandler.hpp"
CueHandler *CueHandler::New(){
return new CueHandler;
}
/*void CueHandler::Execute(vtkObject *vtkNotUsed(caller), unsigned long event, void *calldata){
if((this->animator != 0) && (this->renderer != 0)){
vtkAnimationCue::AnimationCueInfo *info =
static_cast<vtkAnimationCue::AnimationCueInfo *>(calldata);
switch(event){
case vtkCommand::StartAnimationCueEvent:
animator->Start(info, this->renderer);
break;<|fim▁hole|> case vtkCommand::EndAnimationCueEvent:
animator->Stop(info, this->renderer);
break;
}
}
}
*/
void CueHandler::Execute(vtkObject *vtkNotUsed(caller), unsigned long event, void *calldata){
animator->render();
}
void CueHandler::RegisterAnimator(std::shared_ptr<ImageAnimator> animatorptr){
this->animator = animatorptr;
}
void CueHandler::RegisterRenderer(vtkRenderer* renderptr){
this->renderer = renderptr;
}<|fim▁end|> | case vtkCommand::AnimationCueTickEvent:
animator->Tick(info, this->renderer);
break; |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python2.7
<|fim▁hole|> version='0.1a',
description='A wrapper to manage docker instances',
url='https://github.com/wnormandin/bftest_cli',
author='wnormandin',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python 2.7'
],
packages=find_packages(),
install_requires=['docker','click'],
py_modules=['bftest_cli'],
entry_points="""
[console_scripts]
dockcli=cli.dockcli:default
""",
)<|fim▁end|> | from setuptools import setup, find_packages
setup(
name='bftest_cli', |
<|file_name|>app.js<|end_file_name|><|fim▁begin|>var http = require("http");
http.createServer(function(request, response) {
response.writeHead(200, {
"Content-Type": "application/json"
});
response.write(JSON.stringify({<|fim▁hole|> response.end();
}).listen(3000);<|fim▁end|> | "api": "data"
})); |
<|file_name|>job_search_commute_search_test.py<|end_file_name|><|fim▁begin|># Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.<|fim▁hole|># Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import job_search_commute_search
PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"]
def test_commute_search(tenant):
jobs = job_search_commute_search.search_jobs(PROJECT_ID, tenant)
for job in jobs:
assert "projects/" in job<|fim▁end|> | # You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# |
<|file_name|>FileStream.java<|end_file_name|><|fim▁begin|>package com.teckcoder.crashengine.file;
import com.teckcoder.crashengine.utils.logger.Logger;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.InputStream;
public class FileStream {
File file = null;
FileLocationType locationType = null;
private FileStream(String path, FileLocationType locationType){
this.locationType = locationType;
file = new File(path);
}
public static FileStream loadInternalFile(String path){
FileStream file = new FileStream(path, FileLocationType.INTERNAL);
return file;
}
public static FileStream loadExternalFile(String path){
FileStream file = new FileStream(path, FileLocationType.EXTERNAL);
return file;
}
public InputStream getInputStream() {
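// EXTERNAL files are read from the filesystem; INTERNAL files are loaded from the classpath (e.g. from inside the jar).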
if(locationType == FileLocationType.EXTERNAL)
try {
return new FileInputStream(file.getPath().replace("\\", "/"));
} catch (FileNotFoundException e) {
e.printStackTrace();
Logger.logError("file missing", "Le fichier n'existe pas!");
}
else if(locationType == FileLocationType.INTERNAL)
return FileStream.class.getResourceAsStream("/"+file.getPath().replace("\\", "/"));
Logger.logError("file location", "FileLocationType erreur");
return null;
}
public File getFile(){
return file;
}
public FileLocationType getFileLocationType(){<|fim▁hole|><|fim▁end|> | return locationType;
}
} |
<|file_name|>belt_interpreter_node.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import rospy
from belt_parser import BeltParser
import tf
import tf2_ros
import math
import copy
from memory_definitions.srv import GetDefinition
from processing_belt_interpreter.msg import *
from drivers_ard_others.msg import BeltRange
from geometry_msgs.msg import Pose2D, TransformStamped, PointStamped
from ai_game_manager import StatusServices
from dynamic_reconfigure.server import Server
from processing_belt_interpreter.cfg import BeltInterpreterConfig
from multiprocessing import Lock
class BeltInterpreter(object):
def __init__(self):
super(BeltInterpreter, self).__init__()
rospy.init_node("belt_interpreter")
<|fim▁hole|>
# template for the sensor frame id, with '{}' being the sensor id
self.SENSOR_FRAME_ID = "belt_{}"
self.DEF_FILE = "processing/belt.xml"
self.TOPIC = "/processing/belt_interpreter/rects"
self.SENSORS_TOPIC = "/drivers/ard_others/belt_ranges"
self.PUB_RATE = rospy.Rate(10)
self.RECT_SCALE_WIDTH = 1.0
self.RECT_SCALE_HEIGHT = 1.0
self.WATCHDOG_PERIOD_BELT = rospy.Duration(0.015)
self.WATCHDOG_PERIOD_TERA = rospy.Duration(0.05)
self.PREVIOUS_DATA_SIZE = 2
filepath = self.fetch_definition()
self._belt_parser = BeltParser(filepath)
self._pub = rospy.Publisher(self.TOPIC, BeltRects, queue_size=1)
self._broadcaster = tf2_ros.StaticTransformBroadcaster()
self.pub_static_transforms()
self._sensors_sub = rospy.Subscriber(self.SENSORS_TOPIC, BeltRange,
self.callback)
self.syn_param_srv = Server(BeltInterpreterConfig, self.dyn_param_cb)
self._mutex = Lock()
self._watchdog = rospy.Timer(self.WATCHDOG_PERIOD_TERA, self.publish, oneshot=True)
self._current_rects = {}
self._current_statuses = {}
self._data_to_process = []
self._previous_rects = []
self._previous_statuses = []
self._same_bad_value_counter = {s: 0 for s in self._belt_parser.Sensors.keys()}
self._last_bad_value = {s: 0 for s in self._belt_parser.Sensors.keys()}
rospy.loginfo("Belt interpreter is ready. Listening for sensor data on '{}'.".format(self.SENSORS_TOPIC)) # TODO duplicate log with status_services.ready()
# Tell ai/game_manager the node initialized successfuly.
StatusServices("processing", "belt_interpreter").ready(True)
rospy.spin()
def dyn_param_cb(self, config, level):
self.RECT_SCALE_HEIGHT = config["RECT_SCALE_HEIGHT"]
self.RECT_SCALE_WIDTH = config["RECT_SCALE_WIDTH"]
rospy.loginfo("Set rect scale to (%f, %f)" % (self.RECT_SCALE_WIDTH, self.RECT_SCALE_HEIGHT))
return config
def publish(self, event):
with self._mutex:
if self._current_rects.keys() == ["sensor_tera1"] or not self._current_rects:
if self._watchdog:
self._watchdog.shutdown()
self._watchdog = rospy.Timer(self.WATCHDOG_PERIOD_TERA, self.publish, oneshot=True)
if len(self._current_rects) > 0:
self._previous_rects.append(copy.deepcopy(self._current_rects))
self._previous_statuses.append(copy.deepcopy(self._current_statuses))
if(len(self._previous_rects) > self.PREVIOUS_DATA_SIZE):
self._previous_rects.pop(0)
if (len(self._previous_statuses) > self.PREVIOUS_DATA_SIZE):
self._previous_statuses.pop(0)
self._pub.publish(self._current_rects.values())
self._current_rects.clear()
self._current_statuses.clear()
def process_range(self, data):
if data.sensor_id not in self._belt_parser.Sensors.keys():
rospy.logerr("Received data from belt sensor '{}' but no such sensor is defined"
.format(data.sensor_id))
return
with self._mutex:
params = self._belt_parser.Params[self._belt_parser.Sensors[data.sensor_id]["type"]]
if data.range > params["max_range"] or data.range <= 0:
self._current_statuses.update({data.sensor_id: False})
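# Count consecutive identical out-of-range readings: a long run of the
# exact same bad value usually means the sensor is disconnected.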
if data.range == self._last_bad_value[data.sensor_id]:
self._same_bad_value_counter[data.sensor_id] += 1
else:
self._same_bad_value_counter[data.sensor_id] = 0
self._last_bad_value[data.sensor_id] = data.range
if self._same_bad_value_counter[data.sensor_id] > 100:
rospy.logwarn_throttle(1, "Sensor %s might be disconnected !" % data.sensor_id)
# If we published this sensor most of the time and its bad, publish the last one we got
l = [data.sensor_id in d and d[data.sensor_id] for d in self._previous_statuses]
if sum(l) > math.ceil((self.PREVIOUS_DATA_SIZE + 1) / 2):
for d in reversed(self._previous_rects):
if data.sensor_id in d:
rospy.logdebug('Got bad data for sensor %s but publishing the last good data' % data.sensor_id)
r = d[data.sensor_id]
r.header.stamp = rospy.Time.now()
self._current_rects.update({data.sensor_id: d[data.sensor_id]})
return
return
self._same_bad_value_counter[data.sensor_id] = 0
if params["scale_responsive"]:
width = self.get_rect_width(data.range, params) * self.RECT_SCALE_WIDTH
height = self.get_rect_height(data.range, params) * self.RECT_SCALE_HEIGHT
else:
width = self.get_rect_width(data.range, params)
height = self.get_rect_height(data.range, params)
rect = RectangleStamped()
rect.header.frame_id = self.SENSOR_FRAME_ID.format(data.sensor_id)
rect.header.stamp = rospy.Time.now()
rect.x = self.get_rect_x(data.range, params)
rect.y = 0
rect.w = width
rect.h = height
rect.a = 0
self._current_rects.update({data.sensor_id: rect})
self._current_statuses.update({data.sensor_id: True})
def get_rect_width(self, r, params):
prec = r * params["precision"]
angle = params["angle"]
x_far = r + prec
x_close = math.cos(angle / 2) * (r - prec)
# called width because along x axis, but it is the smaller side
width = abs(x_far - x_close)
return width
def get_rect_height(self, r, params):
prec = r * params["precision"]
angle = params["angle"]
return abs(2 * math.sin(angle / 2) * (r + prec))
def get_rect_x(self, r, params):
prec = r * params["precision"]
angle = params["angle"]
x_far = r + prec
x_close = math.cos(angle / 2) * (r - prec)
return (x_far + x_close) / 2
def callback(self, data):
publish_now = False
if data.sensor_id in self._current_rects and data.sensor_id != 'sensor_tera1':
publish_now = True
self.process_range(data)
if data.sensor_id != 'sensor_tera1' and not publish_now:
if self._watchdog:
self._watchdog.shutdown()
self._watchdog = rospy.Timer(self.WATCHDOG_PERIOD_BELT, self.publish, oneshot=True)
elif publish_now:
self.publish(None)
def pub_static_transforms(self):
tr_list = []
for id, s in self._belt_parser.Sensors.items():
tr = TransformStamped()
tr.header.stamp = rospy.Time.now()
tr.header.frame_id = "robot"
tr.child_frame_id = self.SENSOR_FRAME_ID.format(id)
tr.transform.translation.x = s["x"]
tr.transform.translation.y = s["y"]
tr.transform.translation.z = 0
quat = tf.transformations.quaternion_from_euler(0, 0, s["a"])
tr.transform.rotation.x = quat[0]
tr.transform.rotation.y = quat[1]
tr.transform.rotation.z = quat[2]
tr.transform.rotation.w = quat[3]
tr_list.append(tr)
self._broadcaster.sendTransform(tr_list)
def fetch_definition(self):
get_def = rospy.ServiceProxy('/memory/definitions/get', GetDefinition)
get_def.wait_for_service()
try:
res = get_def(self.DEF_FILE)
if not res.success:
msg = "Can't fetch belt definition file. Shutting down."
rospy.logfatal(msg)
raise rospy.ROSInitException(msg)
else:
rospy.logdebug("Belt definition file fetched.")
return res.path
except rospy.ServiceException as exc:
msg = "Exception when fetching belt definition file. Shutting down.\n {}".format(str(exc))
rospy.logfatal(msg)
raise rospy.ROSInitException(msg)
if __name__ == '__main__':
b = BeltInterpreter()<|fim▁end|> | rospy.loginfo("Belt interpreter is initializing...") |
<|file_name|>options.go<|end_file_name|><|fim▁begin|>package middleware
import (
"net/http"
"strings"
"github.com/zenazn/goji/web"
)
type autoOptionsState int
const (
aosInit autoOptionsState = iota
aosHeaderWritten
aosProxying
)
// I originally used an httptest.ResponseRecorder here, but package httptest
// adds a flag which I'm not particularly eager to expose. This is essentially a
// ResponseRecorder that has been specialized for the purpose at hand to avoid
// the httptest dependency.
type autoOptionsProxy struct {
w http.ResponseWriter
c *web.C
state autoOptionsState
}
func (p *autoOptionsProxy) Header() http.Header {
return p.w.Header()
}
func (p *autoOptionsProxy) Write(buf []byte) (int, error) {
switch p.state {
case aosInit:
p.state = aosHeaderWritten
case aosProxying:
return len(buf), nil
}
return p.w.Write(buf)
}
func (p *autoOptionsProxy) WriteHeader(code int) {
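// If the wrapped handler 404s an OPTIONS request for a route that has other valid
// methods, swallow that response and answer 200 with an Allow header instead.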
methods := getValidMethods(*p.c)
switch p.state {
case aosInit:
if methods != nil && code == http.StatusNotFound {
p.state = aosProxying
break
}
p.state = aosHeaderWritten
fallthrough
default:
p.w.WriteHeader(code)
return
}
methods = addMethod(methods, "OPTIONS")
p.w.Header().Set("Allow", strings.Join(methods, ", "))
p.w.WriteHeader(http.StatusOK)
}
// AutomaticOptions automatically return an appropriate "Allow" header when the
// request method is OPTIONS and the request would have otherwise been 404'd.
func AutomaticOptions(c *web.C, h http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
if r.Method == "OPTIONS" {
w = &autoOptionsProxy{c: c, w: w}
}
h.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
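// getValidMethods returns the HTTP methods Goji recorded as valid for the matched route, or nil if none were recorded.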
func getValidMethods(c web.C) []string {
if c.Env == nil {
return nil
}
v, ok := c.Env[web.ValidMethodsKey]<|fim▁hole|> }
if methods, ok := v.([]string); ok {
return methods
}
return nil
}
func addMethod(methods []string, method string) []string {
for _, m := range methods {
if m == method {
return methods
}
}
return append(methods, method)
}<|fim▁end|> | if !ok {
return nil |
<|file_name|>test_monotone.py<|end_file_name|><|fim▁begin|>"""
Tests for L{monotone}.
"""
from hypothesis import given, strategies as st
import errno
from monotone import get_clock_info, monotonic
from monotone import _api, _bindings
import os
import platform
<|fim▁hole|> reason="POSIX-only tests (clock_gettime(3))",
)
needs_macos = pytest.mark.skipif(
platform.system() != "Darwin",
reason="macOS-only tests (mach_absolute_time(3))",
)
@pytest.fixture
def errno_value():
"""
A particular errno.
"""
return errno.EINVAL
@pytest.fixture
def strerror(errno_value):
"""
The string representation of a particular errno
"""
return "[Errno {}] Invalid argument".format(errno_value)
@pytest.fixture
def apply_failing_clock_call(monkeypatch):
"""
Return a callable that patches in a failing system-call fake and
returns a list of calls to that fake.
"""
def _apply_failing_clock_call(name, errno_value):
calls = []
def _failing_clock_call(clock_id, timespec):
calls.append((clock_id, timespec))
monkeypatch.setattr(_api.ffi, "errno", errno.EINVAL)
return -1
monkeypatch.setattr(_api, name, _failing_clock_call)
return calls
return _apply_failing_clock_call
@pytest.fixture
def apply_timespec(monkeypatch):
"""
Return a callable that patches in a fake over the specified clock
call that sets the specified resolution and returns a list of
calls to that fake.
"""
def _apply_timespec(name, goal_timespec):
calls = []
def _fake_clock_call(clock_id, timespec):
calls.append((clock_id, timespec))
timespec[0] = goal_timespec[0]
return 0
monkeypatch.setattr(_api, name, _fake_clock_call)
return calls
return _apply_timespec
class TestSimpleNamespace(object):
"""
Tests for L{_SimpleNamespace}.
"""
def test_init(self):
"""
The initializer updates the instance's C{__dict__} with its
keyword arguments.
"""
namespace = _api._SimpleNamespace(x=1)
assert namespace.x == 1
def test_repr(self):
"""
The instance's repr reflects its C{__dict__}
"""
namespace = _api._SimpleNamespace()
namespace.y = 2
assert repr(namespace) == "namespace(y=2)"
def test_eq(self):
"""
Two instances with equal C{__dict__}s are equal.
"""
assert _api._SimpleNamespace(a=1) == _api._SimpleNamespace(a=1)
@needs_posix
class TestGetClockInfoPosix(object):
"""
Tests for L{get_clock_info}.
"""
def test_non_monotonic(self):
"""
L{get_clock_info} only knows about the monotonic clock.
"""
with pytest.raises(ValueError):
get_clock_info("not monotonic")
def test_failure(self, apply_failing_clock_call, errno_value, strerror):
"""
A failure in C{clock_getres} results in an L{OSError} that
presents the failure's errno.
"""
calls = apply_failing_clock_call('_clock_getres', errno_value)
with pytest.raises(OSError) as exc:
get_clock_info("monotonic")
assert len(calls) == 1
assert calls[0][0] == _bindings.lib.CLOCK_MONOTONIC
assert str(exc.value) == strerror
@given(
clock_getres_spec=st.fixed_dictionaries({
"tv_sec": st.sampled_from([0, 1]),
"tv_nsec": st.sampled_from([0, 1]),
}),
)
def test_info(self, clock_getres_spec, apply_timespec):
"""
The reported info always includes a nanosecond resolution when
C{clock_getres} indicates nanosecond resolution.
"""
calls = apply_timespec(
"_clock_getres",
_bindings.ffi.new("struct timespec *", clock_getres_spec),
)
expected_info = _api._SimpleNamespace(
adjustable=False,
implementation="clock_gettime(MONOTONIC)",
monotonic=True,
resolution=None, # checked separately
)
if clock_getres_spec['tv_nsec']:
expected_resolution = 1e-09
else:
expected_resolution = 1.0
info = get_clock_info("monotonic")
resolution, info.resolution = info.resolution, None
assert info == expected_info
assert resolution - expected_resolution == pytest.approx(0.0)
assert len(calls) == 1
assert calls[0][0] == _bindings.lib.CLOCK_MONOTONIC
@needs_macos
class TestGetClockInfoMacOS(object):
"""
Tests for L{get_clock_info}.
"""
def test_non_monotonic(self):
"""
L{get_clock_info} only knows about the monotonic clock.
"""
with pytest.raises(ValueError):
get_clock_info("not monotonic")
def test_info(self):
"""
The reported info always includes a nanosecond resolution.
"""
expected_info = _api._SimpleNamespace(
adjustable=False,
implementation="mach_absolute_time()",
monotonic=True,
resolution=None, # checked separately
)
expected_resolution = 1e-09
info = get_clock_info("monotonic")
resolution, info.resolution = info.resolution, None
assert info == expected_info
assert resolution - expected_resolution == pytest.approx(0.0)
@needs_posix
def test_monotonic_fails_posix(apply_failing_clock_call,
errno_value,
strerror):
"""
A failure in C{clock_gettime} results in an L{OSError} that
presents the failure's errno.
"""
calls = apply_failing_clock_call('_clock_gettime', errno_value)
with pytest.raises(OSError) as exc:
monotonic()
assert len(calls) == 1
assert calls[0][0] == _bindings.lib.CLOCK_MONOTONIC
assert str(exc.value) == strerror
@needs_posix
@given(
clock_gettime_spec=st.fixed_dictionaries({
"tv_sec": st.integers(min_value=0, max_value=2 ** 32 - 1),
"tv_nsec": st.integers(min_value=0, max_value=2 ** 32 - 1),
}),
)
def test_clock(clock_gettime_spec, apply_timespec):
"""
For any given time resolution, the monotonic time equals the
sum of the seconds and nanoseconds.
"""
clock_gettime_calls = apply_timespec(
'_clock_gettime',
_bindings.ffi.new("struct timespec *", clock_gettime_spec),
)
# we expect a float: the current seconds plus the nanoseconds scaled
# down by a billion, iff the resolution is accurate to the nanosecond.
expected = float(clock_gettime_spec['tv_sec']) + (
clock_gettime_spec['tv_nsec'] * 1e-09)
result = monotonic()
assert result - expected == pytest.approx(0.0)
assert clock_gettime_calls[0][0] == _bindings.lib.CLOCK_MONOTONIC
def test_clock_increases():
"""
A monotonic moment is never greater than a succeeding monotonic
moment.
"""
assert monotonic() <= monotonic()<|fim▁end|> | import pytest
needs_posix = pytest.mark.skipif(
os.name == "posix" and platform.system() == "Darwin", |
<|file_name|>EnsuresNonNullIfInheritedTest.java<|end_file_name|><|fim▁begin|>import org.checkerframework.checker.nullness.qual.*;
import org.checkerframework.dataflow.qual.Pure;
class Node {
int id;
@Nullable Node next;
Node(int id, @Nullable Node next) {
this.id = id;
this.next = next;
}
}
class SubEnumerate {
protected @Nullable Node current;
public SubEnumerate(Node node) {
this.current = node;
}
@EnsuresNonNullIf(expression = "current", result = true)
public boolean hasMoreElements() {
return (current != null);
}
}
class Enumerate extends SubEnumerate {
public Enumerate(Node node) {
super(node);
}
<|fim▁hole|> }
}
class Main {
public static final void main(String args[]) {
Node n2 = new Node(2, null);
Node n1 = new Node(1, n2);
Enumerate e = new Enumerate(n1);
while (e.hasMoreElements()) {}
}
}<|fim▁end|> | public boolean hasMoreElements() {
return (current != null); |
<|file_name|>LensDistortOpTest.py<|end_file_name|><|fim▁begin|>##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#<|fim▁hole|>#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from IECore import *
import sys
import unittest
class LensDistortOpTest(unittest.TestCase):
def testDistortOpWithStandardLensModel(self):
# The lens model and parameters to use.
o = CompoundObject()
o["lensModel"] = StringData( "StandardRadialLensModel" )
o["distortion"] = DoubleData( 0.2 )
o["anamorphicSqueeze"] = DoubleData( 1. )
o["curvatureX"] = DoubleData( 0.2 )
o["curvatureY"] = DoubleData( 0.5 )
o["quarticDistortion"] = DoubleData( .1 )
# The input image to read.
r = EXRImageReader("test/IECore/data/exrFiles/uvMapWithDataWindow.100x100.exr")
img = r.read()
# Create the Op and set its parameters.
op = LensDistortOp()
op["input"] = img
op["mode"] = LensModel.Undistort
op['lensModel'].setValue(o)
# Run the Op.
out = op()
r = EXRImageReader("test/IECore/data/exrFiles/uvMapWithDataWindowDistorted.100x100.exr")
img2 = r.read()
self.assertEqual( img.displayWindow, img2.displayWindow )<|fim▁end|> | # * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer. |
<|file_name|>site.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# This file is part of Dyko
# Copyright © 2008-2010 Kozea<|fim▁hole|># it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kalamar. If not, see <http://www.gnu.org/licenses/>.
"""
Site
====
Site class. Create one for each independent site with its own configuration.
"""
import logging
from .request import normalize, make_request, And, Condition, Or, Not
from .query import QueryFilter, QuerySelect, QueryChain, QueryOrder, QueryRange,\
QueryDistinct, QueryAggregate
from .access_point import DEFAULT_PARAMETER
def _translate_request(request, aliases):
"""Translate high-level ``request`` to low-level using ``aliases``."""
if isinstance(request, And):
return And(*(_translate_request(req, aliases)
for req in request.sub_requests))
elif isinstance(request, Or):
return Or(*(_translate_request(req, aliases)
for req in request.sub_requests))
elif isinstance(request, Not):
return Not(_translate_request(request.sub_request, aliases))
elif isinstance(request, Condition):
name = repr(request.property)
if name in aliases:
# The complete path has already been selected,
# Let's use the alias instead !
new_name = aliases.get(name, name)
request.property.name = new_name
request.property.child_property = None
return request
elif name in aliases.values():
return request
elif ".".join(name.split(".")[:-1] + ["*"]) in aliases:
return request
else:
new_name = "__%s" % name.replace(".", "_")
aliases[name] = new_name
request.property.name = new_name
request.property.child_property = None
return request
def _delegate_to_acces_point(method_name, first_arg_is_a_request=False):
"""Create a function delegating ``method_name`` to an access point."""
if first_arg_is_a_request:
def wrapper(self, access_point_name, request=None, *args, **kwargs):
"""Call ``access_point.method_name(request, *args, **kwargs)``."""
access_point = self.access_points[access_point_name]
request = normalize(access_point.properties, request)
return getattr(access_point, method_name)(request, *args, **kwargs)
else:
def wrapper(self, access_point_name, *args, **kwargs):
"""Call ``access_point.method_name(*args, **kwargs)``."""
access_point = self.access_points[access_point_name]
return getattr(access_point, method_name)(*args, **kwargs)
# Redefining documentation and name of the wrappers
# pylint: disable=W0622
wrapper.__name__ = method_name
wrapper.__doc__ = \
"Call :meth:`kalamar.access_point.AccessPoint.%s`." % method_name
# pylint: enable=W0622
return wrapper
class Site(object):
"""Kalamar site."""
def __init__(self):
self.access_points = {}
self.logger = logging.getLogger("dyko")
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
self.logger.addHandler(NullHandler())
def register(self, name, access_point):
"""Add an access point to this site.
:param name: Identifier string of the added access point.
:param access_point: Concrete subclass of :class:`AccessPoint`.
"""
if name in self.access_points:
raise RuntimeError(
"Site already has an access point named %r." % name)
self.access_points[name] = access_point
access_point.bind(self, name)
def view(self, access_point_name, aliases=None, request=None, order_by=None,
select_range=None, distinct=False, aggregate=None, query=None):
"""Call :meth:`kalamar.access_point.AccessPoint.view`.
If ``alias`` and ``request`` are given, a query is created from them.
The query is then validated and then passed to the ``view`` method of
the access point called ``access_point_name``.
"""
access_point = self.access_points[access_point_name]
if aliases is None:
aliases = {"": "*"}
if query is None:
# Add dummy selects to be able to filter on those
chain = []
aliases = dict(((value, key) for key, value in aliases.items()))
request = make_request(request)
request = _translate_request(request, aliases)
aliases = dict(((value, key) for key, value in aliases.items()))
chain.append(QuerySelect(aliases))
chain.append(QueryFilter(request))
if distinct:
chain.append(QueryDistinct())
if order_by is not None:
chain.append(QueryOrder(order_by))
if aggregate is not None:
chain.append(QueryAggregate(aggregate))
if select_range is not None:
if hasattr(select_range, "__iter__"):
select_range = slice(*select_range)
else:
select_range = slice(select_range)
chain.append(QueryRange(select_range))
query = QueryChain(chain)
query.validate(access_point.properties)
for line in access_point.view(query):
for prop_name in [name for name in line if name.startswith("__")]:
line.pop(prop_name)
yield line
def from_repr(self, access_point_name, repr, default=DEFAULT_PARAMETER):
"""
Return an item of ``access_point_name`` from the ``repr`` string.
``repr`` should have been generated with item.__repr__()
"""
access_point = self.access_points[access_point_name]
return access_point.loader_from_reference_repr(repr)(None)[0]
create = _delegate_to_acces_point("create")
delete = _delegate_to_acces_point("delete")
delete_many = _delegate_to_acces_point("delete_many", True)
open = _delegate_to_acces_point("open", True)
search = _delegate_to_acces_point("search", True)
save = _delegate_to_acces_point("save")<|fim▁end|> | #
# This library is free software: you can redistribute it and/or modify |
<|file_name|>js_Fc4I144XPrPKyUpaWv36lNESuazCkfla6EpZyDPBOQk.js<|end_file_name|><|fim▁begin|>(function ($) {
/**
* Attach the child dialog behavior to new content.
*/
Drupal.behaviors.overlayChild = {
attach: function (context, settings) {
// Make sure this behavior is not processed more than once.
if (this.processed) {
return;
}
this.processed = true;
// If we cannot reach the parent window, break out of the overlay.
if (!parent.Drupal || !parent.Drupal.overlay) {
window.location = window.location.href.replace(/([?&]?)render=overlay&?/g, '$1').replace(/\?$/, '');
}
var settings = settings.overlayChild || {};
// If the entire parent window should be refreshed when the overlay is
// closed, pass that information to the parent window.
if (settings.refreshPage) {
parent.Drupal.overlay.refreshPage = true;
}
// If a form has been submitted successfully, then the server side script
// may have decided to tell the parent window to close the popup dialog.
if (settings.closeOverlay) {
parent.Drupal.overlay.bindChild(window, true);
// Use setTimeout to close the child window from a separate thread,
// because the current one is busy processing Drupal behaviors.
setTimeout(function () {
if (typeof settings.redirect == 'string') {
parent.Drupal.overlay.redirect(settings.redirect);
}
else {
parent.Drupal.overlay.close();
}
}, 1);
return;
}
// If one of the regions displaying outside the overlay needs to be
// reloaded immediately, let the parent window know.
if (settings.refreshRegions) {
parent.Drupal.overlay.refreshRegions(settings.refreshRegions);
}
// Ok, now we can tell the parent window we're ready.
parent.Drupal.overlay.bindChild(window);
// IE8 crashes on certain pages if this isn't called; reason unknown.
window.scrollTo(window.scrollX, window.scrollY);
// Attach child related behaviors to the iframe document.
Drupal.overlayChild.attachBehaviors(context, settings);
// There are two links within the message that informs people about the
// overlay and how to disable it. Make sure both links are visible when
// either one has focus and add a class to the wrapper for styling purposes.
$('#overlay-disable-message', context)
.focusin(function () {
$(this).addClass('overlay-disable-message-focused');
$('a.element-focusable', this).removeClass('element-invisible');
})
.focusout(function () {
$(this).removeClass('overlay-disable-message-focused');
$('a.element-focusable', this).addClass('element-invisible');
});
}
};
/**
* Overlay object for child windows.
*/
Drupal.overlayChild = Drupal.overlayChild || {
behaviors: {}
};
Drupal.overlayChild.prototype = {};
/**
* Attach child related behaviors to the iframe document.
*/
Drupal.overlayChild.attachBehaviors = function (context, settings) {
$.each(this.behaviors, function () {
this(context, settings);
});
};
/**
* Capture and handle clicks.
*
* Instead of binding a click event handler to every link we bind one to the
* document and handle events that bubble up. This also allows other scripts
* to bind their own handlers to links and also to prevent overlay's handling.
*/
Drupal.overlayChild.behaviors.addClickHandler = function (context, settings) {
$(document).bind('click.drupal-overlay mouseup.drupal-overlay', $.proxy(parent.Drupal.overlay, 'eventhandlerOverrideLink'));
};
/**
* Modify forms depending on their relation to the overlay.
*
* By default, forms are assumed to keep the flow in the overlay. Thus their
* action attribute get a ?render=overlay suffix.
*/
Drupal.overlayChild.behaviors.parseForms = function (context, settings) {
$('form', context).once('overlay', function () {
// Obtain the action attribute of the form.
var action = $(this).attr('action');
// Keep internal forms in the overlay.
if (action == undefined || (action.indexOf('http') != 0 && action.indexOf('https') != 0)) {
action += (action.indexOf('?') > -1 ? '&' : '?') + 'render=overlay';
$(this).attr('action', action);
}
// Submit external forms into a new window.
else {
$(this).attr('target', '_new');
}
});
};
/**
* Replace the overlay title with a message while loading another page.
*/
Drupal.overlayChild.behaviors.loading = function (context, settings) {
var $title;
var text = Drupal.t('Loading');
var dots = '';
$(document).bind('drupalOverlayBeforeLoad.drupal-overlay.drupal-overlay-child-loading', function () {
$title = $('#overlay-title').text(text);
var id = setInterval(function () {
dots = (dots.length > 10) ? '' : dots + '.';
$title.text(text + dots);
}, 500);
});
};
/**
* Switch active tab immediately.
*/
Drupal.overlayChild.behaviors.tabs = function (context, settings) {
var $tabsLinks = $('#overlay-tabs > li > a');
$('#overlay-tabs > li > a').bind('click.drupal-overlay', function () {
var active_tab = Drupal.t('(active tab)');
$tabsLinks.parent().siblings().removeClass('active').find('.element-invisible:contains(' + active_tab + ')').appendTo(this);
$(this).parent().addClass('active');
});
};
/**
* If the shortcut add/delete button exists, move it to the overlay titlebar.
*/
Drupal.overlayChild.behaviors.shortcutAddLink = function (context, settings) {
// Remove any existing shortcut button markup from the titlebar.
$('#overlay-titlebar').find('.add-or-remove-shortcuts').remove();
// If the shortcut add/delete button exists, move it to the titlebar.
var $addToShortcuts = $('.add-or-remove-shortcuts');
if ($addToShortcuts.length) {
$addToShortcuts.insertAfter('#overlay-title');
}
$(document).bind('drupalOverlayBeforeLoad.drupal-overlay.drupal-overlay-child-loading', function () {
$('#overlay-titlebar').find('.add-or-remove-shortcuts').remove();
});
};
/**
* Use displacement from parent window.
*/
Drupal.overlayChild.behaviors.alterTableHeaderOffset = function (context, settings) {
if (Drupal.settings.tableHeaderOffset) {
Drupal.overlayChild.prevTableHeaderOffset = Drupal.settings.tableHeaderOffset;
}
Drupal.settings.tableHeaderOffset = 'Drupal.overlayChild.tableHeaderOffset';
};
/**
* Callback for Drupal.settings.tableHeaderOffset.
*/
Drupal.overlayChild.tableHeaderOffset = function () {
var topOffset = Drupal.overlayChild.prevTableHeaderOffset ? eval(Drupal.overlayChild.prevTableHeaderOffset + '()') : 0;
return topOffset + parseInt($(document.body).css('marginTop'));
};
})(jQuery);
;
(function ($) {
/**
* Retrieves the summary for the first element.
*/
$.fn.drupalGetSummary = function () {
var callback = this.data('summaryCallback');
return (this[0] && callback) ? $.trim(callback(this[0])) : '';
};
/**
* Sets the summary for all matched elements.
*
* @param callback
* Either a function that will be called each time the summary is
* retrieved or a string (which is returned each time).
*/
$.fn.drupalSetSummary = function (callback) {
var self = this;
// To facilitate things, the callback should always be a function. If it's
// not, we wrap it into an anonymous function which just returns the value.
if (typeof callback != 'function') {
var val = callback;
callback = function () { return val; };
}
return this
.data('summaryCallback', callback)
// To prevent duplicate events, the handlers are first removed and then
// (re-)added.
.unbind('formUpdated.summary')
.bind('formUpdated.summary', function () {
self.trigger('summaryUpdated');
})
// The actual summaryUpdated handler doesn't fire when the callback is
// changed, so we have to do this manually.
.trigger('summaryUpdated');
};
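// Illustrative usage only; the selector and field names below are hypothetical.
// A settings pane can publish a summary of its current values like this:
//
//   $('fieldset#edit-comment-settings', context).drupalSetSummary(function (pane) {
//     return $('select[name="comment"] option:selected', pane).text();
//   });
//
// and a widget (for example vertical tabs) can read it back later with:
//
//   $('fieldset#edit-comment-settings', context).drupalGetSummary();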
/**
* Sends a 'formUpdated' event each time a form element is modified.
*/
Drupal.behaviors.formUpdated = {
attach: function (context) {
// These events are namespaced so that we can remove them later.
var events = 'change.formUpdated click.formUpdated blur.formUpdated keyup.formUpdated';
$(context)
// Since context could be an input element itself, it's added back to
// the jQuery object and filtered again.
.find(':input').andSelf().filter(':input')
// To prevent duplicate events, the handlers are first removed and then
// (re-)added.
.unbind(events).bind(events, function () {
$(this).trigger('formUpdated');
});
}
};
/**
* Prepopulate form fields with information from the visitor cookie.
*/
Drupal.behaviors.fillUserInfoFromCookie = {
attach: function (context, settings) {
$('form.user-info-from-cookie').once('user-info-from-cookie', function () {
var formContext = this;
$.each(['name', 'mail', 'homepage'], function () {
var $element = $('[name=' + this + ']', formContext);
var cookie = $.cookie('Drupal.visitor.' + this);
if ($element.length && cookie) {
$element.val(cookie);
}
});
});
}
};
})(jQuery);
;
(function ($) {
/**
* The base States namespace.
*
* Having the local states variable allows us to use the States namespace
* without having to always declare "Drupal.states".
*/
var states = Drupal.states = {
// An array of functions that should be postponed.
postponed: []
};
/**
* Attaches the states.
*/
Drupal.behaviors.states = {
attach: function (context, settings) {
for (var selector in settings.states) {
for (var state in settings.states[selector]) {
new states.Dependent({
element: $(selector),
state: states.State.sanitize(state),
dependees: settings.states[selector][state]
});
}
}
// Execute all postponed functions now.
while (states.postponed.length) {
(states.postponed.shift())();
}
}
};
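// For reference, settings.states as produced by the Form API #states property
// has roughly this shape (the selector and values below are made up):
//
//   {
//     '#edit-student-type': {
//       visible: {
//         ':input[name="school"]': { checked: true }
//       }
//     }
//   }
//
// i.e. "make #edit-student-type visible whenever the school checkbox is checked".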
/**
* Object representing an element that depends on other elements.
*
* @param args
* Object with the following keys (all of which are required):
* - element: A jQuery object of the dependent element
* - state: A State object describing the state that is dependent
* - dependees: An object with dependency specifications. Lists all elements
* that this element depends on.
*/
states.Dependent = function (args) {
$.extend(this, { values: {}, oldValue: undefined }, args);
for (var selector in this.dependees) {
this.initializeDependee(selector, this.dependees[selector]);
}
};
/**
* Comparison functions for comparing the value of an element with the
* specification from the dependency settings. If the object type can't be
* found in this list, the === operator is used by default.
*/
states.Dependent.comparisons = {
'RegExp': function (reference, value) {
return reference.test(value);
},
'Function': function (reference, value) {
// The "reference" variable is a comparison function.
return reference(value);
},
'Number': function (reference, value) {
// If "reference" is a number and "value" is a string, then cast reference
// as a string before applying the strict comparison in compare(). Otherwise
// numeric keys in the form's #states array fail to match string values
// returned from jQuery's val().
return (value.constructor.name === 'String') ? compare(String(reference), value) : compare(reference, value);
}
};
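// Example of why the Number case matters (values are illustrative): a form value
// read through jQuery's val() is the string "2", so comparing it against the
// numeric reference 2 goes through String(2) === "2" and still matches, whereas
// a plain strict comparison of 2 === "2" would fail.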
states.Dependent.prototype = {
/**
* Initializes one of the elements this dependent depends on.
*
* @param selector
* The CSS selector describing the dependee.
* @param dependeeStates
* The list of states that have to be monitored for tracking the
* dependee's compliance status.
*/
initializeDependee: function (selector, dependeeStates) {
var self = this;
// Cache for the states of this dependee.
self.values[selector] = {};
$.each(dependeeStates, function (state, value) {
state = states.State.sanitize(state);
// Initialize the value of this state.
self.values[selector][state.pristine] = undefined;
// Monitor state changes of the specified state for this dependee.
$(selector).bind('state:' + state, function (e) {
var complies = self.compare(value, e.value);
self.update(selector, state, complies);
});
// Make sure the event we just bound ourselves to is actually fired.
new states.Trigger({ selector: selector, state: state });
});
},
/**
* Compares a value with a reference value.
*
* @param reference
* The value used for reference.
* @param value
* The value to compare with the reference value.
* @return
* true, undefined or false.
*/
compare: function (reference, value) {
if (reference.constructor.name in states.Dependent.comparisons) {
// Use a custom compare function for certain reference value types.
return states.Dependent.comparisons[reference.constructor.name](reference, value);
}
else {
// Do a plain comparison otherwise.
return compare(reference, value);
}
},
/**
* Update the value of a dependee's state.
*
* @param selector
* CSS selector describing the dependee.
* @param state
* A State object describing the dependee's updated state.
* @param value
* The new value for the dependee's updated state.
*/
update: function (selector, state, value) {
// Only act when the 'new' value is actually new.
if (value !== this.values[selector][state.pristine]) {
this.values[selector][state.pristine] = value;
this.reevaluate();
}
},
/**
* Triggers change events in case a state changed.
*/
reevaluate: function () {
var value = undefined;
// Merge all individual values to find out whether this dependee complies.
for (var selector in this.values) {
for (var state in this.values[selector]) {
state = states.State.sanitize(state);
var complies = this.values[selector][state.pristine];
value = ternary(value, invert(complies, state.invert));
}
}
// Only invoke a state change event when the value actually changed.
if (value !== this.oldValue) {
// Store the new value so that we can compare later whether the value
// actually changed.
this.oldValue = value;
// Normalize the value to match the normalized state name.
value = invert(value, this.state.invert);
// By adding "trigger: true", we ensure that state changes don't go into
// infinite loops.
this.element.trigger({ type: 'state:' + this.state, value: value, trigger: true });
}
}
};
states.Trigger = function (args) {
$.extend(this, args);
if (this.state in states.Trigger.states) {
this.element = $(this.selector);
// Only call the trigger initializer when it wasn't yet attached to this
// element. Otherwise we'd end up with duplicate events.
if (!this.element.data('trigger:' + this.state)) {
this.initialize();
}
}
};
states.Trigger.prototype = {
initialize: function () {
var self = this;
var trigger = states.Trigger.states[this.state];
if (typeof trigger == 'function') {
// We have a custom trigger initialization function.
trigger.call(window, this.element);
}
else {
$.each(trigger, function (event, valueFn) {
self.defaultTrigger(event, valueFn);
});
}
// Mark this trigger as initialized for this element.
this.element.data('trigger:' + this.state, true);
},
defaultTrigger: function (event, valueFn) {
var self = this;
var oldValue = valueFn.call(this.element);
// Attach the event callback.
this.element.bind(event, function (e) {
var value = valueFn.call(self.element, e);
// Only trigger the event if the value has actually changed.
if (oldValue !== value) {
self.element.trigger({ type: 'state:' + self.state, value: value, oldValue: oldValue });
oldValue = value;
}
});
states.postponed.push(function () {
// Trigger the event once for initialization purposes.
self.element.trigger({ type: 'state:' + self.state, value: oldValue, oldValue: undefined });
});
}
};
/**
* This list of states contains functions that are used to monitor the state
* of an element. Whenever an element depends on the state of another element,
* one of these trigger functions is added to the dependee so that the
* dependent element can be updated.
*/
states.Trigger.states = {
// 'empty' describes the state to be monitored
empty: {
// 'keyup' is the (native DOM) event that we watch for.
'keyup': function () {
// The function associated to that trigger returns the new value for the
// state.
return this.val() == '';
}
},
checked: {
'change': function () {
return this.attr('checked');
}
},
// For radio buttons, only return the value if the radio button is selected.
value: {
'keyup': function () {
// Radio buttons share the same :input[name="key"] selector.
if (this.length > 1) {
// Initial checked value of radios is undefined, so we return false.
return this.filter(':checked').val() || false;
}
return this.val();
},
'change': function () {
// Radio buttons share the same :input[name="key"] selector.
if (this.length > 1) {
// Initial checked value of radios is undefined, so we return false.
return this.filter(':checked').val() || false;
}
return this.val();
}
},
collapsed: {
'collapsed': function(e) {
return (e !== undefined && 'value' in e) ? e.value : this.is('.collapsed');
}
}
};
/**
* A state object is used for describing the state and performing aliasing.
*/
states.State = function(state) {
// We may need the original unresolved name later.
this.pristine = this.name = state;
// Normalize the state name.
while (true) {
// Iteratively remove exclamation marks and invert the value.
while (this.name.charAt(0) == '!') {
this.name = this.name.substring(1);
this.invert = !this.invert;
}
// Replace the state with its normalized name.
if (this.name in states.State.aliases) {
this.name = states.State.aliases[this.name];
}
else {
break;
}
}
};
/**
* Create a new State object by sanitizing the passed value.
*/
states.State.sanitize = function (state) {
if (state instanceof states.State) {
return state;
}
else {
return new states.State(state);
}
};
/**
* This list of aliases is used to normalize states and associates negated names
* with their respective inverse state.
*/
states.State.aliases = {
'enabled': '!disabled',
'invisible': '!visible',
'invalid': '!valid',
'untouched': '!touched',
'optional': '!required',
'filled': '!empty',
'unchecked': '!checked',
'irrelevant': '!relevant',
'expanded': '!collapsed',
'readwrite': '!readonly'
};
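// Example of the normalization performed by states.State (illustrative):
// new states.State('filled') resolves via the alias to '!empty', leaving the
// monitored state as 'empty' with invert set to true; '!filled' inverts twice
// and ends up as 'empty' with invert false.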
states.State.prototype = {
invert: false,
/**
* Ensures that just using the state object returns the name.
*/
toString: function() {
return this.name;
}
};
/**
* Global state change handlers. These are bound to "document" to cover all
* elements whose state changes. Events sent to elements within the page
* bubble up to these handlers. We use this system so that themes and modules
* can override these state change handlers for particular parts of a page.
*/
{
$(document).bind('state:disabled', function(e) {
// Only act when this change was triggered by a dependency and not by the
// element monitoring itself.
if (e.trigger) {
$(e.target)
.attr('disabled', e.value)
.filter('.form-element')
.closest('.form-item, .form-submit, .form-wrapper')[e.value ? 'addClass' : 'removeClass']('form-disabled');
// Note: WebKit nightlies don't reflect that change correctly.
// See https://bugs.webkit.org/show_bug.cgi?id=23789
}
});
$(document).bind('state:required', function(e) {
if (e.trigger) {
if (e.value) {<|fim▁hole|> else {
$(e.target).closest('.form-item, .form-wrapper').find('label .form-required').remove();
}
}
});
$(document).bind('state:visible', function(e) {
if (e.trigger) {
$(e.target).closest('.form-item, .form-submit, .form-wrapper')[e.value ? 'show' : 'hide']();
}
});
$(document).bind('state:checked', function(e) {
if (e.trigger) {
$(e.target).attr('checked', e.value);
}
});
$(document).bind('state:collapsed', function(e) {
if (e.trigger) {
if ($(e.target).is('.collapsed') !== e.value) {
$('> legend a', e.target).click();
}
}
});
}
/**
 * These are helper functions implementing additional "operators"; they don't
 * implement any logic that is particular to states.
*/
{
// Bitwise AND with a third undefined state.
function ternary (a, b) {
return a === undefined ? b : (b === undefined ? a : a && b);
};
// Inverts a (if it's not undefined) when invert is true.
function invert (a, invert) {
return (invert && a !== undefined) ? !a : a;
};
// Compares two values while ignoring undefined values.
function compare (a, b) {
return (a === b) ? (a === undefined ? a : true) : (a === undefined || b === undefined);
}
}
})(jQuery);
;<|fim▁end|> | $(e.target).closest('.form-item, .form-wrapper').find('label').append('<span class="form-required">*</span>');
} |
<|file_name|>version.js<|end_file_name|><|fim▁begin|>const exec = require('child_process').exec;
const ver = require('../package.json').version;
let hash;
module.exports.init = function (b) {
exec('git rev-parse HEAD', (error, stdout, stderr) => {
if (error != null) {
hash = null;
console.error("git's broken yo");
}<|fim▁hole|>module.exports.run = function (r, parts, reply) {
reply(`${ver}: ${hash}`);
};
module.exports.commands = ['version'];<|fim▁end|> | hash = stdout;
});
};
|
<|file_name|>GeoPointRecord.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2015 Luca Capra <[email protected]>.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at<|fim▁hole|> *
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.createnet.compose.data;
/**
*
* @author Luca Capra <[email protected]>
*/
public class GeoPointRecord extends Record<String> {
public Point point;
protected String value;
public GeoPointRecord() {}
public GeoPointRecord(String point) {
this.point = new Point(point);
}
public GeoPointRecord(double latitude, double longitude) {
this.point = new Point(latitude, longitude);
}
@Override
public String getValue() {
return point.toString();
}
@Override
public void setValue(Object value) {
this.value = parseValue(value);
this.point = new Point(this.value);
}
@Override
public String parseValue(Object raw) {
return (String)raw;
}
@Override
public String getType() {
return "geo_point";
}
public class Point {
public double latitude;
public double longitude;
public Point(double latitude, double longitude) {
this.latitude = latitude;
this.longitude = longitude;
}
public Point(String val) {
String[] coords = val.split(",");
longitude = Double.parseDouble(coords[0].trim());
latitude = Double.parseDouble(coords[1].trim());
}
@Override
public String toString() {
return this.longitude + "," + this.latitude;
}
}
}<|fim▁end|> | |
<|file_name|>issue-9968.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast check-fast doesn't like extern mod
// aux-build:issue-9968.rs<|fim▁hole|>use lib::{Trait, Struct};
pub fn main()
{
Struct::init().test();
}<|fim▁end|> |
extern mod lib = "issue-9968";
|
<|file_name|>wmlparser3.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# encoding: utf-8
"""
This parser uses the --preprocess option of wesnoth so a working
wesnoth executable must be available at runtime if the WML to parse
contains preprocessing directives.
Pure WML can be parsed as is.
For example:
wml = ""
[unit]
id=elve
name=Elve
[abilities]
[damage]
id=Ensnare
[/damage]
[/abilities]
[/unit]
""
p = Parser()
cfg = p.parse_text(wml)
for unit in cfg.get_all(tag = "unit"):
print(unit.get_text_val("id"))
print(unit.get_text_val("name"))
for abilities in unit.get_all(tag = "abilities"):
for ability in abilities.get_all(tag = ""):
print(ability.get_name())
print(ability.get_text_val("id"))
Because no preprocessing is required, we did not have to pass the
location of the wesnoth executable to Parser.
The get_all method always returns a list over matching tags or
attributes.
The get_name method can be used to get the name and the get_text_val
method can be used to query the value of an attribute.
"""
import os, glob, sys, re, subprocess, argparse, tempfile, shutil
import atexit
tempdirs_to_clean = []
tmpfiles_to_clean = []
@atexit.register
def cleaner():
for temp_dir in tempdirs_to_clean:
shutil.rmtree(temp_dir, ignore_errors=True)
for temp_file in tmpfiles_to_clean:
os.remove(temp_file)
class WMLError(Exception):
"""
Catch this exception to retrieve the first error message from
the parser.
"""
def __init__(self, parser=None, message=None):
if parser:
self.line = parser.parser_line
self.wml_line = parser.last_wml_line
self.message = message
self.preprocessed = parser.preprocessed
def __str__(self):
return """WMLError:
%s %s
%s
%s
""" % (str(self.line), self.preprocessed, self.wml_line, self.message)
class StringNode:
"""
One part of an attribute's value. Because a single WML string
can be made from multiple translatable strings we model
it as a list of several StringNode each with its own text domain.
"""
def __init__(self, data: bytes):
self.textdomain = None # non-translatable by default
self.data = data
def wml(self) -> bytes:
if not self.data:
return b""
return self.data
def debug(self):
if self.textdomain:
return "_<%s>'%s'" % (self.textdomain,
self.data.decode("utf8", "ignore"))
else:
return "'%s'" % self.data.decode("utf8", "ignore")
def __str__(self):
return "StringNode({})".format(self.debug())
def __repr__(self):
return str(self)
class AttributeNode:
"""
A WML attribute. For example the "id=Elfish Archer" in:
[unit]
id=Elfish Archer
[/unit]
"""
def __init__(self, name, location=None):
self.name = name
self.location = location
self.value = [] # List of StringNode
def wml(self) -> bytes:
s = self.name + b"=\""
for v in self.value:
s += v.wml().replace(b"\"", b"\"\"")
s += b"\""
return s
def debug(self):
return self.name.decode("utf8") + "=" + " .. ".join(
[v.debug() for v in self.value])
def get_text(self, translation=None) -> str:
"""
Returns a text representation of the node's value. The
translation callback, if provided, will be called on each
partial string with the string and its corresponding textdomain
and the returned translation will be used.
"""
r = ""
for s in self.value:
ustr = s.data.decode("utf8", "ignore")
if translation:
r += translation(ustr, s.textdomain)
else:
r += ustr
return r
def get_binary(self):
"""
Returns the unmodified binary representation of the value.
"""
r = b""
for s in self.value:
r += s.data
return r
def get_name(self):
return self.name.decode("utf8")
def __str__(self):
return "AttributeNode({})".format(self.debug())
def __repr__(self):
return str(self)
class TagNode:
"""
A WML tag. For example the "unit" in this example:
[unit]
id=Elfish Archer
[/unit]
"""
def __init__(self, name, location=None):
self.name = name
self.location = location
# List of child elements, which are either of type TagNode or
# AttributeNode.
self.data = []
self.speedy_tags = {}
def wml(self) -> bytes:
"""
Returns a (binary) WML representation of the entire node.
All attribute values are enclosed in quotes and quotes are
escaped (as double quotes). Note that no other escaping is
performed (see the BinaryWML specification for additional
escaping you may require).
"""
s = b"[" + self.name + b"]\n"
for sub in self.data:
s += sub.wml() + b"\n"
s += b"[/" + self.name.lstrip(b'+') + b"]\n"
return s
def debug(self):
s = "[%s]\n" % self.name.decode("utf8")
for sub in self.data:
for subline in sub.debug().splitlines():
s += " %s\n" % subline
s += "[/%s]\n" % self.name.decode("utf8").lstrip('+')
return s
def get_all(self, **kw):
"""
This gets all child tags or child attributes of the tag.
For example:
[unit]
name=A
name=B
[attack]
[/attack]
[attack]
[/attack]
[/unit]
unit.get_all(att = "name")
will return two nodes for "name=A" and "name=B"
unit.get_all(tag = "attack")
will return two nodes for the two [attack] tags.
unit.get_all()
will return 4 nodes for all 4 sub-elements.
unit.get_all(att = "")
Will return the two attribute nodes.
unit.get_all(tag = "")
Will return the two tag nodes.
If no elements are found an empty list is returned.
"""
if len(kw) == 1 and "tag" in kw and kw["tag"]:
return self.speedy_tags.get(kw["tag"].encode("utf8"), [])
r = []
for sub in self.data:
ok = True
for k, v in list(kw.items()):
v = v.encode("utf8")
if k == "tag":
if not isinstance(sub, TagNode):
ok = False
elif v != b"" and sub.name != v:
ok = False
elif k == "att":
if not isinstance(sub, AttributeNode):
ok = False
elif v != b"" and sub.name != v:
ok = False
if ok:
r.append(sub)
return r
def get_text_val(self, name, default=None, translation=None, val=-1):
"""
Returns the value of the specified attribute. If the attribute
is given multiple times, the value number val is returned (default
behaviour being to return the last value). If the
attribute is not found, the default parameter is returned.
If a translation is specified, it should be a function which
when passed a unicode string and text-domain returns a
translation of the unicode string. The easiest way is to pass
it to gettext.translation if you have the binary message
catalogues loaded.
"""
x = self.get_all(att=name)
if not x: return default
return x[val].get_text(translation)
def get_binary(self, name, default=None):
"""
Returns the unmodified binary data for the first attribute
of the given name or the passed default value if it is not
found.
"""
x = self.get_all(att=name)
if not x: return default
return x[0].get_binary()
def append(self, node):
"""
Appends a child node (must be either a TagNode or
AttributeNode).
"""
self.data.append(node)
if isinstance(node, TagNode):
if node.name not in self.speedy_tags:
self.speedy_tags[node.name] = []
self.speedy_tags[node.name].append(node)
def get_name(self):
return self.name.decode("utf8")
def __str__(self):
return "TagNode({})".format(self.get_name())
def __repr__(self):
return str(self)
class RootNode(TagNode):
"""
The root node. There is exactly one such node.
"""
def __init__(self):
TagNode.__init__(self, None)
def debug(self):
s = ""
for sub in self.data:
for subline in sub.debug().splitlines():
s += subline + "\n"
return s
def __str__(self):
return "RootNode()"
def __repr__(self):
return str(self)
class Parser:
def __init__(self, wesnoth_exe=None, config_dir=None,
data_dir=None):
"""
wesnoth_exe - Wesnoth executable to use. This should have been
configured to use the desired data and config directories.
config_dir - The Wesnoth configuration directory, can be
None to use the wesnoth default.
data_dir - The Wesnoth data directory, can be None to use
the wesnoth default.
After parsing is done the root node of the result will be
in the root attribute.
"""
self.wesnoth_exe = wesnoth_exe
self.config_dir = None
if config_dir: self.config_dir = os.path.abspath(config_dir)
self.data_dir = None
if data_dir: self.data_dir = os.path.abspath(data_dir)
self.keep_temp_dir = None
self.temp_dir = None
self.no_preprocess = (wesnoth_exe is None)
self.preprocessed = None
self.verbose = False
self.last_wml_line = "?"
self.parser_line = 0
self.line_in_file = 42424242
self.chunk_start = "?"
def parse_file(self, path, defines="") -> RootNode:
"""
Parse the given file found under path.
"""
self.path = path
if not self.no_preprocess:
self.preprocess(defines)
return self.parse()
def parse_binary(self, binary: bytes, defines="") -> RootNode:
"""
Parse a chunk of binary WML.
"""
td, tmpfilePath = tempfile.mkstemp(prefix="wmlparser_",
suffix=".cfg")
with open(tmpfilePath, 'wb') as temp:
temp.write(binary)
os.close(td)
self.path = tmpfilePath
tmpfiles_to_clean.append(tmpfilePath)
if not self.no_preprocess:
self.preprocess(defines)
return self.parse()
def parse_text(self, text, defines="") -> RootNode:
"""
Parse a text string.
"""
return self.parse_binary(text.encode("utf8"), defines)
def preprocess(self, defines):
"""
This is called by the parse functions to preprocess the
input from a normal WML .cfg file into a preprocessed
.plain file.
"""
if self.keep_temp_dir:
output = self.keep_temp_dir
else:
output = tempfile.mkdtemp(prefix="wmlparser_")
tempdirs_to_clean.append(output)
self.temp_dir = output
commandline = [self.wesnoth_exe]
if self.data_dir:
commandline += ["--data-dir", self.data_dir]
if self.config_dir:
commandline += ["--config-dir", self.config_dir]
commandline += ["--preprocess", self.path, output]
if defines:
commandline += ["--preprocess-defines", defines]
if self.verbose:
print((" ".join(commandline)))
p = subprocess.Popen(commandline,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if self.verbose:
print((out + err).decode("utf8"))
self.preprocessed = output + "/" + os.path.basename(self.path) + \
".plain"
if not os.path.exists(self.preprocessed):
first_line = open(self.path).readline().strip()
raise WMLError(self, "Preprocessor error:\n" +
" ".join(commandline) + "\n" +
"First line: " + first_line + "\n" +
out.decode("utf8") +
err.decode("utf8"))
def parse_line_without_commands_loop(self, line: str) -> str:
"""
Once the .plain commands are handled, WML lines are passed to
this.
"""
if not line: return
if line.strip():
self.skip_newlines_after_plus = False
if self.in_tag:
self.handle_tag(line)
return
if self.in_arrows:
arrows = line.find(b'>>')
if arrows >= 0:
self.in_arrows = False
self.temp_string += line[:arrows]
self.temp_string_node = StringNode(self.temp_string)
self.temp_string = b""
self.temp_key_nodes[self.commas].value.append(
self.temp_string_node)
self.in_arrows = False
return line[arrows + 2:]
else:
self.temp_string += line
return
quote = line.find(b'"')
if not self.in_string:
arrows = line.find(b'<<')
if arrows >= 0 and (quote < 0 or quote > arrows):
self.parse_line_without_commands(line[:arrows])
self.in_arrows = True
return line[arrows + 2:]
if quote >= 0:
if self.in_string:
# double quote
if quote < len(line) - 1 and line[quote + 1] == b'"'[0]:
self.temp_string += line[:quote + 1]
return line[quote + 2:]
self.temp_string += line[:quote]
self.temp_string_node = StringNode(self.temp_string)
if self.translatable:
self.temp_string_node.textdomain = self.textdomain
self.translatable = False
self.temp_string = b""
if not self.temp_key_nodes:
raise WMLError(self, "Unexpected string value.")
self.temp_key_nodes[self.commas].value.append(
self.temp_string_node)
self.in_string = False
return line[quote + 1:]
else:
self.parse_outside_strings(line[:quote])
self.in_string = True
return line[quote + 1:]
else:
if self.in_string:
self.temp_string += line
else:
self.parse_outside_strings(line)
def parse_line_without_commands(self, line):
while True:
line = self.parse_line_without_commands_loop(line)
if not line:
break
def parse_outside_strings(self, line):
"""
Parse a WML fragment outside of strings.
"""
if not line: return
if line.lstrip(b" \t").startswith(b"#textdomain "):
self.textdomain = line.lstrip(b" \t")[12:].strip().decode("utf8")
return
if not self.temp_key_nodes:
line = line.lstrip()
if not line: return
# Is it a tag?
if line.startswith(b"["):
self.handle_tag(line)
# No tag, must be an attribute.
else:
self.handle_attribute(line)
else:
for i, segment in enumerate(line.split(b"+")):
segment = segment.lstrip(b" \t")
if i > 0:
# If the last segment is empty (there was a plus sign
# at the end) we need to skip newlines.
self.skip_newlines_after_plus = not segment.strip()
if not segment: continue
if segment.rstrip(b" ") == b"_":
self.translatable = True
segment = segment[1:].lstrip(b" ")
if not segment: continue
self.handle_value(segment)
def handle_tag(self, line):
end = line.find(b"]")
if end < 0:
if line.endswith(b"\n"):
raise WMLError(self, "Expected closing bracket.")
self.in_tag += line
return
tag = (self.in_tag + line[:end])[1:]
self.in_tag = b""
if tag.startswith(b"/"):
self.parent_node = self.parent_node[:-1]
elif tag.startswith(b"+") and self.parent_node and self.parent_node[-1].get_all(tag=tag[1:].decode()):
node_to_append_to = self.parent_node[-1].get_all(tag=tag[1:].decode())[-1]
self.parent_node.append(node_to_append_to)
else:
node = TagNode(tag, location=(self.line_in_file, self.chunk_start))
if self.parent_node:
self.parent_node[-1].append(node)
self.parent_node.append(node)
self.parse_outside_strings(line[end + 1:])
def handle_attribute(self, line):
assign = line.find(b"=")
remainder = None
if assign >= 0:
remainder = line[assign + 1:]
line = line[:assign]
self.commas = 0
self.temp_key_nodes = []
for att in line.split(b","):
att = att.strip()
node = AttributeNode(att, location=(self.line_in_file, self.chunk_start))
self.temp_key_nodes.append(node)
if self.parent_node:
self.parent_node[-1].append(node)
if remainder:
self.parse_outside_strings(remainder)
def handle_value(self, segment):
def add_text(segment):
segment = segment.rstrip()
if not segment: return
n = len(self.temp_key_nodes)
maxsplit = n - self.commas - 1
if maxsplit < 0: maxsplit = 0
for subsegment in segment.split(b",", maxsplit):
self.temp_string += subsegment.strip()
self.temp_string_node = StringNode(self.temp_string)
self.temp_string = b""
self.temp_key_nodes[self.commas].value.append(
self.temp_string_node)
if self.commas < n - 1:
self.commas += 1
# Finish assignment on newline, except if there is a
# plus sign before the newline.
add_text(segment)
if segment.endswith(b"\n") and not self.skip_newlines_after_plus:
self.temp_key_nodes = []
def parse(self) -> RootNode:
"""
Parse preprocessed WML into a tree of tags and attributes.
"""
# parsing state
self.temp_string = b""
self.temp_string_node = None
self.commas = 0
self.temp_key_nodes = []
self.in_string = False
self.in_arrows = False
self.textdomain = "wesnoth"
self.translatable = False
self.root = RootNode()
self.parent_node = [self.root]
self.skip_newlines_after_plus = False
self.in_tag = b""
command_marker_byte = bytes([254])
input = self.preprocessed
if not input: input = self.path
for rawline in open(input, "rb"):
compos = rawline.find(command_marker_byte)
self.parser_line += 1
# Everything from chr(254) to newline is the command.
if compos != 0:
self.line_in_file += 1
if compos >= 0:
self.parse_line_without_commands(rawline[:compos])
self.handle_command(rawline[compos + 1:-1])
else:
self.parse_line_without_commands(rawline)
if self.keep_temp_dir is None and self.temp_dir:
if self.verbose:
print(("removing " + self.temp_dir))
shutil.rmtree(self.temp_dir, ignore_errors=True)
return self.root
def handle_command(self, com):
if com.startswith(b"line "):
self.last_wml_line = com[5:]
_ = self.last_wml_line.split(b" ")
self.chunk_start = [(_[i + 1], int(_[i])) for i in range(0, len(_), 2)]
self.line_in_file = self.chunk_start[0][1]
elif com.startswith(b"textdomain "):
self.textdomain = com[11:].decode("utf8")
else:
raise WMLError(self, "Unknown parser command: " + com.decode("utf8", "ignore"))
def get_all(self, **kw):
return self.root.get_all(**kw)
def get_text_val(self, name, default=None, translation=None):
return self.root.get_text_val(name, default, translation)
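# Illustrative use with preprocessing (the executable path and defines below are
# hypothetical; without a wesnoth executable, parse plain WML as shown in the
# module docstring):
#
#   p = Parser(wesnoth_exe="/usr/bin/wesnoth")
#   cfg = p.parse_file("data/units.cfg", defines="NORMAL")
#   for unit in cfg.get_all(tag="unit"):
#       print(unit.get_text_val("id"))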
def jsonify(tree, verbose=False, depth=1):
"""
Convert a Parser tree into JSON
If verbose, insert a linebreak after every brace and comma (put every
item on its own line), otherwise, condense everything into a single line.
"""
import json
def node_to_dict(n):
d = {}
tags = set(x.get_name() for x in n.get_all(tag=""))
for tag in tags:
d[tag] = [node_to_dict(x) for x in n.get_all(tag=tag)]
for att in n.get_all(att=""):
d[att.get_name()] = att.get_text()
return d
print(json.dumps(node_to_dict(tree), indent=depth if verbose else None))
def xmlify(tree, verbose=False, depth=0):
import xml.etree.ElementTree as ET
def node_to_et(n):
et = ET.Element(n.get_name())
for att in n.get_all(att=""):
attel = ET.Element(att.get_name())
attel.text = att.get_text()
et.append(attel)
for tag in n.get_all(tag=""):
et.append(node_to_et(tag))
return et
ET.ElementTree(node_to_et(tree.get_all()[0])).write(
sys.stdout, encoding="unicode")
if __name__ == "__main__":
arg = argparse.ArgumentParser()
arg.add_argument("-a", "--data-dir", help="directly passed on to wesnoth.exe")
arg.add_argument("-c", "--config-dir", help="directly passed on to wesnoth.exe")
arg.add_argument("-i", "--input", help="a WML file to parse")
arg.add_argument("-k", "--keep-temp", help="specify directory where to keep temp files")
arg.add_argument("-t", "--text", help="WML text to parse")
arg.add_argument("-w", "--wesnoth", help="path to wesnoth.exe")
arg.add_argument("-d", "--defines", help="comma separated list of WML defines")
arg.add_argument("-T", "--test", action="store_true")
arg.add_argument("-j", "--to-json", action="store_true")
arg.add_argument("-v", "--verbose", action="store_true")
arg.add_argument("-x", "--to-xml", action="store_true")
args = arg.parse_args()
if not args.input and not args.text and not args.test:
sys.stderr.write("No input given. Use -h for help.\n")
sys.exit(1)
if (args.wesnoth and not os.path.exists(args.wesnoth)):
sys.stderr.write("Wesnoth executable not found.\n")
sys.exit(1)
if not args.wesnoth:
print("Warning: Without the -w option WML is not preprocessed!",
file=sys.stderr)
if args.test:
print("Running tests")
p = Parser(args.wesnoth, args.config_dir,
args.data_dir)
if args.keep_temp:
p.keep_temp_dir = args.keep_temp
if args.verbose: p.verbose = True
only = None
def test2(input, expected, note, function):
if only and note != only: return
input = input.strip()
expected = expected.strip()
p.parse_text(input)
output = function(p).strip()
if output != expected:
print("__________")
print(("FAILED " + note))
print("INPUT:")
print(input)
print("OUTPUT:")
print(output)
print("EXPECTED:")
print(expected)
print("__________")
else:
print(("PASSED " + note))
def test(input, expected, note):
test2(input, expected, note, lambda p: p.root.debug())
def test_with_preprocessor(input, expected, note):
if not args.wesnoth:
print("SKIPPED WITHOUT PREPROCESSOR " + note)
return
test(input, expected, note)
test(
"""
[test]
a=1
[/test]
""", """
[test]
a='1'
[/test]
""", "simple")
test(
"""
[+foo]
a=1
[/foo]
""", """
[+foo]
a='1'
[/foo]
""", "+foo without foo in toplevel")
test(
"""
[foo]
[+bar]
a=1
[/bar]
[/foo]
""", """
[foo]
[+bar]
a='1'
[/bar]
[/foo]
""", "+foo without foo in child")
test(
"""
[test]
[foo]
a=1
[/foo]
[/test]
""", """
[test]
[foo]
a='1'
[/foo]
[/test]
""", "subtag, part 1")
test(
"""
[test]
[foo]
a=1
[/foo]
[/test]
[+test]
[+foo]
[/foo]
[/test]
""", """
[test]
[foo]
a='1'
[/foo]
[/test]
""", "subtag, part 2")
test(
"""
[test]
a, b, c = 1, 2, 3
[/test]
""", """
[test]
a='1'
b='2'
c='3'
[/test]
""", "multi assign")
test(<|fim▁hole|>[/test]
""", """
[test]
a='1'
b='2, 3'
[/test]
""", "multi assign 2")
test(
"""
[test]
a, b, c = 1, 2
[/test]
""", """
[test]
a='1'
b='2'
c=
[/test]
""", "multi assign 3")
test_with_preprocessor(
"""
#textdomain A
#define X
_ "abc"
#enddef
#textdomain B
[test]
x = _ "abc" + {X}
[/test]
""", """
[test]
x=_<B>'abc' .. _<A>'abc'
[/test]
""", "textdomain")
test(
"""
[test]
x,y = _1,_2
[/test]
""", """
[test]
x='_1'
y='_2'
[/test]
""", "underscores")
test(
"""
[test]
a = "a ""quoted"" word"
[/test]
""",
"""
[test]
a='a "quoted" word'
[/test]
""", "quoted")
test(
"""
[test]
code = <<
"quotes" here
""blah""
>>
[/test]
""",
"""
[test]
code='
"quotes" here
""blah""
'
[/test]
""", "quoted2")
test(
"""
foo="bar"+
"baz"
""",
"""
foo='bar' .. 'baz'
""", "multi line string")
test_with_preprocessor(
"""
#define baz
"baz"
#enddef
foo="bar"+{baz}
""",
"""
foo='bar' .. 'baz'
""", "defined multi line string")
test_with_preprocessor(
"""
foo="bar" + "baz" # blah
""",
"""
foo='bar' .. 'baz'
""", "comment after +")
test_with_preprocessor(
"""
#define baz
"baz"
#enddef
foo="bar" {baz}
""",
"""
foo='bar' .. 'baz'
""", "defined string concatenation")
test_with_preprocessor(
"""
#define A BLOCK
[{BLOCK}]
[/{BLOCK}]
#enddef
{A blah}
""",
"""
[blah]
[/blah]
""", "defined tag")
test2(
"""
[test]
a=1
b=2
a=3
b=4
[/test]
""", "3, 4", "multiatt",
lambda p:
p.get_all(tag = "test")[0].get_text_val("a") + ", " +
p.get_all(tag = "test")[0].get_text_val("b"))
sys.exit(0)
p = Parser(args.wesnoth, args.config_dir, args.data_dir)
if args.keep_temp:
p.keep_temp_dir = args.keep_temp
if args.verbose: p.verbose = True
if args.input:
p.parse_file(args.input, args.defines)
elif args.text:
p.parse_text(args.text, args.defines)
if args.to_json:
jsonify(p.root, True)
print()
elif args.to_xml:
print('<?xml version="1.0" encoding="UTF-8" ?>')
print('<root>')
xmlify(p.root, True, 1)
print('</root>')
else:
print((p.root.debug()))<|fim▁end|> | """
[test]
a, b = 1, 2, 3 |
<|file_name|>_drop_test2.js<|end_file_name|><|fim▁begin|>import { expect } from 'chai'
import _drop from '../../src/array/_drop2'
describe('_drop', function(){
<|fim▁hole|> it('is a function', function(){
expect(_drop).to.be.a('function')
})
it('returns an array', function(){
const droppedArray = _drop([5,7,2])
expect(droppedArray).to.be.a('array')
})
it('returns [2] when given [5,7,2], 2', function(){
const droppedArray = _drop([5,7,2], 2)
expect(droppedArray).to.be.deep.equal([2])
})
it('returns [7, 2] when given [5,7,2]', function(){
const droppedArray = _drop([5,7,2])
expect(droppedArray).to.be.deep.equal([7, 2])
})
it('returns [] when given [5,7,2], 17', function(){
const droppedArray = _drop([5,7,2], 17)
expect(droppedArray).to.be.deep.equal([])
})
})<|fim▁end|> | |
<|file_name|>value.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use lib::llvm::{llvm, UseRef, ValueRef};
use middle::trans::basic_block::BasicBlock;
use middle::trans::common::Block;
use std::libc::c_uint;
pub struct Value(ValueRef);
macro_rules! opt_val ( ($e:expr) => (
unsafe {
match $e {
p if p.is_not_null() => Some(Value(p)),
_ => None
}
}
))
/**
* Wrapper for LLVM ValueRef
*/
impl Value {
/// Returns the native ValueRef
pub fn get(&self) -> ValueRef {
let Value(v) = *self; v
}
/// Returns the BasicBlock that contains this value
pub fn get_parent(self) -> Option<BasicBlock> {
unsafe {
match llvm::LLVMGetInstructionParent(self.get()) {
p if p.is_not_null() => Some(BasicBlock(p)),
_ => None
}
}
}
/// Removes this value from its containing BasicBlock
pub fn erase_from_parent(self) {
unsafe {
llvm::LLVMInstructionEraseFromParent(self.get());
}
}
/// Returns the single dominating store to this value, if any
/// This only performs a search for a trivially dominating store. The store
/// must be the only user of this value, and there must not be any conditional
/// branches between the store and the given block.
pub fn get_dominating_store(self, bcx: &Block) -> Option<Value> {
match self.get_single_user().and_then(|user| user.as_store_inst()) {
Some(store) => {
store.get_parent().and_then(|store_bb| {
let mut bb = BasicBlock(bcx.llbb);
let mut ret = Some(store);
while bb.get() != store_bb.get() {
match bb.get_single_predecessor() {
Some(pred) => bb = pred,
None => { ret = None; break }
}
}
ret
})
}
_ => None
}
}
/// Returns the first use of this value, if any
pub fn get_first_use(self) -> Option<Use> {
unsafe {
match llvm::LLVMGetFirstUse(self.get()) {
u if u.is_not_null() => Some(Use(u)),
_ => None
}
}
}
/// Tests if there are no uses of this value
pub fn has_no_uses(self) -> bool {
self.get_first_use().is_none()
}
/// Returns the single user of this value
/// If there are no users or multiple users, this returns None
pub fn get_single_user(self) -> Option<Value> {
let mut iter = self.user_iter();
match (iter.next(), iter.next()) {
(Some(first), None) => Some(first),
_ => None
}
}
/// Returns an iterator for the users of this value
pub fn user_iter(self) -> Users {
Users {
next: self.get_first_use()
}
}
/// Returns the requested operand of this instruction
/// Returns None, if there's no operand at the given index
pub fn get_operand(self, i: uint) -> Option<Value> {
opt_val!(llvm::LLVMGetOperand(self.get(), i as c_uint))
}
/// Returns the Store represent by this value, if any
pub fn as_store_inst(self) -> Option<Value> {
opt_val!(llvm::LLVMIsAStoreInst(self.get()))
}
/// Tests if this value is a terminator instruction
pub fn is_a_terminator_inst(self) -> bool {
unsafe {
llvm::LLVMIsATerminatorInst(self.get()).is_not_null()
}<|fim▁hole|>}
pub struct Use(UseRef);
/**
* Wrapper for LLVM UseRef
*/
impl Use {
pub fn get(&self) -> UseRef {
let Use(v) = *self; v
}
pub fn get_user(self) -> Value {
unsafe {
Value(llvm::LLVMGetUser(self.get()))
}
}
pub fn get_next_use(self) -> Option<Use> {
unsafe {
match llvm::LLVMGetNextUse(self.get()) {
u if u.is_not_null() => Some(Use(u)),
_ => None
}
}
}
}
/// Iterator for the users of a value
pub struct Users {
priv next: Option<Use>
}
impl Iterator<Value> for Users {
fn next(&mut self) -> Option<Value> {
let current = self.next;
self.next = current.and_then(|u| u.get_next_use());
current.map(|u| u.get_user())
}
}<|fim▁end|> | } |
<|file_name|>app.js<|end_file_name|><|fim▁begin|>'use strict';
/**
* @ngdoc overview
* @name hciApp
* @description
* # hciApp
*
* Main module of the application.
*/
angular
.module('hciApp', [
'ngAnimate',<|fim▁hole|> 'ngResource',
'ngRoute',
'ngSanitize',
'ngTouch',
'ui.bootstrap',
'ngMaterial'
])
.config(function ($routeProvider) {
$routeProvider
.when('/main', {
templateUrl: 'views/main.html',
controller: 'MainCtrl'
})
.when('/main/:mode', {
templateUrl: 'views/main.html',
controller: 'MainCtrl'
})
.when('/about', {
templateUrl: 'views/about.html',
controller: 'AboutCtrl'
})
.when('/ideas', {
templateUrl: 'views/ideas.html',
controller: 'IdeasCtrl'
})
.when('/details', {
templateUrl: 'views/details.html',
controller: 'DetailsCtrl'
})
.otherwise({
redirectTo: '/main'
});
});<|fim▁end|> | 'ngCookies', |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(custom_derive, plugin)]
#![plugin(heapsize_plugin, plugins, serde_macros)]
#![crate_name = "gfx_traits"]
#![crate_type = "rlib"]
#![deny(unsafe_code)]
extern crate azure;
extern crate euclid;
extern crate heapsize;
extern crate layers;
extern crate msg;
extern crate serde;
pub mod color;
mod paint_listener;
pub use paint_listener::PaintListener;
use azure::azure_hl::Color;
use euclid::Matrix4D;
use euclid::rect::Rect;
use msg::constellation_msg::PipelineId;
use std::fmt::{self, Debug, Formatter};
use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize, Ordering};
/// The next ID that will be used for a special stacking context.
///
/// A special stacking context is a stacking context that is one of (a) the outer stacking context
/// of an element with `overflow: scroll`; (b) generated content; (c) both (a) and (b).
static NEXT_SPECIAL_STACKING_CONTEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;<|fim▁hole|>/// context.
///
/// Note that we assume that the top 16 bits of the address space are unused on the platform.
const SPECIAL_STACKING_CONTEXT_ID_MASK: usize = 0xffff;
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum LayerKind {
NoTransform,
HasTransform,
}
#[derive(Clone, PartialEq, Eq, Copy, Hash, Deserialize, Serialize, HeapSizeOf)]
pub enum LayerType {
/// A layer for the fragment body itself.
FragmentBody,
/// An extra layer created for a DOM fragments with overflow:scroll.
OverflowScroll,
/// A layer created to contain ::before pseudo-element content.
BeforePseudoContent,
/// A layer created to contain ::after pseudo-element content.
AfterPseudoContent,
}
/// The scrolling policy of a layer.
#[derive(Clone, PartialEq, Eq, Copy, Deserialize, Serialize, Debug, HeapSizeOf)]
pub enum ScrollPolicy {
/// These layers scroll when the parent receives a scrolling message.
Scrollable,
/// These layers do not scroll when the parent receives a scrolling message.
FixedPosition,
}
#[derive(Clone, PartialEq, Eq, Copy, Hash, Deserialize, Serialize, HeapSizeOf)]
pub struct LayerId(
/// The type of the layer. This serves to differentiate layers that share fragments.
LayerType,
/// The identifier for this layer's fragment, derived from the fragment memory address.
usize,
/// An index for identifying companion layers, synthesized to ensure that
/// content on top of this layer's fragment has the proper rendering order.
usize
);
impl Debug for LayerId {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
let LayerId(layer_type, id, companion) = *self;
let type_string = match layer_type {
LayerType::FragmentBody => "-FragmentBody",
LayerType::OverflowScroll => "-OverflowScroll",
LayerType::BeforePseudoContent => "-BeforePseudoContent",
LayerType::AfterPseudoContent => "-AfterPseudoContent",
};
write!(f, "{}{}-{}", id, type_string, companion)
}
}
impl LayerId {
/// FIXME(#2011, pcwalton): This is unfortunate. Maybe remove this in the future.
pub fn null() -> LayerId {
LayerId(LayerType::FragmentBody, 0, 0)
}
pub fn new_of_type(layer_type: LayerType, fragment_id: usize) -> LayerId {
LayerId(layer_type, fragment_id, 0)
}
pub fn companion_layer_id(&self) -> LayerId {
let LayerId(layer_type, id, companion) = *self;
LayerId(layer_type, id, companion + 1)
}
pub fn original(&self) -> LayerId {
let LayerId(layer_type, id, _) = *self;
LayerId(layer_type, id, 0)
}
pub fn kind(&self) -> LayerType {
self.0
}
}
/// All layer-specific information that the painting task sends to the compositor other than the
/// buffer contents of the layer itself.
#[derive(Copy, Clone, HeapSizeOf)]
pub struct LayerProperties {
/// An opaque ID. This is usually the address of the flow and index of the box within it.
pub id: LayerId,
/// The id of the parent layer.
pub parent_id: Option<LayerId>,
/// The position and size of the layer in pixels.
pub rect: Rect<f32>,
/// The background color of the layer.
pub background_color: Color,
/// The scrolling policy of this layer.
pub scroll_policy: ScrollPolicy,
/// The transform for this layer
pub transform: Matrix4D<f32>,
/// The perspective transform for this layer
pub perspective: Matrix4D<f32>,
/// The subpage that this layer represents. If this is `Some`, this layer represents an
/// iframe.
pub subpage_pipeline_id: Option<PipelineId>,
/// Whether this layer establishes a new 3d rendering context.
pub establishes_3d_context: bool,
/// Whether this layer scrolls its overflow area.
pub scrolls_overflow_area: bool,
}
/// A newtype struct for denoting the age of messages; prevents race conditions.
#[derive(PartialEq, Eq, Debug, Copy, Clone, PartialOrd, Ord, Deserialize, Serialize)]
pub struct Epoch(pub u32);
impl Epoch {
pub fn next(&mut self) {
self.0 += 1;
}
}
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
pub struct FrameTreeId(pub u32);
impl FrameTreeId {
pub fn next(&mut self) {
self.0 += 1;
}
}
/// A unique ID for every stacking context.
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, HeapSizeOf, PartialEq, Serialize)]
pub struct StackingContextId(
/// The identifier for this StackingContext, derived from the Flow's memory address
/// and fragment type. As a space optimization, these are combined into a single word.
usize
);
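// Illustrative packing (addresses are made up): a fragment body keeps its
// word-aligned flow address as the id, whose low 2 bits are already zero; a
// ::before pseudo-element instead gets a synthesized id of the form
// (special_counter << 2) | (FragmentType::BeforePseudoContent as usize), so
// fragment_type() reads the low 2 bits and id() masks them away.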
impl StackingContextId {
#[inline]
pub fn new(id: usize) -> StackingContextId {
StackingContextId::new_of_type(id, FragmentType::FragmentBody)
}
/// Returns a new stacking context ID for a special stacking context.
fn next_special_id() -> usize {
// We shift this left by 2 to make room for the fragment type ID.
((NEXT_SPECIAL_STACKING_CONTEXT_ID.fetch_add(1, Ordering::SeqCst) + 1) << 2) &
SPECIAL_STACKING_CONTEXT_ID_MASK
}
#[inline]
pub fn new_of_type(id: usize, fragment_type: FragmentType) -> StackingContextId {
debug_assert_eq!(id & (fragment_type as usize), 0);
if fragment_type == FragmentType::FragmentBody {
StackingContextId(id)
} else {
StackingContextId(StackingContextId::next_special_id() | (fragment_type as usize))
}
}
/// Returns an ID for the stacking context that forms the outer stacking context of an element
/// with `overflow: scroll`.
#[inline(always)]
pub fn new_outer(fragment_type: FragmentType) -> StackingContextId {
StackingContextId(StackingContextId::next_special_id() | (fragment_type as usize))
}
#[inline]
pub fn fragment_type(&self) -> FragmentType {
FragmentType::from_usize(self.0 & 3)
}
#[inline]
pub fn id(&self) -> usize {
self.0 & !3
}
/// Returns the stacking context ID for the outer document/layout root.
#[inline]
pub fn root() -> StackingContextId {
StackingContextId(0)
}
/// Returns true if this is a special stacking context.
///
/// A special stacking context is a stacking context that is one of (a) the outer stacking
/// context of an element with `overflow: scroll`; (b) generated content; (c) both (a) and (b).
#[inline]
pub fn is_special(&self) -> bool {
(self.0 & !SPECIAL_STACKING_CONTEXT_ID_MASK) == 0
}
}
/// The type of fragment that a stacking context represents.
///
/// This can only ever grow to maximum 4 entries. That's because we cram the value of this enum
/// into the lower 2 bits of the `StackingContextId`, which otherwise contains a 32-bit-aligned
/// heap address.
#[derive(Clone, Debug, PartialEq, Eq, Copy, Hash, Deserialize, Serialize, HeapSizeOf)]
pub enum FragmentType {
/// A StackingContext for the fragment body itself.
FragmentBody,
/// A StackingContext created to contain ::before pseudo-element content.
BeforePseudoContent,
/// A StackingContext created to contain ::after pseudo-element content.
AfterPseudoContent,
}
impl FragmentType {
#[inline]
pub fn from_usize(n: usize) -> FragmentType {
debug_assert!(n < 3);
match n {
0 => FragmentType::FragmentBody,
1 => FragmentType::BeforePseudoContent,
_ => FragmentType::AfterPseudoContent,
}
}
}<|fim▁end|> |
/// If none of the bits outside this mask are set, the stacking context is a special stacking |
<|file_name|>SecretFormField.tsx<|end_file_name|><|fim▁begin|>import omit from 'lodash/omit';
import React, { InputHTMLAttributes, FunctionComponent } from 'react';
import { FormField } from '../FormField/FormField';
import { Button } from '../Button/Button';
import { css, cx } from 'emotion';
export interface Props extends Omit<InputHTMLAttributes<HTMLInputElement>, 'onReset'> {
// Function to use when reset is clicked. Means you have to reset the input value yourself as this is uncontrolled
// component (or do something else if required).
onReset: (event: React.SyntheticEvent<HTMLButtonElement>) => void;
isConfigured: boolean;
label?: string;
tooltip?: string;
labelWidth?: number;
inputWidth?: number;
// Placeholder of the input field when in non configured state.
placeholder?: string;
}
const getSecretFormFieldStyles = () => {
return {
noRadiusInput: css`
border-bottom-right-radius: 0 !important;
border-top-right-radius: 0 !important;
`,<|fim▁hole|> border-bottom-left-radius: 0 !important;
border-top-left-radius: 0 !important;
`,
};
};
/**
* Form field that has 2 states configured and not configured. If configured it will not show its contents and adds
* a reset button that will clear the input and makes it accessible. In non configured state it behaves like normal
* form field. This is used for passwords or anything that is encrypted on the server and is later returned encrypted
* to the user (like datasource passwords).
*/
export const SecretFormField: FunctionComponent<Props> = ({
label = 'Password',
labelWidth,
inputWidth = 12,
onReset,
isConfigured,
tooltip,
placeholder = 'Password',
...inputProps
}: Props) => {
const styles = getSecretFormFieldStyles();
return (
<FormField
label={label!}
tooltip={tooltip!}
labelWidth={labelWidth}
inputEl={
isConfigured ? (
<>
<input
type="text"
className={cx(`gf-form-input width-${inputWidth}`, styles.noRadiusInput)}
disabled={true}
value="configured"
{...omit(inputProps, 'value')}
/>
<Button onClick={onReset} variant="secondary">
Reset
</Button>
</>
) : (
<input
type="password"
className={`gf-form-input width-${inputWidth}`}
placeholder={placeholder}
{...inputProps}
/>
)
}
/>
);
};
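// Sketch of typical usage in a datasource config editor (the field names and
// handlers below are hypothetical, not part of this module):
//
//   <SecretFormField
//     label="API Key"
//     isConfigured={Boolean(secureJsonFields.apiKey)}
//     value={secureJsonData.apiKey || ''}
//     onChange={event => onApiKeyChange(event.currentTarget.value)}
//     onReset={onResetApiKey}
//   />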
SecretFormField.displayName = 'SecretFormField';<|fim▁end|> | noRadiusButton: css` |
<|file_name|>pythonicos.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from random import shuffle
class Carta():
def __init__(self, numero, naipe):
self.numero = numero
self.naipe = naipe
def __repr__(self):
return '%s de %s' % (self.numero, self.naipe)
class Baralho():
def __init__(self):
self._cartas = [Carta(numero, naipe) for numero in 'As 1 2 3 4 5 6 7 8 9 10 Q J K'.split()
for naipe in 'Ouros Espadas Copas Paus'.split()]
def __getitem__(self, index):
return self._cartas[index]
def __setitem__(self, key, value):
self._cartas[key] = value
def __len__(self):
return len(self._cartas)
print Carta('As', 'Paus')
baralho = Baralho()
baralho[55] = Carta('As', 'Paus')
shuffle(baralho)
for carta in baralho:
print carta
print baralho[0]
class Vetor():
def __init__(self, x, y):
self.y = y
self.x = x
def __repr__(self):
return '(%s, %s)' % (self.x, self.y)
def __add__(self, other):
return Vetor(self.x + other.x, self.y + other.y)
def __eq__(self, other):
return self.x==other.x and self.y==other.y
<|fim▁hole|>vetor2 = Vetor(1, 1)
print vetor1 + vetor2
print vetor1 == vetor2<|fim▁end|> | vetor1 = Vetor(1, 1) |
<|file_name|>vacation.py<|end_file_name|><|fim▁begin|>def hotel_cost(nights):
return 140 * nights
def plane_ride_cost(city):<|fim▁hole|> elif city == "Pittsburgh":
return 222
elif city == "Los Angeles":
return 475
def rental_car_cost(days):
total_car = days * 40
if days >= 7:
total_car -= 50
elif days >= 3:
total_car -= 20
return total_car
def trip_cost(city, days):
return rental_car_cost(days) + plane_ride_cost(city) + hotel_cost(days)
#invoke function here
print "The total cost for your trip comes to : ", trip_cost("Tampa", 7)<|fim▁end|> | if city == "Charlotte":
return 183
elif city == "Tampa":
return 220 |
<|file_name|>test_envcheckr.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_envcheckr
----------------------------------<|fim▁hole|>
import pytest
from envcheckr import envcheckr
def test_parse_lines():
lines_a = envcheckr.parse_lines('tests/env')
assert len(lines_a) == 3
lines_b = envcheckr.parse_lines('tests/env.example')
assert len(lines_b) == 7
def test_parse_key():
lines = envcheckr.parse_lines('tests/env')
assert(envcheckr.parse_key(lines[0])) == 'FRUIT'
assert(envcheckr.parse_key(lines[1])) == 'DRINK'
assert(envcheckr.parse_key(lines[2])) == 'ANIMAL'
def test_get_missing_keys():
file_a = 'tests/env'
file_b = 'tests/env.example'
missing_keys = envcheckr.get_missing_keys(file_a, file_b)
assert(len(missing_keys)) == 4
assert(missing_keys[0]) == 'FOOD=Pizza\n'
assert(missing_keys[1]) == 'CODE=Python\n'
assert(missing_keys[2]) == 'SPORT=Football\n'
assert(missing_keys[3]) == 'CITY=Brisbane\n'<|fim▁end|> |
Tests for `envcheckr` module.
"""
|
<|file_name|>tree.py<|end_file_name|><|fim▁begin|>"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
# Nelson Liu <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE,
"mae": _criterion.MAE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
min_impurity_split,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.class_weight = class_weight
self.presort = presort
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity, unlike indexing
            # with [:, np.newaxis], which does not preserve it.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
            raise ValueError("min_samples_split must be at least 2"
                             " or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
            raise ValueError("min_samples_leaf must be at least 1 "
                             "or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
            raise ValueError("min_weight_fraction_leaf must be in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
if self.min_impurity_split < 0.:
raise ValueError("min_impurity_split must be greater than or equal "
"to 0")
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_,
n_samples)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth, self.min_impurity_split)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes, self.min_impurity_split)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""<|fim▁hole|>
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non zero elements
indicates that the samples goes through the nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
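        As an illustration, with ``n_features = 100``: ``max_features=0.3``
        considers ``int(0.3 * 100) = 30`` features at each split, "sqrt"
        considers 10 and "log2" considers ``int(log2(100)) = 6``.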
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
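        For example, with ``y = [0, 0, 0, 1]`` there are ``n_samples = 4``,
        ``n_classes = 2`` and ``np.bincount(y) = [3, 1]``, so the "balanced"
        weights are ``4 / (2 * 3) = 0.67`` (roughly) for class 0 and
        ``4 / (2 * 1) = 2.0`` for class 1.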
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. If the impurity
of a node is below the threshold, the node is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
min_impurity_split=min_impurity_split,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
min_impurity_split=1e-7,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=random_state)<|fim▁end|> | Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17 |
<|file_name|>handlers.py<|end_file_name|><|fim▁begin|>import urllib
from urlparse import urlparse
from django.conf import settings
from django.core.handlers.wsgi import WSGIHandler
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
class StaticFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to the static files directory, as
defined by the STATICFILES_URL setting, and serves those files.
"""
def __init__(self, application, media_dir=None):
self.application = application
if media_dir:
self.media_dir = media_dir
else:
self.media_dir = self.get_media_dir()
self.media_url = urlparse(self.get_media_url())
if settings.DEBUG:
utils.check_settings()
super(StaticFilesHandler, self).__init__()
def get_media_dir(self):
return settings.STATICFILES_ROOT
def get_media_url(self):
return settings.STATICFILES_URL
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the media_url
* the request's path isn't under the media path (or equal)
* settings.DEBUG isn't True
"""
return (self.media_url[2] != path and
path.startswith(self.media_url[2]) and not self.media_url[1])
def file_path(self, url):
"""
Returns the relative path to the media file on disk for the given URL.
The passed URL is assumed to begin with ``media_url``. If the
resultant file path is outside the media directory, then a ValueError
is raised.
"""
# Remove ``media_url``.
relative_url = url[len(self.media_url[2]):]
return urllib.url2pathname(relative_url)
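    # Illustrative sketch (the values are assumed, not taken from a real
    # settings module): with STATICFILES_URL = '/static/', self.media_url[2]
    # is '/static/', so file_path('/static/css/app.css') strips that prefix
    # and returns the platform path for 'css/app.css'.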
def serve(self, request):
"""
Actually serves the request path.
"""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404, e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
return super(StaticFilesHandler, self).get_response(request)
def __call__(self, environ, start_response):
<|fim▁hole|> if not self._should_handle(environ['PATH_INFO']):
return self.application(environ, start_response)
return super(StaticFilesHandler, self).__call__(environ, start_response)<|fim▁end|> | |
<|file_name|>_shared-libs.module.ts<|end_file_name|><|fim▁begin|>import { NgModule } from '@angular/core';
import { FormsModule } from '@angular/forms';
import { HttpModule } from '@angular/http';
import { CommonModule } from '@angular/common';
import { NgbModule } from '@ng-bootstrap/ng-bootstrap';
import { NgJhipsterModule } from 'ng-jhipster';
import { InfiniteScrollModule } from 'angular2-infinite-scroll';
@NgModule({
imports: [
NgbModule.forRoot(),
NgJhipsterModule.forRoot({
<%_ if (enableTranslation) { _%>
i18nEnabled: true,
defaultI18nLang: '<%= nativeLanguage %>'<|fim▁hole|> <%_ } _%>
}),
InfiniteScrollModule
],
exports: [
FormsModule,
HttpModule,
CommonModule,
NgbModule,
NgJhipsterModule,
InfiniteScrollModule
]
})
export class <%=angular2AppName%>SharedLibsModule {}<|fim▁end|> | |
<|file_name|>soccer_PK_reward.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import rospy
from std_msgs.msg import Float32
import numpy as np
import soccer_PK.utils
rospy.init_node("reward")
pub = rospy.Publisher("reward", Float32, queue_size=10)
rate = rospy.Rate(3)
rospy.wait_for_service('/gazebo/get_model_state')
soccer_PK.utils.reset_world()
# initial position
ball_prev = 3.25
episode = 1<|fim▁hole|> toc = tic
prev_reward = None
while toc - tic < 10:
done = False
# pub.publish(reward)
ball_locationx ,ball_locationy = soccer_PK.utils.get_ball_location()
# Goal
if ball_locationx > 4.5:
rospy.loginfo("GOAL!!!")
# save log file ($HOME/.ros/)
f = open('episode_result.log', 'a')
f.write('episode'+str(episode)+': 4.5\n')
f.close()
# reset
episode += 1
reward = 10
done = True
rospy.set_param("reward_value",[reward, done])
tic = rospy.get_time()
soccer_PK.utils.reset_world()
rospy.sleep(1)
        # if the ball doesn't reach the goal
reward = (ball_prev - ball_locationx) / ball_prev
if prev_reward != reward:
rospy.set_param("reward_value",[reward, done])
prev_reward = reward
toc = rospy.get_time()
reward = -10
done = True
prev_reward = reward
# pub.publish(reward)
rospy.set_param("reward_value",[reward, done])
ball_locationx ,ball_locationy = soccer_PK.utils.get_ball_location()
f = open('episode_result.log', 'a')
f.write('episode'+str(episode)+': '+str(ball_locationx)+'\n')
f.close()
episode += 1
soccer_PK.utils.reset_world()
rospy.sleep(1)
rate.sleep()<|fim▁end|> |
while not rospy.is_shutdown():
tic = rospy.get_time() |
<|file_name|>Pathway.ts<|end_file_name|><|fim▁begin|>import { Universe } from '@ephox/boss';
import * as Simplify from '../../pathway/Simplify';
type SimplifyFn = <E, D>(universe: Universe<E, D>, elements: E[]) => E[];
/**<|fim▁hole|> * @see Simplify.simplify()
*/
const simplify: SimplifyFn = Simplify.simplify;
export {
simplify
};<|fim▁end|> | |
<|file_name|>DbHelper.java<|end_file_name|><|fim▁begin|>package com.igoldin.qa.school.appmanager;
import com.igoldin.qa.school.model.ContactData;
import com.igoldin.qa.school.model.Contacts;
import com.igoldin.qa.school.model.GroupData;
import com.igoldin.qa.school.model.Groups;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.hibernate.boot.MetadataSources;
import org.hibernate.boot.registry.StandardServiceRegistry;
import org.hibernate.boot.registry.StandardServiceRegistryBuilder;
import java.util.List;
public class DbHelper {
private final SessionFactory sessionFactory;
public DbHelper() {
// A SessionFactory is set up once for an application!
final StandardServiceRegistry registry = new StandardServiceRegistryBuilder()
.configure() // configures settings from hibernate.cfg.xml
.build();
sessionFactory = new MetadataSources( registry ).buildMetadata().buildSessionFactory();
}
public Groups groups() {
Session session = sessionFactory.openSession();
session.beginTransaction();
List<GroupData> result = session.createQuery("from GroupData" ).list();
session.getTransaction().commit();
session.close();
return new Groups(result);
}
public Contacts contacts() {
Session session = sessionFactory.openSession();
session.beginTransaction();
List<ContactData> result = session.createQuery("from ContactData where deprecated = '0000-00-00 00:00:00'" ).list();
session.getTransaction().commit();
session.close();
return new Contacts(result);
}<|fim▁hole|>}<|fim▁end|> | |
<|file_name|>af2de80654b6_add_default_compile_yara_rule_on_save_.py<|end_file_name|><|fim▁begin|>"""Add default COMPILE_YARA_RULE_ON_SAVE setting
Revision ID: af2de80654b6
Revises: 2f0f6d26a505
Create Date: 2018-11-11 19:26:53.631142
"""
from alembic import op
import sqlalchemy as sa
from app.models import cfg_settings
import datetime
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'af2de80654b6'
down_revision = '2f0f6d26a505'
branch_labels = None
depends_on = None
def upgrade():
date_created = datetime.datetime.now().isoformat()
date_modified = datetime.datetime.now().isoformat()
op.bulk_insert(
cfg_settings.Cfg_settings.__table__,
[
{"key": "COMPILE_YARA_RULE_ON_SAVE", "value": "1", "public": True, "date_created": date_created,
"date_modified": date_modified,
"description": "If true, don't save yara rule changes if they are in the draft or release state unless they compile."},
]
)<|fim▁hole|>
def downgrade():
keys = ["COMPILE_YARA_RULE_ON_SAVE"]
for key in keys:
op.execute("""DELETE from cfg_settings where `key`='%s';""" % (key))<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""Parallel testing, supporting arbitrary collection ordering
The Workflow
------------
- Master py.test process starts up, inspects config to decide how many slave to start, if at all
- env['parallel_base_urls'] is inspected first
- py.test config.option.appliances and the related --appliance cmdline flag are used
if env['parallel_base_urls'] isn't set
- if neither are set, no parallelization happens
- Slaves are started
- Master runs collection, blocks until slaves report their collections
- Slaves each run collection and submit them to the master, then block inside their runtest loop,
waiting for tests to run
- Master diffs slave collections against its own; the test ids are verified to match
across all nodes
- Master enters main runtest loop, uses a generator to build lists of test groups which are then
sent to slaves, one group at a time
- For each phase of each test, the slave serializes test reports, which are then unserialized on
the master and handed to the normal pytest reporting hooks, which is able to deal with test
reports arriving out of order
- Before running the last test in a group, the slave will request more tests from the master
- If more tests are received, they are run
- If no tests are received, the slave will shut down after running its final test
- After all slaves are shut down, the master will do its end-of-session reporting as usual, and
shut down
"""
from itertools import groupby
import difflib
import json
import os
import signal
import subprocess
from collections import defaultdict, deque, namedtuple
from datetime import datetime
from itertools import count
import attr
from threading import Thread
from time import sleep, time
import pytest
import zmq
from _pytest import runner
from fixtures import terminalreporter
from fixtures.parallelizer import remote
from fixtures.pytest_store import store
from cfme.utils import at_exit, conf
from cfme.utils.log import create_sublogger
from cfme.utils.path import conf_path
# Initialize slaveid to None, indicating this as the master process
# slaves will set this to a unique string when they're initialized
conf.runtime['env']['slaveid'] = None
if not conf.runtime['env'].get('ts'):
ts = str(time())
conf.runtime['env']['ts'] = ts
def pytest_addhooks(pluginmanager):
import hooks
pluginmanager.add_hookspecs(hooks)
@pytest.mark.trylast
def pytest_configure(config):
"""Configures the parallel session, then fires pytest_parallel_configured."""
reporter = terminalreporter.reporter()
holder = config.pluginmanager.get_plugin("appliance-holder")
appliances = holder.appliances
if len(appliances) > 1:
session = ParallelSession(config, appliances)
config.pluginmanager.register(session, "parallel_session")
store.parallelizer_role = 'master'
reporter.write_line(
'As a parallelizer master kicking off parallel session for these {} appliances'.format(
len(appliances)),
green=True)
config.hook.pytest_parallel_configured(parallel_session=session)
else:
reporter.write_line('No parallelization required', green=True)
config.hook.pytest_parallel_configured(parallel_session=None)
def handle_end_session(signal, frame):
# when signaled, end the current test session immediately
if store.parallel_session:
store.parallel_session.session_finished = True
signal.signal(signal.SIGQUIT, handle_end_session)
@attr.s(hash=False)
class SlaveDetail(object):
slaveid_generator = ('slave{:02d}'.format(i) for i in count())
appliance = attr.ib()
id = attr.ib(default=attr.Factory(
lambda: next(SlaveDetail.slaveid_generator)))
forbid_restart = attr.ib(default=False, init=False)
tests = attr.ib(default=attr.Factory(set), repr=False)
process = attr.ib(default=None, repr=False)
provider_allocation = attr.ib(default=attr.Factory(list), repr=False)
def start(self):
if self.forbid_restart:
return
devnull = open(os.devnull, 'w')
# worker output redirected to null; useful info comes via messages and logs
self.process = subprocess.Popen(
['python', remote.__file__, self.id, self.appliance.as_json, conf.runtime['env']['ts']],
stdout=devnull,
)
at_exit(self.process.kill)
def poll(self):
if self.process is not None:
return self.process.poll()
class ParallelSession(object):
def __init__(self, config, appliances):
self.config = config
self.session = None
self.session_finished = False
self.countfailures = 0
self.collection = []
self.sent_tests = 0
self.log = create_sublogger('master')
self.maxfail = config.getvalue("maxfail")
self._failed_collection_errors = {}
self.terminal = store.terminalreporter
self.trdist = None
self.slaves = {}
self.test_groups = self._test_item_generator()
self._pool = []
from cfme.utils.conf import cfme_data
self.provs = sorted(set(cfme_data['management_systems'].keys()),
key=len, reverse=True)
self.used_prov = set()
self.failed_slave_test_groups = deque()
self.slave_spawn_count = 0
self.appliances = appliances
# set up the ipc socket
zmq_endpoint = 'ipc://{}'.format(
config.cache.makedir('parallelize').join(str(os.getpid())))
ctx = zmq.Context.instance()
self.sock = ctx.socket(zmq.ROUTER)
self.sock.bind(zmq_endpoint)
# clean out old slave config if it exists
slave_config = conf_path.join('slave_config.yaml')
slave_config.check() and slave_config.remove()
# write out the slave config
conf.runtime['slave_config'] = {
'args': self.config.args,
'options': dict( # copy to avoid aliasing
self.config.option.__dict__,
use_sprout=False, # Slaves don't use sprout
),
'zmq_endpoint': zmq_endpoint,
}
if hasattr(self, "slave_appliances_data"):
conf.runtime['slave_config']["appliance_data"] = self.slave_appliances_data
conf.save('slave_config')
for appliance in self.appliances:
slave_data = SlaveDetail(appliance=appliance)
self.slaves[slave_data.id] = slave_data
for slave in sorted(self.slaves):
self.print_message("using appliance {}".format(self.slaves[slave].appliance.url),
slave, green=True)
def _slave_audit(self):
# XXX: There is currently no mechanism to add or remove slave_urls, short of
# firing up the debugger and doing it manually. This is making room for
# planned future abilities to dynamically add and remove slaves via automation
# check for unexpected slave shutdowns and redistribute tests
for slave in self.slaves.values():
returncode = slave.poll()
if returncode:
slave.process = None
if returncode == -9:
msg = '{} killed due to error, respawning'.format(slave.id)
else:
msg = '{} terminated unexpectedly with status {}, respawning'.format(
slave.id, returncode)
if slave.tests:
failed_tests, slave.tests = slave.tests, set()
num_failed_tests = len(failed_tests)
self.sent_tests -= num_failed_tests
msg += ' and redistributing {} tests'.format(num_failed_tests)
self.failed_slave_test_groups.append(failed_tests)
self.print_message(msg, purple=True)
# If a slave was terminated for any reason, kill that slave
# the terminated flag implies the appliance has died :(
for slave in list(self.slaves.values()):
if slave.forbid_restart:
if slave.process is None:
self.config.hook.pytest_miq_node_shutdown(
config=self.config, nodeinfo=slave.appliance.url)
del self.slaves[slave.id]
else:
# no hook call here, a future audit will handle the fallout
self.print_message(
"{}'s appliance has died, deactivating slave".format(slave.id))
self.interrupt(slave)
else:
if slave.process is None:
slave.start()
self.slave_spawn_count += 1
def send(self, slave, event_data):
"""Send data to slave.
``event_data`` will be serialized as JSON, and so must be JSON serializable
"""
event_json = json.dumps(event_data)
self.sock.send_multipart([slave.id, '', event_json])
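    # A rough sketch of the framing used on the wire (inferred from send()
    # above and recv() below, not a formal protocol spec): every message is a
    # three-part ZMQ multipart frame [slaveid, '', json_payload], and each
    # slave-originated payload carries an '_event_name' key such as
    # 'collectionfinish', 'need_tests', 'runtest_logreport' or 'shutdown'.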
def recv(self):
# poll the zmq socket, populate the recv queue deque with responses
events = zmq.zmq_poll([(self.sock, zmq.POLLIN)], 50)
if not events:
return None, None, None
slaveid, _, event_json = self.sock.recv_multipart(flags=zmq.NOBLOCK)
event_data = json.loads(event_json)
event_name = event_data.pop('_event_name')
if slaveid not in self.slaves:
self.log.error("message from terminated worker %s %s %s",
slaveid, event_name, event_data)
return None, None, None
return self.slaves[slaveid], event_data, event_name
def print_message(self, message, prefix='master', **markup):
"""Print a message from a node to the py.test console
Args:
message: The message to print
**markup: If set, overrides the default markup when printing the message
"""
# differentiate master and slave messages by default
prefix = getattr(prefix, 'id', prefix)
if not markup:
if prefix == 'master':
markup = {'blue': True}
else:
markup = {'cyan': True}
stamp = datetime.now().strftime("%Y%m%d %H:%M:%S")
self.terminal.write_ensure_prefix(
'({})[{}] '.format(prefix, stamp), message, **markup)
def ack(self, slave, event_name):
"""Acknowledge a slave's message"""
self.send(slave, 'ack {}'.format(event_name))
def monitor_shutdown(self, slave):
# non-daemon so slaves get every opportunity to shut down cleanly
shutdown_thread = Thread(target=self._monitor_shutdown_t,
args=(slave.id, slave.process))
shutdown_thread.start()
def _monitor_shutdown_t(self, slaveid, process):
# a KeyError here means self.slaves got mangled, indicating a problem elsewhere
if process is None:
            self.log.warning('Slave was missing when trying to monitor shutdown')
            return
def sleep_and_poll():
start_time = time()
# configure the polling logic
polls = 0
# how often to poll
poll_sleep_time = .5
# how often to report (calculated to be around once a minute based on poll_sleep_time)
poll_report_modulo = 60 / poll_sleep_time
# maximum time to wait
poll_num_sec = 300
while (time() - start_time) < poll_num_sec:
polls += 1
yield
if polls % poll_report_modulo == 0:
remaining_time = int(poll_num_sec - (time() - start_time))
self.print_message(
'{} still shutting down, '
'will continue polling for {} seconds '
.format(slaveid, remaining_time), blue=True)
sleep(poll_sleep_time)
# start the poll
for poll in sleep_and_poll():
ec = process.poll()
if ec is None:
continue
else:
if ec == 0:
self.print_message('{} exited'.format(slaveid), green=True)
else:
self.print_message('{} died'.format(slaveid), red=True)
break
else:
self.print_message('{} failed to shut down gracefully; killed'.format(slaveid),
red=True)
process.kill()
def interrupt(self, slave, **kwargs):
"""Nicely ask a slave to terminate"""<|fim▁hole|> slave.forbid_restart = True
if slave.poll() is None:
slave.process.send_signal(subprocess.signal.SIGINT)
self.monitor_shutdown(slave, **kwargs)
def kill(self, slave, **kwargs):
"""Rudely kill a slave"""
slave.forbid_restart = True
if slave.poll() is None:
slave.process.kill()
self.monitor_shutdown(slave, **kwargs)
def send_tests(self, slave):
"""Send a slave a group of tests"""
try:
tests = list(self.failed_slave_test_groups.popleft())
except IndexError:
tests = self.get(slave)
self.send(slave, tests)
slave.tests.update(tests)
collect_len = len(self.collection)
tests_len = len(tests)
self.sent_tests += tests_len
if tests:
self.print_message('sent {} tests to {} ({}/{}, {:.1f}%)'.format(
tests_len, slave.id, self.sent_tests, collect_len,
self.sent_tests * 100. / collect_len
))
return tests
def pytest_sessionstart(self, session):
"""pytest sessionstart hook
- sets up distributed terminal reporter
- sets up zmp ipc socket for the slaves to use
- writes pytest options and args to slave_config.yaml
- starts the slaves
- register atexit kill hooks to destroy slaves at the end if things go terribly wrong
"""
# If reporter() gave us a fake terminal reporter in __init__, the real
# terminal reporter is registered by now
self.terminal = store.terminalreporter
self.trdist = TerminalDistReporter(self.config, self.terminal)
self.config.pluginmanager.register(self.trdist, "terminaldistreporter")
self.session = session
def pytest_runtestloop(self):
"""pytest runtest loop
- Disable the master terminal reporter hooks, so we can add our own handlers
that include the slaveid in the output
- Send tests to slaves when they ask
- Log the starting of tests and test results, including slave id
- Handle clean slave shutdown when they finish their runtest loops
- Restore the master terminal reporter after testing so we get the final report
"""
# Build master collection for slave diffing and distribution
self.collection = [item.nodeid for item in self.session.items]
# Fire up the workers after master collection is complete
        # master and the first slave share an appliance; this is a workaround to prevent a slave
# from altering an appliance while master collection is still taking place
for slave in self.slaves.values():
slave.start()
try:
self.print_message("Waiting for {} slave collections".format(len(self.slaves)),
red=True)
# Turn off the terminal reporter to suppress the builtin logstart printing
terminalreporter.disable()
while True:
# spawn/kill/replace slaves if needed
self._slave_audit()
if not self.slaves:
# All slaves are killed or errored, we're done with tests
self.print_message('all slaves have exited', yellow=True)
self.session_finished = True
if self.session_finished:
break
slave, event_data, event_name = self.recv()
if event_name == 'message':
message = event_data.pop('message')
markup = event_data.pop('markup')
# messages are special, handle them immediately
self.print_message(message, slave, **markup)
self.ack(slave, event_name)
elif event_name == 'collectionfinish':
slave_collection = event_data['node_ids']
# compare slave collection to the master, all test ids must be the same
self.log.debug('diffing {} collection'.format(slave.id))
diff_err = report_collection_diff(
slave.id, self.collection, slave_collection)
if diff_err:
self.print_message(
'collection differs, respawning', slave.id,
purple=True)
self.print_message(diff_err, purple=True)
self.log.error('{}'.format(diff_err))
self.kill(slave)
slave.start()
else:
self.ack(slave, event_name)
elif event_name == 'need_tests':
self.send_tests(slave)
self.log.info('starting master test distribution')
elif event_name == 'runtest_logstart':
self.ack(slave, event_name)
self.trdist.runtest_logstart(
slave.id,
event_data['nodeid'],
event_data['location'])
elif event_name == 'runtest_logreport':
self.ack(slave, event_name)
report = unserialize_report(event_data['report'])
if report.when in ('call', 'teardown'):
slave.tests.discard(report.nodeid)
self.trdist.runtest_logreport(slave.id, report)
elif event_name == 'internalerror':
self.ack(slave, event_name)
self.print_message(event_data['message'], slave, purple=True)
self.kill(slave)
elif event_name == 'shutdown':
self.config.hook.pytest_miq_node_shutdown(
config=self.config, nodeinfo=slave.appliance.url)
self.ack(slave, event_name)
del self.slaves[slave.id]
self.monitor_shutdown(slave)
# total slave spawn count * 3, to allow for each slave's initial spawn
# and then each slave (on average) can fail two times
if self.slave_spawn_count >= len(self.appliances) * 3:
self.print_message(
'too many slave respawns, exiting',
red=True, bold=True)
raise KeyboardInterrupt('Interrupted due to slave failures')
except Exception as ex:
self.log.error('Exception in runtest loop:')
self.log.exception(ex)
self.print_message(str(ex))
raise
finally:
terminalreporter.enable()
# Suppress other runtestloop calls
return True
def _test_item_generator(self):
for tests in self._modscope_item_generator():
yield tests
def _modscope_item_generator(self):
# breaks out tests by module, can work just about any way we want
# as long as it yields lists of tests id from the master collection
sent_tests = 0
collection_len = len(self.collection)
def get_fspart(nodeid):
return nodeid.split('::')[0]
for fspath, gen_moditems in groupby(self.collection, key=get_fspart):
for tests in self._modscope_id_splitter(gen_moditems):
sent_tests += len(tests)
self.log.info('{} tests remaining to send'.format(
collection_len - sent_tests))
yield list(tests)
def _modscope_id_splitter(self, module_items):
# given a list of item ids from one test module, break up tests into groups with the same id
parametrized_ids = defaultdict(list)
for item in module_items:
if '[' in item:
# split on the leftmost bracket, then strip everything after the rightmost bracket
# so 'test_module.py::test_name[parametrized_id]' becomes 'parametrized_id'
parametrized_id = item.split('[')[1].rstrip(']')
else:
# no bracket in the id, so the item has no parametrized id
parametrized_id = 'no params'
parametrized_ids[parametrized_id].append(item)
for id, tests in parametrized_ids.items():
if tests:
self.log.info('sent tests with param {} {!r}'.format(id, tests))
yield tests
def get(self, slave):
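# Hand the next group of tests to the requesting slave. Provider-parametrized
# groups prefer slaves that already have that provider allocated; a new
# provider is only allocated while the slave is under appliance_num_limit.
# Unparametrized groups are handed out immediately. If nothing fits, the
# slave's appliance is cleansed and re-allocated to a new provider.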
def provs_of_tests(test_group):
found = set()
for test in test_group:
found.update(pv for pv in self.provs
if '[' in test and pv in test)
return sorted(found)
if not self._pool:
for test_group in self.test_groups:
self._pool.append(test_group)
self.used_prov.update(provs_of_tests(test_group))
if self.used_prov:
self.ratio = float(len(self.slaves)) / len(self.used_prov)
else:
self.ratio = 0.0
if not self._pool:
return []
appliance_num_limit = 1
for idx, test_group in enumerate(self._pool):
provs = provs_of_tests(test_group)
if provs:
prov = provs[0]
if prov in slave.provider_allocation:
# provider is already with the slave, so just return the tests
self._pool.remove(test_group)
return test_group
else:
if len(slave.provider_allocation) >= appliance_num_limit:
continue
else:
# Adding provider to slave since there are not too many
slave.provider_allocation.append(prov)
self._pool.remove(test_group)
return test_group
else:
# No providers - ie, not a provider parametrized test
# or no params, so not parametrized at all
self._pool.remove(test_group)
return test_group
# Here means no tests were able to be sent
for test_group in self._pool:
provs = provs_of_tests(test_group)
if provs:
prov = provs[0]
# Already too many slaves with provider
app = slave.appliance
self.print_message(
'cleansing appliance', slave, purple=True)
try:
app.delete_all_providers()
except Exception as e:
self.print_message(
'could not cleanse', slave, red=True)
self.print_message('error:', e, red=True)
slave.provider_allocation = [prov]
self._pool.remove(test_group)
return test_group
assert not self._pool, self._pool
return []
def report_collection_diff(slaveid, from_collection, to_collection):
"""Report differences, if any exist, between master and a slave collection
Returns a diff string if the collections differ, otherwise None
Note:
This function will sort the collections before comparing them.
"""
from_collection, to_collection = sorted(from_collection), sorted(to_collection)
if from_collection == to_collection:
# Well, that was easy.
return
# diff the two, so we get some idea of what's wrong
diff = difflib.unified_diff(
from_collection,
to_collection,
fromfile='master',
tofile=slaveid,
)
# diff is a line generator, stringify it
diff = '\n'.join([line.rstrip() for line in diff])
return '{slaveid} diff:\n{diff}\n'.format(slaveid=slaveid, diff=diff)
class TerminalDistReporter(object):
"""Terminal Reporter for Distributed Testing
trdist reporter exists to make sure we get good distributed logging during the runtest loop,
which means the normal terminal reporter should be disabled during the loop
This class is where we make sure the terminal reporter is made aware of whatever state it
needs to report properly once we turn it back on after the runtest loop
It has special versions of pytest reporting hooks that, where possible, try to include a
slave ID. These hooks are called in :py:class:`ParallelSession`'s runtestloop hook.
"""
def __init__(self, config, terminal):
self.config = config
self.tr = terminal
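# Maps a test's location line to its latest Outcome(word, markup); the stored
# outcome is printed once the teardown report for that test arrives.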
self.outcomes = {}
def runtest_logstart(self, slaveid, nodeid, location):
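# Write a '(slaveid) <test> running' line on the master terminal and fire the normal logstart hook.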
test = self.tr._locationline(nodeid, *location)
prefix = '({}) {}'.format(slaveid, test)
self.tr.write_ensure_prefix(prefix, 'running', blue=True)
self.config.hook.pytest_runtest_logstart(nodeid=nodeid, location=location)
def runtest_logreport(self, slaveid, report):
# Run all the normal logreport hooks
self.config.hook.pytest_runtest_logreport(report=report)
# Now do what the terminal reporter would normally do, but include parallelizer info
outcome, letter, word = self.config.hook.pytest_report_teststatus(report=report)
# Stash stats on the terminal reporter so it reports properly
# after it's reenabled at the end of runtestloop
self.tr.stats.setdefault(outcome, []).append(report)
test = self.tr._locationline(report.nodeid, *report.location)
prefix = '({}) {}'.format(slaveid, test)
try:
# for some reason, pytest_report_teststatus returns a word, markup tuple
# when the word would be 'XPASS', so unpack it here if that's the case
word, markup = word
except (TypeError, ValueError):
# word wasn't iterable or didn't have enough values, use it as-is
pass
if word in ('PASSED', 'xfail'):
markup = {'green': True}
elif word in ('ERROR', 'FAILED', 'XPASS'):
markup = {'red': True}
elif word:
markup = {'yellow': True}
# For every stage where we can report the outcome, stash it in the outcomes dict
if word:
self.outcomes[test] = Outcome(word, markup)
# Then, when we get to the teardown report, print the last outcome
# This prevents reporting a test as 'PASSED' if its teardown phase fails, for example
if report.when == 'teardown':
word, markup = self.outcomes.pop(test)
self.tr.write_ensure_prefix(prefix, word, **markup)
Outcome = namedtuple('Outcome', ['word', 'markup'])
def unserialize_report(reportdict):
"""
Generate a :py:class:`TestReport <pytest:_pytest.runner.TestReport>` from a serialized report
"""
return runner.TestReport(**reportdict)<|fim▁end|> | |
<|file_name|>build.rs<|end_file_name|><|fim▁begin|>extern crate cc;
fn main() {
if !cfg!(feature = "no_cc") {
cc::Build::new()
.file("src/hide.c")<|fim▁hole|>}<|fim▁end|> | .compile("clear_on_drop");
} |
<|file_name|>SendSurveyRequest.java<|end_file_name|><|fim▁begin|>package co.colector.model.request;
import java.util.ArrayList;
import java.util.List;
import co.colector.ColectorApplication;
import co.colector.R;
import co.colector.model.IdInputValue;
import co.colector.model.IdValue;
import co.colector.model.Survey;
import co.colector.model.AnswerValue;
import co.colector.session.AppSession;
import co.colector.utils.NetworkUtils;
/**
* Created by dherrera on 11/10/15.
*/
public class SendSurveyRequest {
private String colector_id;
private String form_id;
private String longitud;
private String latitud;
private String horaini;
private String horafin;
private List<IdInputValue> responses;
public SendSurveyRequest(Survey survey) {
this.colector_id = String.valueOf(AppSession.getInstance().getUser().getColector_id());
this.form_id = String.valueOf(survey.getForm_id());
this.longitud = survey.getInstanceLongitude();
this.latitud = survey.getInstanceLatitude();<|fim▁hole|>
public List<IdInputValue> getResponses() {
return responses;
}
public void setResponses(List<IdInputValue> responses) {
this.responses = responses;
}
private void setResponsesData(List<IdValue> responsesData) {
responses = new ArrayList<>();
for (IdValue item : responsesData) {
switch (item.getmType()) {
case 6:
case 14:
case 16:
for (AnswerValue answerValue : item.getValue())
if (!answerValue.getValue().equals("")) {
int lastIndex = answerValue.getValue().length();
int slashIndex = answerValue.getValue().lastIndexOf("/");
responses.add(new IdInputValue(String.valueOf(item.getId()), ColectorApplication.getInstance().getString(R.string.image_name_format,
NetworkUtils.getAndroidID(ColectorApplication.getInstance()),
answerValue.getValue().substring((slashIndex + 1), lastIndex))));
}
break;
default:
for (AnswerValue answerValue : item.getValue())
responses.add(new IdInputValue(String.valueOf(item.getId()), answerValue.getValue()));
}
}
}
}<|fim▁end|> | this.horaini = survey.getInstanceHoraIni();
this.horafin = survey.getInstanceHoraFin();
this.setResponsesData(survey.getInstanceAnswers());
} |
<|file_name|>pipeline.py<|end_file_name|><|fim▁begin|># Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#<|fim▁hole|># Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX penguin template pipeline definition.
This file defines TFX pipeline and various components in the pipeline.
"""
from typing import List, Optional
import tensorflow_model_analysis as tfma
from tfx import v1 as tfx
from tfx.experimental.templates.penguin.models import features
from ml_metadata.proto import metadata_store_pb2
def create_pipeline(
pipeline_name: str,
pipeline_root: str,
data_path: str,
preprocessing_fn: str,
run_fn: str,
train_args: tfx.proto.TrainArgs,
eval_args: tfx.proto.EvalArgs,
eval_accuracy_threshold: float,
serving_model_dir: str,
schema_path: Optional[str] = None,
metadata_connection_config: Optional[
metadata_store_pb2.ConnectionConfig] = None,
beam_pipeline_args: Optional[List[str]] = None,
) -> tfx.dsl.Pipeline:
"""Implements the penguin pipeline with TFX."""
components = []
# Brings data into the pipeline or otherwise joins/converts training data.
# TODO(step 2): Might use another ExampleGen class for your data.
example_gen = tfx.components.CsvExampleGen(input_base=data_path)
components.append(example_gen)
# Computes statistics over data for visualization and example validation.
statistics_gen = tfx.components.StatisticsGen(
examples=example_gen.outputs['examples'])
components.append(statistics_gen)
if schema_path is None:
# Generates schema based on statistics files.
schema_gen = tfx.components.SchemaGen(
statistics=statistics_gen.outputs['statistics'])
components.append(schema_gen)
else:
# Import user provided schema into the pipeline.
schema_gen = tfx.components.ImportSchemaGen(schema_file=schema_path)
components.append(schema_gen)
# Performs anomaly detection based on statistics and data schema.
example_validator = tfx.components.ExampleValidator( # pylint: disable=unused-variable
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
components.append(example_validator)
# Performs transformations and feature engineering in training and serving.
transform = tfx.components.Transform( # pylint: disable=unused-variable
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
preprocessing_fn=preprocessing_fn)
# TODO(step 3): Uncomment here to add Transform to the pipeline.
# components.append(transform)
# Uses user-provided Python function that implements a model using Tensorflow.
trainer = tfx.components.Trainer(
run_fn=run_fn,
examples=example_gen.outputs['examples'],
# Use outputs of Transform as training inputs if Transform is used.
# examples=transform.outputs['transformed_examples'],
# transform_graph=transform.outputs['transform_graph'],
schema=schema_gen.outputs['schema'],
train_args=train_args,
eval_args=eval_args)
# TODO(step 4): Uncomment here to add Trainer to the pipeline.
# components.append(trainer)
# Get the latest blessed model for model validation.
model_resolver = tfx.dsl.Resolver(
strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy,
model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model),
model_blessing=tfx.dsl.Channel(
type=tfx.types.standard_artifacts.ModelBlessing)).with_id(
'latest_blessed_model_resolver')
# TODO(step 5): Uncomment here to add Resolver to the pipeline.
# components.append(model_resolver)
# Uses TFMA to compute evaluation statistics over features of a model and
# to perform quality validation of a candidate model (compared to a baseline).
eval_config = tfma.EvalConfig(
model_specs=[
tfma.ModelSpec(
signature_name='serving_default',
label_key=features.LABEL_KEY,
# Use transformed label key if Transform is used.
# label_key=features.transformed_name(features.LABEL_KEY),
preprocessing_function_names=['transform_features'])
],
slicing_specs=[tfma.SlicingSpec()],
metrics_specs=[
tfma.MetricsSpec(metrics=[
tfma.MetricConfig(
class_name='SparseCategoricalAccuracy',
threshold=tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': eval_accuracy_threshold}),
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10})))
])
])
evaluator = tfx.components.Evaluator( # pylint: disable=unused-variable
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
baseline_model=model_resolver.outputs['model'],
# Change threshold will be ignored if there is no baseline (first run).
eval_config=eval_config)
# TODO(step 5): Uncomment here to add Evaluator to the pipeline.
# components.append(evaluator)
# Pushes the model to a file destination if check passed.
pusher = tfx.components.Pusher( # pylint: disable=unused-variable
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=tfx.proto.PushDestination(
filesystem=tfx.proto.PushDestination.Filesystem(
base_directory=serving_model_dir)))
# TODO(step 5): Uncomment here to add Pusher to the pipeline.
# components.append(pusher)
return tfx.dsl.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=components,
# Change this value to control caching of execution results. Default value
# is `False`.
# enable_cache=True,
metadata_connection_config=metadata_connection_config,
beam_pipeline_args=beam_pipeline_args,
)<|fim▁end|> | |
<|file_name|>env.py<|end_file_name|><|fim▁begin|>#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
# A platform-agnostic tool for running a program in a modified environment.
#
import sys
import os
import subprocess
from optparse import OptionParser
def main(argv=None):
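# Parse leading VAR=VALUE arguments into a (possibly empty) environment,
# then run the remaining command under that environment and return its
# exit status.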
parser = OptionParser(usage="Usage: %prog [options] [--] VAR=VALUE... command [options] arg1 arg2...")
parser.add_option("-i", "--ignore-environment",<|fim▁hole|> help="Start with an empty environment (do not inherit current environment)")
(options, args) = parser.parse_args(args=argv)
if options.ignore_environment:
new_env = {}
else:
new_env = os.environ.copy()
# pull out each name value pair
while (len(args)):
z = args[0].split("=",1)
if len(z) != 2:
break  # done with env args
if len(z[0]) == 0:
raise Exception("Error: incorrect format for env var: '%s'" % str(args[x]))
del args[0]
if len(z[1]) == 0:
# value is not present, so delete it
if z[0] in new_env:
del new_env[z[0]]
else:
new_env[z[0]] = z[1]
if len(args) == 0 or len(args[0]) == 0:
raise Exception("Error: syntax error in command arguments")
if new_env.get("VALGRIND") and new_env.get("VALGRIND_ALL"):
# Python generates a lot of possibly-lost errors that are not errors, don't show them.
args = [new_env.get("VALGRIND"), "--show-reachable=no", "--show-possibly-lost=no",
"--error-exitcode=42"] + args
p = subprocess.Popen(args, env=new_env)
return p.wait()
if __name__ == "__main__":
sys.exit(main())<|fim▁end|> | action="store_true", default=False, |
<|file_name|>metrics.py<|end_file_name|><|fim▁begin|># Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<|fim▁hole|>import tensorflow_gan as tfgan
import tensorflow_hub as hub
i3d_model = None
lpips_model = None
def flatten_video(video):
return np.reshape(video, (-1,) + video.shape[2:])
def psnr(video_1, video_2):
video_1 = flatten_video(video_1)
video_2 = flatten_video(video_2)
dist = tf.image.psnr(video_1, video_2, max_val=1.0)
return np.mean(dist.numpy())
def ssim(video_1, video_2):
video_1 = flatten_video(video_1)
video_2 = flatten_video(video_2)
dist = tf.image.ssim(video_1, video_2, max_val=1.0)
return np.mean(dist.numpy())
def psnr_image(target_image, out_image):
dist = tf.image.psnr(target_image, out_image, max_val=1.0)
return np.mean(dist.numpy())
def psnr_per_frame(target_video, out_video):
max_val = 1.0
mse = np.mean(np.square(out_video - target_video), axis=(2, 3, 4))
return 20 * np.log10(max_val) - 10.0 * np.log10(mse)
def lpips_image(generated_image, real_image):
global lpips_model
result = tf.convert_to_tensor(0.0)
return result
def lpips(video_1, video_2):
video_1 = flatten_video(video_1)
video_2 = flatten_video(video_2)
dist = lpips_image(video_1, video_2)
return np.mean(dist.numpy())
def fvd_preprocess(videos, target_resolution):
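# Resize frames to target_resolution and rescale pixel values from [0, 1] to [-1, 1].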
videos = tf.convert_to_tensor(videos * 255.0, dtype=tf.float32)
videos_shape = videos.shape.as_list()
all_frames = tf.reshape(videos, [-1] + videos_shape[-3:])
resized_videos = tf.image.resize(all_frames, size=target_resolution)
target_shape = [videos_shape[0], -1] + list(target_resolution) + [3]
output_videos = tf.reshape(resized_videos, target_shape)
scaled_videos = 2. * tf.cast(output_videos, tf.float32) / 255. - 1
return scaled_videos
def create_id3_embedding(videos):
"""Get id3 embeddings."""
global i3d_model
module_spec = 'https://tfhub.dev/deepmind/i3d-kinetics-400/1'
if not i3d_model:
base_model = hub.load(module_spec)
input_tensor = base_model.graph.get_tensor_by_name('input_frames:0')
i3d_model = base_model.prune(input_tensor, 'RGB/inception_i3d/Mean:0')
output = i3d_model(videos)
return output
def calculate_fvd(real_activations, generated_activations):
return tfgan.eval.frechet_classifier_distance_from_activations(
real_activations, generated_activations)
def fvd(video_1, video_2):
video_1 = fvd_preprocess(video_1, (224, 224))
video_2 = fvd_preprocess(video_2, (224, 224))
x = create_id3_embedding(video_1)
y = create_id3_embedding(video_2)
result = calculate_fvd(x, y)
return result.numpy()
def inception_score(images):
return tfgan.eval.inception_score(images)<|fim▁end|> | """Metrics."""
import numpy as np
import tensorflow.compat.v2 as tf |
<|file_name|>walletview.cpp<|end_file_name|><|fim▁begin|>// Copyright (c) 2011-2013 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "walletview.h"
#include "addressbookpage.h"
#include "askpassphrasedialog.h"
#include "bitcoingui.h"
#include "clientmodel.h"
#include "guiutil.h"
#include "optionsmodel.h"
#include "overviewpage.h"
#include "receivecoinsdialog.h"
#include "sendcoinsdialog.h"
#include "signverifymessagedialog.h"
#include "transactiontablemodel.h"
#include "transactionview.h"
#include "walletmodel.h"
#include "miningpage.h"<|fim▁hole|>#include <QAction>
#include <QActionGroup>
#include <QFileDialog>
#include <QHBoxLayout>
#include <QProgressDialog>
#include <QPushButton>
#include <QVBoxLayout>
WalletView::WalletView(QWidget *parent):
QStackedWidget(parent),
clientModel(0),
walletModel(0)
{
// Create tabs
overviewPage = new OverviewPage();
transactionsPage = new QWidget(this);
QVBoxLayout *vbox = new QVBoxLayout();
QHBoxLayout *hbox_buttons = new QHBoxLayout();
transactionView = new TransactionView(this);
vbox->addWidget(transactionView);
QPushButton *exportButton = new QPushButton(tr("&Export"), this);
exportButton->setToolTip(tr("Export the data in the current tab to a file"));
#ifndef Q_OS_MAC // Icons on push buttons are very uncommon on Mac
exportButton->setIcon(QIcon(":/icons/export"));
#endif
hbox_buttons->addStretch();
hbox_buttons->addWidget(exportButton);
vbox->addLayout(hbox_buttons);
transactionsPage->setLayout(vbox);
receiveCoinsPage = new ReceiveCoinsDialog();
sendCoinsPage = new SendCoinsDialog();
miningPage = new MiningPage();
addWidget(overviewPage);
addWidget(transactionsPage);
addWidget(receiveCoinsPage);
addWidget(sendCoinsPage);
addWidget(miningPage);
// Clicking on a transaction on the overview pre-selects the transaction on the transaction history page
connect(overviewPage, SIGNAL(transactionClicked(QModelIndex)), transactionView, SLOT(focusTransaction(QModelIndex)));
// Double-clicking on a transaction on the transaction history page shows details
connect(transactionView, SIGNAL(doubleClicked(QModelIndex)), transactionView, SLOT(showDetails()));
// Clicking on "Export" allows to export the transaction list
connect(exportButton, SIGNAL(clicked()), transactionView, SLOT(exportClicked()));
// Pass through messages from sendCoinsPage
connect(sendCoinsPage, SIGNAL(message(QString,QString,unsigned int)), this, SIGNAL(message(QString,QString,unsigned int)));
// Pass through messages from transactionView
connect(transactionView, SIGNAL(message(QString,QString,unsigned int)), this, SIGNAL(message(QString,QString,unsigned int)));
}
WalletView::~WalletView()
{
}
void WalletView::setBitcoinGUI(BitcoinGUI *gui)
{
if (gui)
{
// Clicking on a transaction on the overview page simply sends you to transaction history page
connect(overviewPage, SIGNAL(transactionClicked(QModelIndex)), gui, SLOT(gotoHistoryPage()));
// Receive and report messages
connect(this, SIGNAL(message(QString,QString,unsigned int)), gui, SLOT(message(QString,QString,unsigned int)));
// Pass through encryption status changed signals
connect(this, SIGNAL(encryptionStatusChanged(int)), gui, SLOT(setEncryptionStatus(int)));
// Pass through transaction notifications
connect(this, SIGNAL(incomingTransaction(QString,int,qint64,QString,QString)), gui, SLOT(incomingTransaction(QString,int,qint64,QString,QString)));
}
}
void WalletView::setClientModel(ClientModel *clientModel)
{
this->clientModel = clientModel;
overviewPage->setClientModel(clientModel);
miningPage->setClientModel(clientModel);
}
void WalletView::setWalletModel(WalletModel *walletModel)
{
this->walletModel = walletModel;
// Put transaction list in tabs
transactionView->setModel(walletModel);
overviewPage->setWalletModel(walletModel);
receiveCoinsPage->setModel(walletModel);
sendCoinsPage->setModel(walletModel);
miningPage->setWalletModel(walletModel);
if (walletModel)
{
// Receive and pass through messages from wallet model
connect(walletModel, SIGNAL(message(QString,QString,unsigned int)), this, SIGNAL(message(QString,QString,unsigned int)));
// Handle changes in encryption status
connect(walletModel, SIGNAL(encryptionStatusChanged(int)), this, SIGNAL(encryptionStatusChanged(int)));
updateEncryptionStatus();
// Balloon pop-up for new transaction
connect(walletModel->getTransactionTableModel(), SIGNAL(rowsInserted(QModelIndex,int,int)),
this, SLOT(processNewTransaction(QModelIndex,int,int)));
// Ask for passphrase if needed
connect(walletModel, SIGNAL(requireUnlock()), this, SLOT(unlockWallet()));
// Show progress dialog
connect(walletModel, SIGNAL(showProgress(QString,int)), this, SLOT(showProgress(QString,int)));
}
}
void WalletView::processNewTransaction(const QModelIndex& parent, int start, int /*end*/)
{
// Prevent balloon-spam when initial block download is in progress
if (!walletModel || !clientModel || clientModel->inInitialBlockDownload())
return;
TransactionTableModel *ttm = walletModel->getTransactionTableModel();
QString date = ttm->index(start, TransactionTableModel::Date, parent).data().toString();
qint64 amount = ttm->index(start, TransactionTableModel::Amount, parent).data(Qt::EditRole).toULongLong();
QString type = ttm->index(start, TransactionTableModel::Type, parent).data().toString();
QString address = ttm->index(start, TransactionTableModel::ToAddress, parent).data().toString();
emit incomingTransaction(date, walletModel->getOptionsModel()->getDisplayUnit(), amount, type, address);
}
void WalletView::gotoOverviewPage()
{
setCurrentWidget(overviewPage);
}
void WalletView::gotoHistoryPage()
{
setCurrentWidget(transactionsPage);
}
void WalletView::gotoReceiveCoinsPage()
{
setCurrentWidget(receiveCoinsPage);
}
void WalletView::gotoSendCoinsPage(QString addr)
{
setCurrentWidget(sendCoinsPage);
if (!addr.isEmpty())
sendCoinsPage->setAddress(addr);
}
void WalletView::gotoMiningPage()
{
setCurrentWidget(miningPage);
}
void WalletView::gotoSignMessageTab(QString addr)
{
// calls show() in showTab_SM()
SignVerifyMessageDialog *signVerifyMessageDialog = new SignVerifyMessageDialog(this);
signVerifyMessageDialog->setAttribute(Qt::WA_DeleteOnClose);
signVerifyMessageDialog->setModel(walletModel);
signVerifyMessageDialog->showTab_SM(true);
if (!addr.isEmpty())
signVerifyMessageDialog->setAddress_SM(addr);
}
void WalletView::gotoVerifyMessageTab(QString addr)
{
// calls show() in showTab_VM()
SignVerifyMessageDialog *signVerifyMessageDialog = new SignVerifyMessageDialog(this);
signVerifyMessageDialog->setAttribute(Qt::WA_DeleteOnClose);
signVerifyMessageDialog->setModel(walletModel);
signVerifyMessageDialog->showTab_VM(true);
if (!addr.isEmpty())
signVerifyMessageDialog->setAddress_VM(addr);
}
bool WalletView::handlePaymentRequest(const SendCoinsRecipient& recipient)
{
return sendCoinsPage->handlePaymentRequest(recipient);
}
void WalletView::showOutOfSyncWarning(bool fShow)
{
overviewPage->showOutOfSyncWarning(fShow);
}
void WalletView::updateEncryptionStatus()
{
emit encryptionStatusChanged(walletModel->getEncryptionStatus());
}
void WalletView::encryptWallet(bool status)
{
if(!walletModel)
return;
AskPassphraseDialog dlg(status ? AskPassphraseDialog::Encrypt : AskPassphraseDialog::Decrypt, this);
dlg.setModel(walletModel);
dlg.exec();
updateEncryptionStatus();
}
void WalletView::backupWallet()
{
QString filename = GUIUtil::getSaveFileName(this,
tr("Backup Wallet"), QString(),
tr("Wallet Data (*.dat)"), NULL);
if (filename.isEmpty())
return;
if (!walletModel->backupWallet(filename)) {
emit message(tr("Backup Failed"), tr("There was an error trying to save the wallet data to %1.").arg(filename),
CClientUIInterface::MSG_ERROR);
}
else {
emit message(tr("Backup Successful"), tr("The wallet data was successfully saved to %1.").arg(filename),
CClientUIInterface::MSG_INFORMATION);
}
}
void WalletView::changePassphrase()
{
AskPassphraseDialog dlg(AskPassphraseDialog::ChangePass, this);
dlg.setModel(walletModel);
dlg.exec();
}
void WalletView::unlockWallet()
{
if(!walletModel)
return;
// Unlock wallet when requested by wallet model
if (walletModel->getEncryptionStatus() == WalletModel::Locked)
{
AskPassphraseDialog dlg(AskPassphraseDialog::Unlock, this);
dlg.setModel(walletModel);
dlg.exec();
}
}
void WalletView::usedSendingAddresses()
{
if(!walletModel)
return;
AddressBookPage *dlg = new AddressBookPage(AddressBookPage::ForEditing, AddressBookPage::SendingTab, this);
dlg->setAttribute(Qt::WA_DeleteOnClose);
dlg->setModel(walletModel->getAddressTableModel());
dlg->show();
}
void WalletView::usedReceivingAddresses()
{
if(!walletModel)
return;
AddressBookPage *dlg = new AddressBookPage(AddressBookPage::ForEditing, AddressBookPage::ReceivingTab, this);
dlg->setAttribute(Qt::WA_DeleteOnClose);
dlg->setModel(walletModel->getAddressTableModel());
dlg->show();
}
void WalletView::showProgress(const QString &title, int nProgress)
{
if (nProgress == 0)
{
progressDialog = new QProgressDialog(title, "", 0, 100);
progressDialog->setWindowModality(Qt::ApplicationModal);
progressDialog->setMinimumDuration(0);
progressDialog->setCancelButton(0);
progressDialog->setAutoClose(false);
progressDialog->setValue(0);
}
else if (nProgress == 100)
{
if (progressDialog)
{
progressDialog->close();
progressDialog->deleteLater();
}
}
else if (progressDialog)
progressDialog->setValue(nProgress);
}<|fim▁end|> |
#include "ui_interface.h"
|
<|file_name|>parsemode.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# pylint: disable=R0903
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains a object that represents a Telegram
Message Parse Modes."""
<|fim▁hole|> MARKDOWN = 'Markdown'
HTML = 'HTML'<|fim▁end|> |
class ParseMode(object):
"""This object represents a Telegram Message Parse Modes."""
|
<|file_name|>metrics-router.go<|end_file_name|><|fim▁begin|>/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"github.com/gorilla/mux"
)
const (<|fim▁hole|>// registerMetricsRouter - add handler functions for metrics.
func registerMetricsRouter(router *mux.Router) {
// metrics router
metricsRouter := router.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()
metricsRouter.Handle(prometheusMetricsPath, metricsHandler())
}<|fim▁end|> | prometheusMetricsPath = "/prometheus/metrics"
)
|
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>#[link(
name = "vertesaur",
vers = "0.0.1",
author = "Aaron Dandy",
url = "https://github.com/aarondandy/vertesaur-rust"
)];
#[desc = "A computational geometry and math library for Rust with potential medical, gaming, and GIS uses."];
#[license = "MIT"];
#[crate_type = "lib"];
pub mod vector;<|fim▁hole|><|fim▁end|> | pub mod point; |
<|file_name|>mirror.cc<|end_file_name|><|fim▁begin|>// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: mirror.cc,v 1.59 2004/05/08 19:42:35 mdz Exp $
/* ######################################################################
Mirror Acquire Method - This is the Mirror acquire method for APT.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <apt-pkg/fileutl.h>
#include <apt-pkg/acquire-method.h>
#include <apt-pkg/acquire-item.h>
#include <apt-pkg/acquire.h>
#include <apt-pkg/error.h>
#include <apt-pkg/hashes.h>
#include <apt-pkg/sourcelist.h>
#include <fstream>
#include <iostream>
#include <stdarg.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <dirent.h>
using namespace std;
#include<sstream>
#include "mirror.h"
#include "http.h"
#include "apti18n.h"
/*}}}*/
/* Done:
* - works with http (only!)
* - always picks the first mirror from the list
* - call out to problem reporting script
* - supports "deb mirror://host/path/to/mirror-list/// dist component"
* - uses pkgAcqMethod::FailReason() to have a string representation
* of the failure that is also sent to LP
*
* TODO:
* - deal with running as non-root because we can't write to the lists
dir then -> use the cached mirror file
* - better method to download than having a pkgAcquire interface here
* and better error handling there!
* - support more than http
* - testing :)
*/
MirrorMethod::MirrorMethod()
: HttpMethod(), DownloadedMirrorFile(false)
{
};
// HttpMethod::Configuration - Handle a configuration message /*{{{*/
// ---------------------------------------------------------------------
/* We stash the desired pipeline depth */
bool MirrorMethod::Configuration(string Message)
{
if (pkgAcqMethod::Configuration(Message) == false)
return false;
Debug = _config->FindB("Debug::Acquire::mirror",false);
return true;
}
/*}}}*/
// clean the mirrors dir based on ttl information
bool MirrorMethod::Clean(string Dir)
{
vector<metaIndex *>::const_iterator I;
if(Debug)
clog << "MirrorMethod::Clean(): " << Dir << endl;
if(Dir == "/")
return _error->Error("will not clean: '/'");
// read sources.list
pkgSourceList list;
list.ReadMainList();
DIR *D = opendir(Dir.c_str());
if (D == 0)
return _error->Errno("opendir",_("Unable to read %s"),Dir.c_str());
string StartDir = SafeGetCWD();
if (chdir(Dir.c_str()) != 0)
{
closedir(D);
return _error->Errno("chdir",_("Unable to change to %s"),Dir.c_str());
}
for (struct dirent *Dir = readdir(D); Dir != 0; Dir = readdir(D))
{
// Skip some files..
if (strcmp(Dir->d_name,"lock") == 0 ||
strcmp(Dir->d_name,"partial") == 0 ||
strcmp(Dir->d_name,".") == 0 ||
strcmp(Dir->d_name,"..") == 0)
continue;
// see if we have that uri
for(I=list.begin(); I != list.end(); I++)
{
string uri = (*I)->GetURI();
if(uri.find("mirror://") != 0)
continue;
string BaseUri = uri.substr(0,uri.size()-1);
if (URItoFileName(BaseUri) == Dir->d_name)
break;
}
// nothing found, nuke it
if (I == list.end())
unlink(Dir->d_name);
};
chdir(StartDir.c_str());
closedir(D);
return true;
}
bool MirrorMethod::DownloadMirrorFile(string mirror_uri_str)
{
if(Debug)
clog << "MirrorMethod::DownloadMirrorFile(): " << endl;
// not that great to use pkgAcquire here, but we do not have
// any other way right now
string fetch = BaseUri;
fetch.replace(0,strlen("mirror://"),"http://");
pkgAcquire Fetcher;
new pkgAcqFile(&Fetcher, fetch, "", 0, "", "", "", MirrorFile);
bool res = (Fetcher.Run() == pkgAcquire::Continue);
if(res)
DownloadedMirrorFile = true;
Fetcher.Shutdown();
return res;
}
/* convert the Queue->Uri back to the mirror base uri and look
* at all mirrors we have for this, this is needed as queue->uri
* may point to different mirrors (if TryNextMirror() was run)
*/
void MirrorMethod::CurrentQueueUriToMirror()
{
// already in mirror:// style so nothing to do
if(Queue->Uri.find("mirror://") == 0)
return;
// find current mirror and select next one
for (vector<string>::const_iterator mirror = AllMirrors.begin();
mirror != AllMirrors.end(); ++mirror)
{
if (Queue->Uri.find(*mirror) == 0)
{
Queue->Uri.replace(0, mirror->length(), BaseUri);
return;
}
}
_error->Error("Internal error: Failed to convert %s back to %s",
Queue->Uri.c_str(), BaseUri.c_str());
}
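// Advance Queue->Uri to the next entry in AllMirrors; returns false when the
// current mirror is the last one available.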
bool MirrorMethod::TryNextMirror()
{
// find current mirror and select next one
for (vector<string>::const_iterator mirror = AllMirrors.begin();
mirror != AllMirrors.end(); ++mirror)
{
if (Queue->Uri.find(*mirror) != 0)
continue;
vector<string>::const_iterator nextmirror = mirror + 1;
if (nextmirror != AllMirrors.end())
break;
Queue->Uri.replace(0, mirror->length(), *nextmirror);
if (Debug)
clog << "TryNextMirror: " << Queue->Uri << endl;
return true;
}
if (Debug)
clog << "TryNextMirror could not find another mirror to try" << endl;
return false;
}
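// Read the previously downloaded mirror file (one URI per line, e.g.
// "http://mirror1.example.org/ubuntu") into AllMirrors and use the first
// entry as the active mirror.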
bool MirrorMethod::InitMirrors()
{
// if we do not have a MirrorFile, fallback
if(!FileExists(MirrorFile))
{
// FIXME: fallback to a default mirror here instead
// and provide a config option to define that default
return _error->Error(_("No mirror file '%s' found "), MirrorFile.c_str());
}
// FIXME: make the mirror selection more clever, do not
// just use the first one!
// BUT: we cannot make this random, the mirror has to be
// stable across sessions, because otherwise we can
// get into sync issues (got indexfiles from mirror A,
// but packages from mirror B - one might be out of date etc)
ifstream in(MirrorFile.c_str());
string s;
while (!in.eof())
{
getline(in, s);
if (s.size() > 0)
AllMirrors.push_back(s);<|fim▁hole|> Mirror = AllMirrors[0];
UsedMirror = Mirror;
return true;
}
string MirrorMethod::GetMirrorFileName(string mirror_uri_str)
{
/*
- a mirror_uri_str looks like this:
mirror://people.ubuntu.com/~mvo/apt/mirror/mirrors/dists/feisty/Release.gpg
- the matching source.list entry
deb mirror://people.ubuntu.com/~mvo/apt/mirror/mirrors feisty main
- we actually want to go after:
http://people.ubuntu.com/~mvo/apt/mirror/mirrors
And we need to save the BaseUri for later:
- mirror://people.ubuntu.com/~mvo/apt/mirror/mirrors
FIXME: what if we have two similar prefixes?
mirror://people.ubuntu.com/~mvo/mirror
mirror://people.ubuntu.com/~mvo/mirror2
then mirror_uri_str looks like:
mirror://people.ubuntu.com/~mvo/apt/mirror/dists/feisty/Release.gpg
mirror://people.ubuntu.com/~mvo/apt/mirror2/dists/feisty/Release.gpg
we search sources.list and find:
mirror://people.ubuntu.com/~mvo/apt/mirror
in both cases! So we need to apply some domain knowledge here :( and
check for /dists/ or /Release.gpg as suffixes
*/
string name;
if(Debug)
std::cerr << "GetMirrorFileName: " << mirror_uri_str << std::endl;
// read sources.list and find match
vector<metaIndex *>::const_iterator I;
pkgSourceList list;
list.ReadMainList();
for(I=list.begin(); I != list.end(); I++)
{
string uristr = (*I)->GetURI();
if(Debug)
std::cerr << "Checking: " << uristr << std::endl;
if(uristr.substr(0,strlen("mirror://")) != string("mirror://"))
continue;
// find matching uri in sources.list
if(mirror_uri_str.substr(0,uristr.size()) == uristr)
{
if(Debug)
std::cerr << "found BaseURI: " << uristr << std::endl;
BaseUri = uristr.substr(0,uristr.size()-1);
}
}
// get new file
name = _config->FindDir("Dir::State::mirrors") + URItoFileName(BaseUri);
if(Debug)
{
cerr << "base-uri: " << BaseUri << endl;
cerr << "mirror-file: " << name << endl;
}
return name;
}
// MirrorMethod::Fetch - Fetch an item /*{{{*/
// ---------------------------------------------------------------------
/* This adds an item to the pipeline. We keep the pipeline at a fixed
depth. */
bool MirrorMethod::Fetch(FetchItem *Itm)
{
if(Debug)
clog << "MirrorMethod::Fetch()" << endl;
// the http method uses Fetch(0) as a way to update the pipeline,
// just let it do its work in this case - Fetch() with a valid
// Itm will always run before the first Fetch(0)
if(Itm == NULL)
return HttpMethod::Fetch(Itm);
// if we don't have the name of the mirror file on disk yet,
// calculate it now (can be derived from the uri)
if(MirrorFile.empty())
MirrorFile = GetMirrorFileName(Itm->Uri);
// download mirror file once (if we are after index files)
if(Itm->IndexFile && !DownloadedMirrorFile)
{
Clean(_config->FindDir("Dir::State::mirrors"));
DownloadMirrorFile(Itm->Uri);
}
if(AllMirrors.empty()) {
if(!InitMirrors()) {
// no valid mirror selected, something went wrong downloading
// from the master mirror site most likely and there is
// no old mirror file available
return false;
}
}
if(Itm->Uri.find("mirror://") != string::npos)
Itm->Uri.replace(0,BaseUri.size(), Mirror);
if(Debug)
clog << "Fetch: " << Itm->Uri << endl << endl;
// now run the real fetcher
return HttpMethod::Fetch(Itm);
};
void MirrorMethod::Fail(string Err,bool Transient)
{
// FIXME: TryNextMirror is not ideal for indexfile as we may
// run into auth issues
if (Debug)
clog << "Failure to get " << Queue->Uri << endl;
// try the next mirror on fail (if its not a expected failure,
// e.g. translations are ok to ignore)
if (!Queue->FailIgnore && TryNextMirror())
return;
// all mirrors failed, so bail out
string s;
strprintf(s, _("[Mirror: %s]"), Mirror.c_str());
SetIP(s);
CurrentQueueUriToMirror();
pkgAcqMethod::Fail(Err, Transient);
}
void MirrorMethod::URIStart(FetchResult &Res)
{
CurrentQueueUriToMirror();
pkgAcqMethod::URIStart(Res);
}
void MirrorMethod::URIDone(FetchResult &Res,FetchResult *Alt)
{
CurrentQueueUriToMirror();
pkgAcqMethod::URIDone(Res, Alt);
}
int main()
{
setlocale(LC_ALL, "");
MirrorMethod Mth;
return Mth.Loop();
}<|fim▁end|> | } |
<|file_name|>tests.rs<|end_file_name|><|fim▁begin|>use super::*;
#[test]
fn test_struct_info_roundtrip() {
let s = ItemEnum::Struct(Struct {
struct_type: StructType::Plain,
generics: Generics { params: vec![], where_predicates: vec![] },
fields_stripped: false,
fields: vec![],
impls: vec![],
});
let struct_json = serde_json::to_string(&s).unwrap();
let de_s = serde_json::from_str(&struct_json).unwrap();
assert_eq!(s, de_s);
}
#[test]
fn test_union_info_roundtrip() {
let u = ItemEnum::Union(Union {
generics: Generics { params: vec![], where_predicates: vec![] },
fields_stripped: false,
fields: vec![],
impls: vec![],
});
let union_json = serde_json::to_string(&u).unwrap();
let de_u = serde_json::from_str(&union_json).unwrap();<|fim▁hole|><|fim▁end|> |
assert_eq!(u, de_u);
} |
<|file_name|>ForStatement.java<|end_file_name|><|fim▁begin|>// Copyright 2014 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");<|fim▁hole|>// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package net.starlark.java.syntax;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
/** Syntax node for a for loop statement, {@code for vars in iterable: ...}. */
public final class ForStatement extends Statement {
private final int forOffset;
private final Expression vars;
private final Expression iterable;
private final ImmutableList<Statement> body; // non-empty if well formed
/** Constructs a for loop statement. */
ForStatement(
FileLocations locs,
int forOffset,
Expression vars,
Expression iterable,
ImmutableList<Statement> body) {
super(locs);
this.forOffset = forOffset;
this.vars = Preconditions.checkNotNull(vars);
this.iterable = Preconditions.checkNotNull(iterable);
this.body = body;
}
/**
* Returns variables assigned by each iteration. May be a compound target such as {@code (a[b],
* c.d)}.
*/
public Expression getVars() {
return vars;
}
/** Returns the iterable value. */
// TODO(adonovan): rename to getIterable.
public Expression getCollection() {
return iterable;
}
/** Returns the statements of the loop body. Non-empty if parsing succeeded. */
public ImmutableList<Statement> getBody() {
return body;
}
@Override
public int getStartOffset() {
return forOffset;
}
@Override
public int getEndOffset() {
return body.isEmpty()
? iterable.getEndOffset() // wrong, but tree is ill formed
: body.get(body.size() - 1).getEndOffset();
}
@Override
public String toString() {
return "for " + vars + " in " + iterable + ": ...\n";
}
@Override
public void accept(NodeVisitor visitor) {
visitor.visit(this);
}
@Override
public Kind kind() {
return Kind.FOR;
}
}<|fim▁end|> | // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// |
<|file_name|>getOptionsHandler.go<|end_file_name|><|fim▁begin|>package main
import (
"strconv"
"github.com/gin-gonic/gin"
)
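// getOptionsHandler builds the full option catalogue for the design hash given
// in the "hash" query parameter, marking which option in each set is selected.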
func getOptionsHandler(c *gin.Context) {
var request struct{
Hash string `form:"hash" binding:"required"`
}
if c.Bind(&request) == nil{
var optionsList, optionsCount = makeOptionsList()
var common_set Set
var initdata initData
initdata.Hash = request.Hash
initdata.TotalPrice = "699.00"
initdata.Gender = "M"<|fim▁hole|> common_set.Key = strconv.Itoa(i+1)
common_set.Name = optionsList[i]
common_set.Options = make([]Options, 0)
var optionsCount = optionsCount[i]
for j:=1; j<=optionsCount; j++{
var option Options
option = fetchOptions(i + 1, j)
option.Selected, initdata.Favorites = getDesignHash(initdata.Hash, i+1, j)
common_set.Options = append(common_set.Options, option)
}
initdata.Data = append(initdata.Data, common_set)
}
// insertNewHash(initdata.Hash, request.Mobileno)
c.JSON(200, gin.H{
"status": "success",
"data": initdata,
})
}
}<|fim▁end|> | initdata.Data = make([]Set, 0)
for i:=0;i<=11;i++{ |
<|file_name|>__main__.py<|end_file_name|><|fim▁begin|>from .howdoi import command_line_runner<|fim▁hole|><|fim▁end|> |
command_line_runner() |
<|file_name|>tls.go<|end_file_name|><|fim▁begin|>package types
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"os"
"github.com/traefik/traefik/v2/pkg/log"
)
// +k8s:deepcopy-gen=true
// ClientTLS holds TLS specific configurations as client
// CA, Cert and Key can be either path or file contents.
type ClientTLS struct {
CA string `description:"TLS CA" json:"ca,omitempty" toml:"ca,omitempty" yaml:"ca,omitempty"`
CAOptional bool `description:"TLS CA.Optional" json:"caOptional,omitempty" toml:"caOptional,omitempty" yaml:"caOptional,omitempty" export:"true"`
Cert string `description:"TLS cert" json:"cert,omitempty" toml:"cert,omitempty" yaml:"cert,omitempty"`
Key string `description:"TLS key" json:"key,omitempty" toml:"key,omitempty" yaml:"key,omitempty" loggable:"false"`
InsecureSkipVerify bool `description:"TLS insecure skip verify" json:"insecureSkipVerify,omitempty" toml:"insecureSkipVerify,omitempty" yaml:"insecureSkipVerify,omitempty" export:"true"`
}
// CreateTLSConfig creates a TLS config from ClientTLS structures.
func (clientTLS *ClientTLS) CreateTLSConfig(ctx context.Context) (*tls.Config, error) {
if clientTLS == nil {
log.FromContext(ctx).Warnf("clientTLS is nil")
return nil, nil<|fim▁hole|>
// Not initialized, to rely on system bundle.
var caPool *x509.CertPool
clientAuth := tls.NoClientCert
if clientTLS.CA != "" {
var ca []byte
if _, errCA := os.Stat(clientTLS.CA); errCA == nil {
var err error
ca, err = os.ReadFile(clientTLS.CA)
if err != nil {
return nil, fmt.Errorf("failed to read CA. %w", err)
}
} else {
ca = []byte(clientTLS.CA)
}
caPool = x509.NewCertPool()
if !caPool.AppendCertsFromPEM(ca) {
return nil, errors.New("failed to parse CA")
}
if clientTLS.CAOptional {
clientAuth = tls.VerifyClientCertIfGiven
} else {
clientAuth = tls.RequireAndVerifyClientCert
}
}
hasCert := len(clientTLS.Cert) > 0
hasKey := len(clientTLS.Key) > 0
if hasCert != hasKey {
return nil, errors.New("both TLS cert and key must be defined")
}
if !hasCert || !hasKey {
return &tls.Config{
RootCAs: caPool,
InsecureSkipVerify: clientTLS.InsecureSkipVerify,
ClientAuth: clientAuth,
}, nil
}
cert, err := loadKeyPair(clientTLS.Cert, clientTLS.Key)
if err != nil {
return nil, err
}
return &tls.Config{
Certificates: []tls.Certificate{cert},
RootCAs: caPool,
InsecureSkipVerify: clientTLS.InsecureSkipVerify,
ClientAuth: clientAuth,
}, nil
}
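// loadKeyPair first tries to parse cert and key as inline PEM contents and,
// failing that, loads them from the given file paths.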
func loadKeyPair(cert, key string) (tls.Certificate, error) {
keyPair, err := tls.X509KeyPair([]byte(cert), []byte(key))
if err == nil {
return keyPair, nil
}
_, err = os.Stat(cert)
if err != nil {
return tls.Certificate{}, errors.New("cert file does not exist")
}
_, err = os.Stat(key)
if err != nil {
return tls.Certificate{}, errors.New("key file does not exist")
}
keyPair, err = tls.LoadX509KeyPair(cert, key)
if err != nil {
return tls.Certificate{}, err
}
return keyPair, nil
}<|fim▁end|> | } |
<|file_name|>CAS.js<|end_file_name|><|fim▁begin|>/* eslint key-spacing : 0 */
const EventEmitter = require('events');
class CAS extends EventEmitter {
constructor() {
super();
this.timeout = {
encode_ignition : null,
};
} // constructor()
// [0x130] Ignition status
decode_ignition(data) {
data.command = 'bro';
let new_level_name;
// Save previous ignition status
const previous_level = status.vehicle.ignition_level;
// Set ignition status value
update.status('vehicle.ignition_level', data.msg[0], false);
switch (data.msg[0]) {
case 0x00 : new_level_name = 'off'; break;
case 0x40 : // Whilst just beginning to turn the key
case 0x41 : new_level_name = 'accessory'; break;
case 0x45 : new_level_name = 'run'; break;
case 0x55 : new_level_name = 'start'; break;
default : new_level_name = 'unknown';
}
update.status('vehicle.ignition', new_level_name, false);
if (data.msg[0] > previous_level) { // Ignition going up
switch (data.msg[0]) { // Evaluate new ignition state
case 0x40 :
case 0x41 : { // Accessory
log.module('Powerup state');
break;
}
case 0x45 : { // Run
log.module('Run state');
// Perform KOMBI gauge sweep, if enabled
KOMBI.gauge_sweep();
break;
}
case 0x55 : { // Start
switch (previous_level) {
case 0x00 : { // If the accessory (1) ignition message wasn't caught
log.module('Powerup state');
break;<|fim▁hole|> }
case 0x45 : { // If the run (3) ignition message wasn't caught
log.module('Run state');
break;
}
default : {
log.module('Start-begin state');
}
}
}
}
}
else if (data.msg[0] < previous_level) { // Ignition going down
switch (data.msg[0]) { // Evaluate new ignition state
case 0x00 : { // Off
// If the accessory (1) ignition message wasn't caught
if (previous_level === 0x45) {
log.module('Powerdown state');
}
log.module('Poweroff state');
break;
}
case 0x40 :
case 0x41 : { // Accessory
log.module('Powerdown state');
break;
}
case 0x45 : { // Run
log.module('Start-end state');
}
}
}
data.value = 'ignition status: ' + status.vehicle.ignition;
return data;
} // decode_ignition(data)
// Ignition status
encode_ignition(action) {
// Bounce if not enabled
if (config.emulate.cas !== true) return;
// Handle setting/unsetting timeout
switch (action) {
case false : {
// Return here if timeout is already null
if (this.timeout.encode_ignition !== null) {
clearTimeout(this.timeout.encode_ignition);
this.timeout.encode_ignition = null;
log.module('Unset ignition status timeout');
}
// Send ignition off message
bus.data.send({
bus : config.cas.can_intf,
id : 0x12F,
data : Buffer.from([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ]),
});
// Return here since we're not re-sending again
return;
}
case true : {
if (this.timeout.encode_ignition === null) {
log.module('Set ignition status timeout');
}
this.timeout.encode_ignition = setTimeout(this.encode_ignition, 100);
}
}
const msg = {
bus : config.cas.can_intf,
};
switch (config.cas.generation.toLowerCase()) {
case 'exx' : { // CIC
msg.id = 0x4F8;
msg.data = [ 0x00, 0x42, 0xFE, 0x01, 0xFF, 0xFF, 0xFF, 0xFF ];
break;
}
case 'fxx' : { // NBT
msg.id = 0x12F;
msg.data = [ 0x37, 0x7C, 0x8A, 0xDD, 0xD4, 0x05, 0x33, 0x6B ];
break;
}
default : {
log.error('config.cas.generation must be set to one of Exx or Fxx');
return;
}
}
// Convert data array to Buffer
msg.data = Buffer.from(msg.data);
// Send message
bus.data.send(msg);
} // encode_ignition(action)
// Broadcast: Key fob status
// [0x23A] Decode a key fob bitmask message, and act upon the results
decode_status_keyfob(data) {
data.command = 'bro';
data.value = 'key fob status - ';
const mask = bitmask.check(data.msg[2]).mask;
const keyfob = {
button : null,
button_str : null,
buttons : {
lock : mask.bit2 && !mask.bit0 && !mask.bit4 && !mask.bit8,
unlock : !mask.bit2 && mask.bit0 && !mask.bit4 && !mask.bit8,
trunk : !mask.bit2 && !mask.bit0 && mask.bit4 && !mask.bit8,
none : !mask.bit2 && !mask.bit0 && !mask.bit4,
},
};
// Loop button object to populate log string
for (const button in keyfob.buttons) {
if (keyfob.buttons[button] !== true) continue;
keyfob.button = button;
keyfob.button_str = 'button: ' + button;
break;
}
// Update status object
update.status('cas.keyfob.button', keyfob.button, false);
update.status('cas.keyfob.buttons.lock', keyfob.buttons.lock, false);
update.status('cas.keyfob.buttons.none', keyfob.buttons.none, false);
update.status('cas.keyfob.buttons.trunk', keyfob.buttons.trunk, false);
update.status('cas.keyfob.buttons.unlock', keyfob.buttons.unlock, false);
// Emit keyfob event
this.emit('keyfob', keyfob);
// Assemble log string
data.value += keyfob.button_str;
return data;
} // decode_status_keyfob(data)
// [0x2FC] Decode a door status message from CAS and act upon the results
decode_status_opened(data) {
data.command = 'bro';
data.value = 'door status';
// Set status from message by decoding bitmask
update.status('doors.front_left', bitmask.test(data.msg[1], 0x01), false);
update.status('doors.front_right', bitmask.test(data.msg[1], 0x04), false);
update.status('doors.hood', bitmask.test(data.msg[2], 0x04), false);
update.status('doors.rear_left', bitmask.test(data.msg[1], 0x10), false);
update.status('doors.rear_right', bitmask.test(data.msg[1], 0x40), false);
update.status('doors.trunk', bitmask.test(data.msg[2], 0x01), false);
// Set status.doors.closed if all doors are closed
const update_closed_doors = (!status.doors.front_left && !status.doors.front_right && !status.doors.rear_left && !status.doors.rear_right);
update.status('doors.closed', update_closed_doors, false);
// Set status.doors.opened if any doors are opened
update.status('doors.opened', (update_closed_doors === false), false);
// Set status.doors.sealed if all doors and flaps are closed
const update_sealed_doors = (status.doors.closed && !status.doors.hood && !status.doors.trunk);
update.status('doors.sealed', update_sealed_doors, false);
return data;
} // decode_status_opened(data)
init_listeners() {
// Bounce if not enabled
if (config.emulate.cas !== true && config.retrofit.cas !== true) return;
// Perform commands on power lib active event
power.on('active', data => {
this.encode_ignition(data.new);
});
log.module('Initialized listeners');
} // init_listeners()
// Parse data sent to module
parse_in(data) {
// Bounce if not enabled
if (config.emulate.cas !== true) return;
return data;
} // parse_in(data);
// Parse data sent from module
parse_out(data) {
switch (data.src.id) {
case 0x130 : return this.decode_ignition(data); // 0x12F / 0x4F8
case 0x23A : return this.decode_status_keyfob(data);
case 0x2FC : return this.decode_status_opened(data);
}
return data;
} // parse_out();
}
module.exports = CAS;<|fim▁end|> | |
<|file_name|>test_yaml_utils.py<|end_file_name|><|fim▁begin|>import pytest
from apispec import yaml_utils
def test_load_yaml_from_docstring():
def f():
"""
Foo
bar
baz quux
---
herp: 1
derp: 2
"""
result = yaml_utils.load_yaml_from_docstring(f.__doc__)
assert result == {"herp": 1, "derp": 2}
@pytest.mark.parametrize("docstring", (None, "", "---"))
def test_load_yaml_from_docstring_empty_docstring(docstring):
assert yaml_utils.load_yaml_from_docstring(docstring) == {}
@pytest.mark.parametrize("docstring", (None, "", "---"))
def test_load_operations_from_docstring_empty_docstring(docstring):<|fim▁hole|>
def test_dict_to_yaml_unicode():
assert yaml_utils.dict_to_yaml({"가": "나"}) == '"\\uAC00": "\\uB098"\n'
assert yaml_utils.dict_to_yaml({"가": "나"}, {"allow_unicode": True}) == "가: 나\n"<|fim▁end|> | assert yaml_utils.load_operations_from_docstring(docstring) == {} |
<|file_name|>modules.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | angular.module('CareFull', ['ui.router','rzModule']); |
<|file_name|>layouts.py<|end_file_name|><|fim▁begin|>''' Functions for arranging bokeh Layout objects.
'''
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from .core.enums import Location, SizingMode
from .models.tools import ToolbarBox
from .models.plots import Plot
from .models.layouts import LayoutDOM, Row, Column, Spacer, WidgetBox
from .models.widgets import Widget
from .util._plot_arg_helpers import _convert_responsive
#-----------------------------------------------------------------------------
# Common helper functions
#-----------------------------------------------------------------------------
def _handle_children(*args, **kwargs):
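# Accept children either as a single positional list (or GridSpec) or via the
# 'children' keyword, but not both at once.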
children = kwargs.get('children')
# Set-up Children from args or kwargs
if len(args) > 0 and children is not None:
raise ValueError("'children' keyword cannot be used with positional arguments")
if not children:
if len(args) == 1 and isinstance(args[0], list):
children = args[0]
elif len(args) == 1 and isinstance(args[0], GridSpec):
children = args[0]
else:
children = list(args)
return children
def _verify_sizing_mode(sizing_mode):
if sizing_mode not in SizingMode:
raise ValueError("Invalid value of sizing_mode: %s" % sizing_mode)
def row(*args, **kwargs):
""" Create a row of Bokeh Layout objects. Forces all objects to
have the same sizing_mode, which is required for complex layouts to work.
Args:
children (list of :class:`~bokeh.models.layouts.LayoutDOM` ): A list of instances for
the row. Can be any of the following - :class:`~bokeh.models.plots.Plot`,
:class:`~bokeh.models.widgets.widget.Widget`, :class:`~bokeh.models.layouts.WidgetBox`,
:class:`~bokeh.models.layouts.Row`,
:class:`~bokeh.models.layouts.Column`,
:class:`~bokeh.models.tools.ToolbarBox`,
:class:`~bokeh.models.layouts.Spacer`.
sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ): How
the items in the layout will resize to fill the available space.
Default is ``"fixed"``. For more information on the different
modes see :attr:`~bokeh.models.layouts.LayoutDOM.sizing_mode`
description on :class:`~bokeh.models.layouts.LayoutDOM`.
responsive (``True``, ``False``): True sets ``sizing_mode`` to
``"width_ar"``. ``False`` sets sizing_mode to ``"fixed"``. Using
responsive will override sizing_mode.
Returns:
Row: A row of LayoutDOM objects all with the same sizing_mode.
Examples:
>>> row([plot_1, plot_2])
>>> row(children=[widget_box_1, plot_1], sizing_mode='stretch_both')
"""
responsive = kwargs.pop('responsive', None)
sizing_mode = kwargs.pop('sizing_mode', 'fixed')
children = kwargs.pop('children', None)
if responsive:
sizing_mode = _convert_responsive(responsive)
_verify_sizing_mode(sizing_mode)
children = _handle_children(*args, children=children)
row_children = []
for item in children:
if isinstance(item, LayoutDOM):
item.sizing_mode = sizing_mode
row_children.append(item)
else:
raise ValueError(
"""Only LayoutDOM items can be inserted into a row.
Tried to insert: %s of type %s""" % (item, type(item))
)
return Row(children=row_children, sizing_mode=sizing_mode, **kwargs)
def column(*args, **kwargs):
""" Create a column of Bokeh Layout objects. Forces all objects to
have the same sizing_mode, which is required for complex layouts to work.
Args:
children (list of :class:`~bokeh.models.layouts.LayoutDOM` ): A list of instances for
the column. Can be any of the following - :class:`~bokeh.models.plots.Plot`,
:class:`~bokeh.models.widgets.widget.Widget`, :class:`~bokeh.models.layouts.WidgetBox`,
:class:`~bokeh.models.layouts.Row`,
:class:`~bokeh.models.layouts.Column`,
:class:`~bokeh.models.tools.ToolbarBox`,
:class:`~bokeh.models.layouts.Spacer`.
sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ): How
the items in the layout will resize to fill the available space.
Default is ``"fixed"``. For more information on the different
modes see :attr:`~bokeh.models.layouts.LayoutDOM.sizing_mode`
description on :class:`~bokeh.models.layouts.LayoutDOM`.
responsive (``True``, ``False``): True sets ``sizing_mode`` to
``"width_ar"``. ``False`` sets sizing_mode to ``"fixed"``. Using
responsive will override sizing_mode.
Returns:
Column: A column of LayoutDOM objects all with the same sizing_mode.
Examples:
>>> column([plot_1, plot_2])
>>> column(children=[widget_box_1, plot_1], sizing_mode='stretch_both')
"""
responsive = kwargs.pop('responsive', None)
sizing_mode = kwargs.pop('sizing_mode', 'fixed')
children = kwargs.pop('children', None)
if responsive:
sizing_mode = _convert_responsive(responsive)
_verify_sizing_mode(sizing_mode)
children = _handle_children(*args, children=children)
col_children = []
for item in children:
if isinstance(item, LayoutDOM):
item.sizing_mode = sizing_mode
col_children.append(item)
else:
raise ValueError(
"""Only LayoutDOM items can be inserted into a column.
Tried to insert: %s of type %s""" % (item, type(item))
)
return Column(children=col_children, sizing_mode=sizing_mode, **kwargs)
def widgetbox(*args, **kwargs):
""" Create a WidgetBox of Bokeh widgets. Forces all to
have the same sizing_mode, which is required for complex layouts to work.
Args:
children (list of :class:`~bokeh.models.widgets.widget.Widget` ): A list
of widgets for the WidgetBox.
sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ): How
the items in the layout will resize to fill the available space.
Default is ``"fixed"``. For more information on the different
modes see :attr:`~bokeh.models.layouts.LayoutDOM.sizing_mode`
description on :class:`~bokeh.models.layouts.LayoutDOM`.
responsive (``True``, ``False``): True sets ``sizing_mode`` to
``"width_ar"``. ``False`` sets sizing_mode to ``"fixed"``. Using
responsive will override sizing_mode.
Returns:
WidgetBox: A WidgetBox of Widget instances all with the same sizing_mode.
Examples:
>>> widgetbox([button, select])
>>> widgetbox(children=[slider], sizing_mode='scale_width')
"""
responsive = kwargs.pop('responsive', None)
sizing_mode = kwargs.pop('sizing_mode', 'fixed')
children = kwargs.pop('children', None)
if responsive:
sizing_mode = _convert_responsive(responsive)
_verify_sizing_mode(sizing_mode)
children = _handle_children(*args, children=children)
widget_children = []
for item in children:
if isinstance(item, Widget):
item.sizing_mode = sizing_mode
widget_children.append(item)
else:
raise ValueError(
"""Only Widgets can be inserted into a WidgetBox.
Tried to insert: %s of type %s""" % (item, type(item))
)
return WidgetBox(children=widget_children, sizing_mode=sizing_mode, **kwargs)
def layout(*args, **kwargs):
""" Create a grid-based arrangement of Bokeh Layout objects. Forces all objects to
have the same sizing mode, which is required for complex layouts to work. Returns a nested set
of Rows and Columns.
Args:
children (list of lists of :class:`~bokeh.models.layouts.LayoutDOM` ): A list of lists of instances
for a grid layout. Can be any of the following - :class:`~bokeh.models.plots.Plot`,
:class:`~bokeh.models.widgets.widget.Widget`, :class:`~bokeh.models.layouts.WidgetBox`,
:class:`~bokeh.models.layouts.Row`,
:class:`~bokeh.models.layouts.Column`,
:class:`~bokeh.models.tools.ToolbarBox`,
:class:`~bokeh.models.layouts.Spacer`.
sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ): How
the items in the layout will resize to fill the available space.
Default is ``"fixed"``. For more information on the different
modes see :attr:`~bokeh.models.layouts.LayoutDOM.sizing_mode`
description on :class:`~bokeh.models.layouts.LayoutDOM`.
responsive (``True``, ``False``): True sets ``sizing_mode`` to
``"width_ar"``. ``False`` sets sizing_mode to ``"fixed"``. Using
responsive will override sizing_mode.
Returns:
Column: A column of ``Row`` layouts of the children, all with the same sizing_mode.
Examples:
>>> layout([[plot_1, plot_2], [plot_3, plot_4]])
>>> layout(
children=[
[widget_box_1, plot_1],
[slider],
[widget_box_2, plot_2, plot_3]
],
sizing_mode='fixed',
)
"""
responsive = kwargs.pop('responsive', None)
sizing_mode = kwargs.pop('sizing_mode', 'fixed')
children = kwargs.pop('children', None)
if responsive:
sizing_mode = _convert_responsive(responsive)
_verify_sizing_mode(sizing_mode)
children = _handle_children(*args, children=children)
# Make the grid
rows = []
for r in children:
row_children = []
for item in r:
if isinstance(item, LayoutDOM):
item.sizing_mode = sizing_mode
row_children.append(item)
else:
raise ValueError(
"""Only LayoutDOM items can be inserted into a layout.
Tried to insert: %s of type %s""" % (item, type(item))
)
rows.append(row(children=row_children, sizing_mode=sizing_mode))
grid = column(children=rows, sizing_mode=sizing_mode)
return grid
def _chunks(l, ncols):
"""Yield successive n-sized chunks from list, l."""
assert isinstance(ncols, int), "ncols must be an integer"
for i in range(0, len(l), ncols):
yield l[i: i+ncols]
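# Illustrative example (assumed plot names) of how _chunks() turns the flat list
# passed with ncols into rows for gridplot:
#
#   >>> list(_chunks(['p1', 'p2', 'p3', 'p4', 'p5'], 2))
#   [['p1', 'p2'], ['p3', 'p4'], ['p5']]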
def gridplot(*args, **kwargs):
""" Create a grid of plots rendered on separate canvases. ``gridplot`` builds a single toolbar
for all the plots in the grid. ``gridplot`` is designed to lay out a set of plots. For general
grid layout, use the :func:`~bokeh.layouts.layout` function.
Args:
children (list of lists of :class:`~bokeh.models.plots.Plot` ): An
array of plots to display in a grid, given as a list of lists of Plot
objects. To leave a position in the grid empty, pass None for that
position in the children list. OR list of :class:`~bokeh.models.plots.Plot` if called with
ncols. OR an instance of GridSpec.
sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ): How
the items in the layout will resize to fill the available space.
Default is ``"fixed"``. For more information on the different
modes see :attr:`~bokeh.models.layouts.LayoutDOM.sizing_mode`
description on :class:`~bokeh.models.layouts.LayoutDOM`.
toolbar_location (``above``, ``below``, ``left``, ``right`` ): Where the
toolbar will be located, with respect to the grid. Default is
``above``. If set to None, no toolbar will be attached to the grid.
ncols ``Int`` (optional): Specify the number of columns you would like in your grid.
You must only pass an un-nested list of plots (as opposed to a list of lists of plots)
when using ncols.
responsive (``True``, ``False``): True sets ``sizing_mode`` to
``"width_ar"``. ``False`` sets sizing_mode to ``"fixed"``. Using
responsive will override sizing_mode.
plot_width (int, optional): The width you would like all your plots to be
plot_height (int, optional): The height you would like all your plots to be.
toolbar_options (dict, optional) : A dictionary of options that will be
used to construct the grid's toolbar (an instance of
:class:`~bokeh.models.tools.ToolbarBox`). If none is supplied,
ToolbarBox's defaults will be used.
merge_tools (``True``, ``False``): Combine tools from all child plots into
a single toolbar.
Returns:
Row or Column: A row or column containing the grid toolbar and the grid
of plots (depending on whether the toolbar is left/right or
above/below). The grid is always a Column of Rows of plots.
Examples:
>>> gridplot([[plot_1, plot_2], [plot_3, plot_4]])
>>> gridplot([plot_1, plot_2, plot_3, plot_4], ncols=2, plot_width=200, plot_height=100)
>>> gridplot(
children=[[plot_1, plot_2], [None, plot_3]],
toolbar_location='right',
sizing_mode='fixed',
toolbar_options=dict(logo='gray')
)
"""
toolbar_location = kwargs.get('toolbar_location', 'above')
sizing_mode = kwargs.get('sizing_mode', 'fixed')
children = kwargs.get('children')
responsive = kwargs.get('responsive')
toolbar_options = kwargs.get('toolbar_options')
plot_width = kwargs.get('plot_width')
plot_height = kwargs.get('plot_height')
ncols = kwargs.get('ncols')
merge_tools = kwargs.get('merge_tools', True)
# Integrity checks & set-up
if responsive:
sizing_mode = _convert_responsive(responsive)
_verify_sizing_mode(sizing_mode)
if toolbar_location:
if not hasattr(Location, toolbar_location):
raise ValueError("Invalid value of toolbar_location: %s" % toolbar_location)
children = _handle_children(*args, children=children)
if ncols:
if any(isinstance(child, list) for child in children):
raise ValueError("Cannot provide a nested list when using ncols")
children = list(_chunks(children, ncols))
# Additional children set-up for grid plot
if not children:
children = []
# Make the grid
tools = []
rows = []
for row in children:
row_tools = []
row_children = []
for item in row:
if merge_tools:
if item is not None:
for plot in item.select(dict(type=Plot)):
row_tools = row_tools + plot.toolbar.tools
plot.toolbar_location = None
if item is None:
for neighbor in row:
if isinstance(neighbor, Plot):
break
item = Spacer(width=neighbor.plot_width, height=neighbor.plot_height)
if isinstance(item, LayoutDOM):
item.sizing_mode = sizing_mode
if isinstance(item, Plot):
if plot_width:
item.plot_width = plot_width
if plot_height:
item.plot_height = plot_height
row_children.append(item)
else:
raise ValueError("Only LayoutDOM items can be inserted into Grid")
tools = tools + row_tools
rows.append(Row(children=row_children, sizing_mode=sizing_mode))
grid = Column(children=rows, sizing_mode=sizing_mode)
if not merge_tools:
return grid
# Make the toolbar
if toolbar_location:
if not toolbar_options:
toolbar_options = {}
if 'toolbar_location' not in toolbar_options:
toolbar_options['toolbar_location'] = toolbar_location
# Fixed sizing mode needs scale_width for the toolbar
# for layout to work correctly.
if sizing_mode == 'fixed':
toolbar_sizing_mode = 'scale_width'
else:
toolbar_sizing_mode = sizing_mode
toolbar = ToolbarBox(
tools=tools,
sizing_mode=toolbar_sizing_mode,
**toolbar_options
)
# Set up children
if toolbar_location == 'above':
return Column(children=[toolbar, grid], sizing_mode=sizing_mode)
elif toolbar_location == 'below':
return Column(children=[grid, toolbar], sizing_mode=sizing_mode)
elif toolbar_location == 'left':
return Row(children=[toolbar, grid], sizing_mode=sizing_mode)
elif toolbar_location == 'right':
return Row(children=[grid, toolbar], sizing_mode=sizing_mode)
else:
return grid
class GridSpec(object):
""" Simplifies grid layout specification. """
def __init__(self, nrows, ncols):
self.nrows = nrows
self.ncols = ncols
self._arrangement = {}
def __setitem__(self, key, obj):
k1, k2 = key
if isinstance(k1, slice):
row1, row2, _ = k1.indices(self.nrows)
else:
if k1 < 0:
k1 += self.nrows
if k1 >= self.nrows or k1 < 0:
raise IndexError("index out of range")<|fim▁hole|> row1, row2 = k1, None
if isinstance(k2, slice):
col1, col2, _ = k2.indices(self.ncols)
else:
if k2 < 0:
k2 += self.ncols
if k2 >= self.ncols or k2 < 0:
raise IndexError("index out of range")
col1, col2 = k2, None
# gs[row, col] = obj
# gs[row1:row2, col] = [...]
# gs[row, col1:col2] = [...]
# gs[row1:row2, col1:col2] = [[...], ...]
def get_or_else(fn, default):
try:
return fn()
except IndexError:
return default
if row2 is None and col2 is None:
self._arrangement[row1, col1] = obj
elif row2 is None:
for col in range(col1, col2):
self._arrangement[row1, col] = get_or_else(lambda: obj[col-col1], None)
elif col2 is None:
for row in range(row1, row2):
self._arrangement[row, col1] = get_or_else(lambda: obj[row-row1], None)
else:
for row, col in zip(range(row1, row2), range(col1, col2)):
self._arrangement[row, col] = get_or_else(lambda: obj[row-row1][col-col1], None)
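# Illustrative usage sketch (assumed plot variables, not part of the documented API):
#
#   gs = GridSpec(2, 2)
#   gs[0, 0] = plot_1              # single cell
#   gs[1, 0:2] = [plot_2, plot_3]  # fill a row slice
#   grid = gridplot(gs)            # gridplot accepts a GridSpec instance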
def __iter__(self):
array = [ [ None ]*self.ncols for _ in range(0, self.nrows) ]
for (row, col), obj in self._arrangement.items():
array[row][col] = obj
return iter(array)<|fim▁end|> | |
<|file_name|>user.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Nitrate is copyright 2010 Red Hat, Inc.
#
# Nitrate is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version. This program is distributed in
# the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranties of TITLE, NON-INFRINGEMENT,
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# The GPL text is available in the file COPYING that accompanies this
# distribution and at <http://www.gnu.org/licenses>.
#
# Authors:
# Xuqing Kuang <[email protected]>
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from kobo.django.xmlrpc.decorators import user_passes_test, login_required, log_call
from nitrate.core.utils.xmlrpc import XMLRPCSerializer
__all__ = (
'filter',
'get',
'get_me',
'update',
)
def get_user_dict(user):
u = XMLRPCSerializer(model = user)
u = u.serialize_model()
if u.get('password'):
del u['password']
return u
@log_call
def filter(request, query):
"""
Description: Performs a search and returns the resulting list of users.
Params: $query - Hash: keys must match valid search fields.
+------------------------------------------------------------------+
| User Search Parameters |
+------------------------------------------------------------------+
| Key | Valid Values |
| id | Integer: ID |
| username | String: User name |
| first_name | String: User first name |
| last_name | String: User last name |
| email | String Email |
| is_active | Boolean: Return the active users |
| groups | ForeignKey: AuthGroup |
+------------------------------------------------------------------+
Returns: Array: Matching users are returned in a list of hashes.
Example:
>>> User.filter({'username__startswith': 'x'})
"""
users = User.objects.filter(**query)
return [get_user_dict(u) for u in users]
def get(request, id):
"""
Description: Used to load an existing user from the database.
Params: $id - Integer/String: An integer representing the ID in the database
Returns: A blessed User object Hash
Example:
>>> User.get(2206)
"""
return get_user_dict(User.objects.get(pk = id))
def get_me(request):
"""
Description: Get the information of myself.
Returns: A blessed User object Hash
Example:
>>> User.get_me()
"""
return get_user_dict(request.user)
def update(request, values = {}, id = None):
"""
Description: Updates the fields of the selected user. It can also change the
information of other users if you have permission.
Params: $values - Hash of keys matching User fields and the new values
to set each field to.
$id - Integer/String(Optional)
Integer: A single User ID.
String: A comma-separated string of User IDs.
Default: The ID of myself
Returns: A blessed User object Hash
+-------------------+----------------+-----------------------------------------+
| Field | Type | Null |
+-------------------+----------------+-----------------------------------------+
| first_name | String | Optional |
| last_name | String | Optional(Required if changes category) |
| email | String | Optional |
| password | String | Optional |
| old_password | String | Required by password |
+-------------------+----------------+-----------------------------------------+
Example:
>>> User.update({'first_name': 'foo'})
>>> User.update({'password': 'foo', 'old_password': '123'})
>>> User.update({'password': 'foo', 'old_password': '123'}, 2206)
"""
if id:
u = User.objects.get(pk = id)
else:
u = request.user
editable_fields = ['first_name', 'last_name', 'email', 'password']
if not request.user.has_perm('auth.change_changeuser') and request.user != u:
raise PermissionDenied
for f in editable_fields:
if values.get(f):
if f == 'password':
if not request.user.has_perm('auth.change_changeuser') and not values.get('old_password'):
raise PermissionDenied('Old password is required')
if not request.user.has_perm('auth.change_changeuser') and not u.check_password(values.get('old_password')):
raise PermissionDenied('Password is incorrect')
u.set_password(values['password'])
else:
setattr(u, f, values[f])
u.save()<|fim▁hole|><|fim▁end|> | return get_user_dict(u) |
<|file_name|>ImageOps.py<|end_file_name|><|fim▁begin|>#
# The Python Imaging Library.
# $Id$
#
# standard image operations
#
# History:
# 2001-10-20 fl Created
# 2001-10-23 fl Added autocontrast operator
# 2001-12-18 fl Added Kevin's fit operator
# 2004-03-14 fl Fixed potential division by zero in equalize
# 2005-05-05 fl Fixed equalize for low number of values
#
# Copyright (c) 2001-2004 by Secret Labs AB
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image
import operator
##
# (New in 1.1.3) The <b>ImageOps</b> module contains a number of
# 'ready-made' image processing operations. This module is somewhat
# experimental, and most operators only work on L and RGB images.
#
# @since 1.1.3
##
#
# helpers
def _border(border):
if type(border) is type(()):
if len(border) == 2:
left, top = right, bottom = border
elif len(border) == 4:
left, top, right, bottom = border
else:
left = top = right = bottom = border
return left, top, right, bottom
def _color(color, mode):
if Image.isStringType(color):
import ImageColor
color = ImageColor.getcolor(color, mode)
return color
def _lut(image, lut):
if image.mode == "P":
# FIXME: apply to lookup table, not image data
raise NotImplementedError("mode P support coming soon")
elif image.mode in ("L", "RGB"):
if image.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return image.point(lut)
else:
raise IOError, "not supported for this image mode"
#
# actions
##
# Maximize (normalize) image contrast. This function calculates a
# histogram of the input image, removes <i>cutoff</i> percent of the
# lightest and darkest pixels from the histogram, and remaps the image
# so that the darkest pixel becomes black (0), and the lightest
# becomes white (255).
#
# @param image The image to process.
# @param cutoff How many percent to cut off from the histogram.
# @param ignore The background pixel value (use None for no background).
# @return An image.
def autocontrast(image, cutoff=0, ignore=None):
"Maximize image contrast, based on histogram"
histogram = image.histogram()
lut = []
for layer in range(0, len(histogram), 256):
h = histogram[layer:layer+256]
if ignore is not None:
# get rid of outliers
try:
h[ignore] = 0
except TypeError:
# assume sequence
for ix in ignore:
h[ix] = 0
if cutoff:
# cut off pixels from both ends of the histogram
# get number of pixels
n = 0
for ix in range(256):
n = n + h[ix]
# remove cutoff% pixels from the low end
cut = n * cutoff / 100
for lo in range(256):
if cut > h[lo]:
cut = cut - h[lo]
h[lo] = 0
else:
h[lo] = h[lo] - cut
cut = 0
if cut <= 0:
break
# remove cutoff% samples from the hi end
cut = n * cutoff / 100
for hi in range(255, -1, -1):
if cut > h[hi]:
cut = cut - h[hi]
h[hi] = 0
else:
h[hi] = h[hi] - cut
cut = 0
if cut <= 0:
break
# find lowest/highest samples after preprocessing
for lo in range(256):
if h[lo]:
break
for hi in range(255, -1, -1):
if h[hi]:
break
if hi <= lo:
# don't bother
lut.extend(range(256))
else:
scale = 255.0 / (hi - lo)
offset = -lo * scale
for ix in range(256):
ix = int(ix * scale + offset)
if ix < 0:
ix = 0
elif ix > 255:
ix = 255
lut.append(ix)
return _lut(image, lut)
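# Illustrative call (the image variable is assumed): stretch contrast while
# ignoring the darkest and lightest 2% of pixels on each end of the histogram.
#
#   im2 = autocontrast(im, cutoff=2)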
##
# Colorize grayscale image. The <i>black</i> and <i>white</i>
# arguments should be RGB tuples; this function calculates a colour
# wedge mapping all black pixels in the source image to the first
# colour, and all white pixels to the second colour.
#
# @param image The image to colourize.
# @param black The colour to use for black input pixels.
# @param white The colour to use for white input pixels.
# @return An image.
def colorize(image, black, white):
"Colorize a grayscale image"
assert image.mode == "L"
black = _color(black, "RGB")
white = _color(white, "RGB")
red = []; green = []; blue = []
for i in range(256):
red.append(black[0]+i*(white[0]-black[0])/255)
green.append(black[1]+i*(white[1]-black[1])/255)
blue.append(black[2]+i*(white[2]-black[2])/255)
image = image.convert("RGB")
return _lut(image, red + green + blue)
##
# Remove border from image. The same amount of pixels are removed
# from all four sides. This function works on all image modes.
#
# @param image The image to crop.
# @param border The number of pixels to remove.
# @return An image.
# @see Image#Image.crop
def crop(image, border=0):
"Crop border off image"
left, top, right, bottom = _border(border)
return image.crop(
(left, top, image.size[0]-right, image.size[1]-bottom)
)<|fim▁hole|># @param image The image to deform.
# @param deformer A deformer object. Any object that implements a
# <b>getmesh</b> method can be used.
# @param resample What resampling filter to use.
# @return An image.
def deform(image, deformer, resample=Image.BILINEAR):
"Deform image using the given deformer"
return image.transform(
image.size, Image.MESH, deformer.getmesh(image), resample
)
##
# Equalize the image histogram. This function applies a non-linear
# mapping to the input image, in order to create a uniform
# distribution of grayscale values in the output image.
#
# @param image The image to equalize.
# @param mask An optional mask. If given, only the pixels selected by
# the mask are included in the analysis.
# @return An image.
def equalize(image, mask=None):
"Equalize image histogram"
if image.mode == "P":
image = image.convert("RGB")
h = image.histogram(mask)
lut = []
for b in range(0, len(h), 256):
histo = filter(None, h[b:b+256])
if len(histo) <= 1:
lut.extend(range(256))
else:
step = (reduce(operator.add, histo) - histo[-1]) / 255
if not step:
lut.extend(range(256))
else:
n = step / 2
for i in range(256):
lut.append(n / step)
n = n + h[i+b]
return _lut(image, lut)
##
# Add border to the image
#
# @param image The image to expand.
# @param border Border width, in pixels.
# @param fill Pixel fill value (a colour value). Default is 0 (black).
# @return An image.
def expand(image, border=0, fill=0):
"Add border to image"
left, top, right, bottom = _border(border)
width = left + image.size[0] + right
height = top + image.size[1] + bottom
out = Image.new(image.mode, (width, height), _color(fill, image.mode))
out.paste(image, (left, top))
return out
##
# Returns a sized and cropped version of the image, cropped to the
# requested aspect ratio and size.
# <p>
# The <b>fit</b> function was contributed by Kevin Cazabon.
#
# @param size The requested output size in pixels, given as a
# (width, height) tuple.
# @param method What resampling method to use. Default is Image.NEAREST.
# @param bleed Remove a border around the outside of the image (from all
# four edges). The value is a decimal percentage (use 0.01 for one
# percent). The default value is 0 (no border).
# @param centering Control the cropping position. Use (0.5, 0.5) for
# center cropping (e.g. if cropping the width, take 50% off of the
# left side, and therefore 50% off the right side). (0.0, 0.0)
# will crop from the top left corner (i.e. if cropping the width,
# take all of the crop off of the right side, and if cropping the
# height, take all of it off the bottom). (1.0, 0.0) will crop
# from the bottom left corner, etc. (i.e. if cropping the width,
# take all of the crop off the left side, and if cropping the height
# take none from the top, and therefore all off the bottom).
# @return An image.
def fit(image, size, method=Image.NEAREST, bleed=0.0, centering=(0.5, 0.5)):
"""
This method returns a sized and cropped version of the image,
cropped to the aspect ratio and size that you request.
"""
# by Kevin Cazabon, Feb 17/2000
# [email protected]
# http://www.cazabon.com
# ensure inputs are valid
if type(centering) != type([]):
centering = [centering[0], centering[1]]
if centering[0] > 1.0 or centering[0] < 0.0:
centering [0] = 0.50
if centering[1] > 1.0 or centering[1] < 0.0:
centering[1] = 0.50
if bleed > 0.49999 or bleed < 0.0:
bleed = 0.0
# calculate the area to use for resizing and cropping, subtracting
# the 'bleed' around the edges
# number of pixels to trim off on Top and Bottom, Left and Right
bleedPixels = (
int((float(bleed) * float(image.size[0])) + 0.5),
int((float(bleed) * float(image.size[1])) + 0.5)
)
liveArea = (
bleedPixels[0], bleedPixels[1], image.size[0] - bleedPixels[0] - 1,
image.size[1] - bleedPixels[1] - 1
)
liveSize = (liveArea[2] - liveArea[0], liveArea[3] - liveArea[1])
# calculate the aspect ratio of the liveArea
liveAreaAspectRatio = float(liveSize[0])/float(liveSize[1])
# calculate the aspect ratio of the output image
aspectRatio = float(size[0]) / float(size[1])
# figure out if the sides or top/bottom will be cropped off
if liveAreaAspectRatio >= aspectRatio:
# liveArea is wider than what's needed, crop the sides
cropWidth = int((aspectRatio * float(liveSize[1])) + 0.5)
cropHeight = liveSize[1]
else:
# liveArea is taller than what's needed, crop the top and bottom
cropWidth = liveSize[0]
cropHeight = int((float(liveSize[0])/aspectRatio) + 0.5)
# make the crop
leftSide = int(liveArea[0] + (float(liveSize[0]-cropWidth) * centering[0]))
if leftSide < 0:
leftSide = 0
topSide = int(liveArea[1] + (float(liveSize[1]-cropHeight) * centering[1]))
if topSide < 0:
topSide = 0
out = image.crop(
(leftSide, topSide, leftSide + cropWidth, topSide + cropHeight)
)
# resize the image and return it
return out.resize(size, method)
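# Illustrative call (the file name is assumed): build a 128x128 centre-cropped thumbnail.
#
#   im = Image.open("photo.jpg")
#   thumb = fit(im, (128, 128), method=Image.ANTIALIAS, centering=(0.5, 0.5))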
##
# Flip the image vertically (top to bottom).
#
# @param image The image to flip.
# @return An image.
def flip(image):
"Flip image vertically"
return image.transpose(Image.FLIP_TOP_BOTTOM)
##
# Convert the image to grayscale.
#
# @param image The image to convert.
# @return An image.
def grayscale(image):
"Convert to grayscale"
return image.convert("L")
##
# Invert (negate) the image.
#
# @param image The image to invert.
# @return An image.
def invert(image):
"Invert image (negate)"
lut = []
for i in range(256):
lut.append(255-i)
return _lut(image, lut)
##
# Flip image horizontally (left to right).
#
# @param image The image to mirror.
# @return An image.
def mirror(image):
"Flip image horizontally"
return image.transpose(Image.FLIP_LEFT_RIGHT)
##
# Reduce the number of bits for each colour channel.
#
# @param image The image to posterize.
# @param bits The number of bits to keep for each channel (1-8).
# @return An image.
def posterize(image, bits):
"Reduce the number of bits per color channel"
lut = []
mask = ~(2**(8-bits)-1)
for i in range(256):
lut.append(i & mask)
return _lut(image, lut)
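# Worked example of the mask used above (illustrative): with bits=2,
# 2**(8-2)-1 == 63, so mask == ~63 keeps only the two highest bits and every
# input value collapses to one of {0, 64, 128, 192}.
#
#   post = posterize(im, 2)  # 'im' is an assumed L or RGB image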
##
# Invert all pixel values above a threshold.
#
# @param image The image to posterize.
# @param threshold All pixels above this greyscale level are inverted.
# @return An image.
def solarize(image, threshold=128):
"Invert all values above threshold"
lut = []
for i in range(256):
if i < threshold:
lut.append(i)
else:
lut.append(255-i)
return _lut(image, lut)
# --------------------------------------------------------------------
# PIL USM components, from Kevin Cazabon.
def gaussian_blur(im, radius=None):
""" PIL_usm.gblur(im, [radius])"""
if radius is None:
radius = 5.0
im.load()
return im.im.gaussian_blur(radius)
gblur = gaussian_blur
def unsharp_mask(im, radius=None, percent=None, threshold=None):
""" PIL_usm.usm(im, [radius, percent, threshold])"""
if radius is None:
radius = 5.0
if percent is None:
percent = 150
if threshold is None:
threshold = 3
im.load()
return im.im.unsharp_mask(radius, percent, threshold)
usm = unsharp_mask<|fim▁end|> |
##
# Deform the image.
# |
<|file_name|>DataNodeHighlight.java<|end_file_name|><|fim▁begin|>/**
* Copyright (C) 2013 The Language Archive, Max Planck Institute for
* Psycholinguistics
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software
* Foundation; either version 2 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307, USA.
*/
package nl.mpi.yams.common.data;
import java.io.Serializable;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
/**
* Created on : Aug 28, 2013, 5:24:13 PM
*
* @author Peter Withers <[email protected]>
*/
@XmlRootElement(name = "Highlight")
public class DataNodeHighlight implements Serializable {
private String dataNodeId = null;
private String highlightPath = null;
public String getDataNodeId() {
return dataNodeId;
}
@XmlAttribute(name = "ID")
public void setDataNodeId(String dataNodeId) {
this.dataNodeId = dataNodeId;
}
public String getHighlightPath() {
return highlightPath;
}
@XmlAttribute(name = "Path")<|fim▁hole|> public void setHighlightPath(String highlightPath) {
this.highlightPath = highlightPath;
}
}<|fim▁end|> | |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
import jsonfield
from .hooks import hookset
from .utils import load_path_attr
class UserState(models.Model):
"""
this stores the overall state of a particular user.
"""
user = models.OneToOneField(User, null=True, on_delete=models.SET_NULL)
data = jsonfield.JSONField(default=dict, blank=True)
@classmethod
def for_user(cls, user):
assert user.is_authenticated(), "user must be authenticated"
user_state, _ = cls.objects.get_or_create(user=user)
return user_state
def get(self, key):
return self.data.get(key)
def set(self, key, value):
self.data[key] = value
self.save()
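# Illustrative usage sketch (the request object is assumed):
#
#   state = UserState.for_user(request.user)
#   state.set("onboarding_complete", True)
#   state.get("onboarding_complete")  # -> True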
class ActivityState(models.Model):
"""
this stores the overall state of a particular user doing a particular
activity across all sessions of that activity.
"""
user = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
activity_key = models.CharField(max_length=300)
activity_class_path = models.CharField(max_length=300)
# how many sessions have been completed by this user
completed_count = models.IntegerField(default=0)
data = jsonfield.JSONField(default=dict, blank=True)
class Meta:
unique_together = [("user", "activity_key")]
@property
def activity_class(self):
return load_path_attr(self.activity_class_path)
@property
def in_progress(self):
return next(iter(self.sessions.filter(completed=None)), None)
@property
def latest(self):
session, _ = self.sessions.get_or_create(completed=None)
return session
@property
def last_completed(self):
return self.sessions.filter(completed__isnull=False).order_by("-started").first()
@property
def all_sessions(self):
return self.sessions.order_by("started")
@classmethod<|fim▁hole|> return cls.objects.filter(user=user, activity_key=activity_key).first()
@property
def progression(self):
if self.in_progress:
return "continue"
elif self.activity_class.repeatable:
return "repeat"
else:
return "completed"
class ActivitySessionState(models.Model):
"""
this stores the state of a particular session of a particular user
doing a particular activity.
"""
activity_state = models.ForeignKey(ActivityState, related_name="sessions", on_delete=models.CASCADE)
started = models.DateTimeField(default=timezone.now)
completed = models.DateTimeField(null=True) # NULL means in progress
data = jsonfield.JSONField(default=dict, blank=True)
class Meta:
unique_together = [("activity_state", "started")]
def mark_completed(self):
self.completed = timezone.now()
self.save()
self.activity_state.completed_count = models.F("completed_count") + 1
self.activity_state.save()
def activities_for_user(user):
activities = {
"available": [],
"inprogress": [],
"completed": [],
"repeatable": []
}
for key, activity_class_path in hookset.all_activities():
activity = load_path_attr(activity_class_path)
state = ActivityState.state_for_user(user, key)
user_num_completions = ActivitySessionState.objects.filter(
activity_state__user=user,
activity_state__activity_key=key,
completed__isnull=False
).count()
activity_entry = {
"activity_key": key,
"title": activity.title,
"description": activity.description,
"state": state,
"user_num_completions": user_num_completions,
"repeatable": activity.repeatable,
}
if state:
if state.in_progress:
activities["inprogress"].append(activity_entry)
elif activity.repeatable:
activities["repeatable"].append(activity_entry)
else:
activities["completed"].append(activity_entry)
else:
activities["available"].append(activity_entry)
return activities<|fim▁end|> | def state_for_user(cls, user, activity_key):
assert user.is_authenticated(), "user must be authenticated" |
<|file_name|>protractor.js<|end_file_name|><|fim▁begin|>var numLimitInput = element(by.model('numLimit'));
var letterLimitInput = element(by.model('letterLimit'));
var longNumberLimitInput = element(by.model('longNumberLimit'));
var limitedNumbers = element(by.binding('numbers | limitTo:numLimit'));
var limitedLetters = element(by.binding('letters | limitTo:letterLimit'));
var limitedLongNumber = element(by.binding('longNumber | limitTo:longNumberLimit'));
it('should limit the number array to first three items', function() {
expect(numLimitInput.getAttribute('value')).toBe('3');
expect(letterLimitInput.getAttribute('value')).toBe('3');
expect(longNumberLimitInput.getAttribute('value')).toBe('3');
expect(limitedNumbers.getText()).toEqual('Output numbers: [1,2,3]');
expect(limitedLetters.getText()).toEqual('Output letters: abc');
expect(limitedLongNumber.getText()).toEqual('Output long number: 234');<|fim▁hole|> // numLimitInput.clear();
// numLimitInput.sendKeys('-3');
// letterLimitInput.clear();
// letterLimitInput.sendKeys('-3');
// longNumberLimitInput.clear();
// longNumberLimitInput.sendKeys('-3');
// expect(limitedNumbers.getText()).toEqual('Output numbers: [7,8,9]');
// expect(limitedLetters.getText()).toEqual('Output letters: ghi');
// expect(limitedLongNumber.getText()).toEqual('Output long number: 342');
// });
it('should not exceed the maximum size of input array', function() {
numLimitInput.clear();
numLimitInput.sendKeys('100');
letterLimitInput.clear();
letterLimitInput.sendKeys('100');
longNumberLimitInput.clear();
longNumberLimitInput.sendKeys('100');
expect(limitedNumbers.getText()).toEqual('Output numbers: [1,2,3,4,5,6,7,8,9]');
expect(limitedLetters.getText()).toEqual('Output letters: abcdefghi');
expect(limitedLongNumber.getText()).toEqual('Output long number: 2345432342');
});<|fim▁end|> | });
// There is a bug in safari and protractor that doesn't like the minus key
// it('should update the output when -3 is entered', function() { |
<|file_name|>column.py<|end_file_name|><|fim▁begin|># Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..extern import six
from ..extern.six.moves import zip
import warnings
import weakref
from copy import deepcopy
import numpy as np
from numpy import ma
from ..units import Unit, Quantity
from ..utils.compat import NUMPY_LT_1_8
from ..utils.console import color_print
from ..utils.metadata import MetaData
from ..utils.data_info import BaseColumnInfo, dtype_info_name
from ..extern.six.moves import range
from . import groups
from . import pprint
from .np_utils import fix_column_name
# These "shims" provide __getitem__ implementations for Column and MaskedColumn
from ._column_mixins import _ColumnGetitemShim, _MaskedColumnGetitemShim
# Create a generic TableFormatter object for use by bare columns with no
# parent table.
FORMATTER = pprint.TableFormatter()
INTEGER_TYPES = (int, long, np.integer) if six.PY2 else (int, np.integer)
class StringTruncateWarning(UserWarning):
"""
Warning class for when a string column is assigned a value
that gets truncated because the base (numpy) string length
is too short.
This does not inherit from AstropyWarning because we want to use
stacklevel=2 to show the user where the issue occurred in their code.
"""
pass
def _auto_names(n_cols):
from . import conf
return [str(conf.auto_colname).format(i) for i in range(n_cols)]
# list of one and two-dimensional comparison functions, which sometimes return
# a Column class and sometimes a plain array. Used in __array_wrap__ to ensure
# they only return plain (masked) arrays (see #1446 and #1685)
_comparison_functions = set(
[np.greater, np.greater_equal, np.less, np.less_equal,
np.not_equal, np.equal,
np.isfinite, np.isinf, np.isnan, np.sign, np.signbit])
def col_copy(col, copy_indices=True):
"""
This is a mixin-safe version of Column.copy() (with copy_data=True).
"""
if isinstance(col, BaseColumn):
return col.copy()
# The new column should have None for the parent_table ref. If the
# original parent_table weakref there at the point of copying then it
# generates an infinite recursion. Instead temporarily remove the weakref
# on the original column and restore after the copy in an exception-safe
# manner.
parent_table = col.info.parent_table
indices = col.info.indices
col.info.parent_table = None
col.info.indices = []
try:
newcol = col.copy() if hasattr(col, 'copy') else deepcopy(col)
newcol.info = col.info
newcol.info.indices = deepcopy(indices or []) if copy_indices else []
for index in newcol.info.indices:
index.replace_col(col, newcol)
finally:
col.info.parent_table = parent_table
col.info.indices = indices
return newcol
class FalseArray(np.ndarray):
def __new__(cls, shape):
obj = np.zeros(shape, dtype=np.bool).view(cls)
return obj
def __setitem__(self, item, val):
val = np.asarray(val)
if np.any(val):
raise ValueError('Cannot set any element of {0} class to True'
.format(self.__class__.__name__))
if six.PY2: # avoid falling back to ndarray.__setslice__
def __setslice__(self, start, stop, val):
self.__setitem__(slice(start, stop), val)
class ColumnInfo(BaseColumnInfo):
attrs_from_parent = BaseColumnInfo.attr_names
_supports_indexing = True
class BaseColumn(_ColumnGetitemShim, np.ndarray):
meta = MetaData()
def __new__(cls, data=None, name=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if data is None:
dtype = (np.dtype(dtype).str, shape)
self_data = np.zeros(length, dtype=dtype)
elif isinstance(data, BaseColumn) and hasattr(data, '_name'):
# When unpickling a MaskedColumn, ``data`` will be a bare
# BaseColumn with none of the expected attributes. In this case
# do NOT execute this block which initializes from ``data``
# attributes.
self_data = np.array(data.data, dtype=dtype, copy=copy)
if description is None:
description = data.description
if unit is None:
unit = unit or data.unit
if format is None:
format = data.format
if meta is None:
meta = deepcopy(data.meta)
if name is None:
name = data.name
elif isinstance(data, Quantity):
if unit is None:
self_data = np.array(data, dtype=dtype, copy=copy)
unit = data.unit
else:
self_data = np.array(data.to(unit), dtype=dtype, copy=copy)
if description is None:
description = data.info.description
if format is None:
format = data.info.format
if meta is None:
meta = deepcopy(data.info.meta)
else:
self_data = np.array(data, dtype=dtype, copy=copy)
self = self_data.view(cls)
self._name = fix_column_name(name)
self.unit = unit
self.format = format
self.description = description
self.meta = meta
self._parent_table = None
self.indices = deepcopy(getattr(data, 'indices', [])) if \
copy_indices else []
for index in self.indices:
index.replace_col(data, self)
return self
@property
def data(self):
return self.view(np.ndarray)
@property
def parent_table(self):
if self._parent_table is None:
return None
else:
return self._parent_table()
@parent_table.setter
def parent_table(self, table):
if table is None:
self._parent_table = None
else:
self._parent_table = weakref.ref(table)
info = ColumnInfo()
def copy(self, order='C', data=None, copy_data=True):
"""
Return a copy of the current instance.
If ``data`` is supplied then a view (reference) of ``data`` is used,
and ``copy_data`` is ignored.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
as possible. (Note that this function and :func:numpy.copy are very
similar, but have different default values for their order=
arguments.) Default is 'C'.
data : array, optional
If supplied then use a view of ``data`` instead of the instance
data. This allows copying the instance attributes and meta.
copy_data : bool, optional
Make a copy of the internal numpy array instead of using a
reference. Default is True.
Returns
-------
col : Column or MaskedColumn
Copy of the current column (same type as original)
"""
if data is None:
data = self.data
if copy_data:
data = data.copy(order)
out = data.view(self.__class__)
out.__array_finalize__(self)
# for MaskedColumn, MaskedArray.__array_finalize__ also copies mask
# from self, which is not the idea here, so undo
if isinstance(self, MaskedColumn):
out._mask = data._mask
self._copy_groups(out)
return out
def __setstate__(self, state):
"""
Restore the internal state of the Column/MaskedColumn for pickling
purposes. This requires that the last element of ``state`` is a
5-tuple that has Column-specific state values.
"""
# Get the Column attributes
names = ('_name', 'unit', 'format', 'description', 'meta', 'indices')
attrs = {name: val for name, val in zip(names, state[-1])}
state = state[:-1]
# Using super(type(self), self).__setstate__() gives an infinite
# recursion. Manually call the right super class to actually set up
# the array object.<|fim▁hole|>
# Set the Column attributes
for name, val in attrs.items():
setattr(self, name, val)
self._parent_table = None
def __reduce__(self):
"""
Return a 3-tuple for pickling a Column. Use the super-class
functionality but then add in a 5-tuple of Column-specific values
that get used in __setstate__.
"""
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
reconstruct_func, reconstruct_func_args, state = super_class.__reduce__(self)
# Define Column-specific attrs and meta that gets added to state.
column_state = (self.name, self.unit, self.format, self.description,
self.meta, self.indices)
state = state + (column_state,)
return reconstruct_func, reconstruct_func_args, state
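# Illustrative round-trip showing why the column state tuple matters when
# pickling (values are assumed):
#
#   >>> import pickle
#   >>> col = Column([1, 2, 3], name='a', unit='m')
#   >>> col2 = pickle.loads(pickle.dumps(col))
#   >>> col2.name, str(col2.unit)
#   ('a', 'm')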
# avoid == and != to be done based on type of subclass
# (helped solve #1446; see also __array_wrap__)
def __eq__(self, other):
return self.data.__eq__(other)
def __ne__(self, other):
return self.data.__ne__(other)
def __array_finalize__(self, obj):
# Obj will be none for direct call to Column() creator
if obj is None:
return
if six.callable(super(BaseColumn, self).__array_finalize__):
super(BaseColumn, self).__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
self.parent_table = None
if not hasattr(self, 'indices'): # may have been copied in __new__
self.indices = []
self._copy_attrs(obj)
def __array_wrap__(self, out_arr, context=None):
"""
__array_wrap__ is called at the end of every ufunc.
Normally, we want a Column object back and do not have to do anything
special. But there are two exceptions:
1) If the output shape is different (e.g. for reduction ufuncs
like sum() or mean()), a Column still linking to a parent_table
makes little sense, so we return the output viewed as the
column content (ndarray or MaskedArray).
For this case, we use "[()]" to select everything, and to ensure we
convert a zero rank array to a scalar. (For some reason np.sum()
returns a zero rank scalar array while np.mean() returns a scalar, so
the [()] is needed for this case.)
2) When the output is created by any function that returns a boolean
we also want to consistently return an array rather than a column
(see #1446 and #1685)
"""
out_arr = super(BaseColumn, self).__array_wrap__(out_arr, context)
if (self.shape != out_arr.shape or
(isinstance(out_arr, BaseColumn) and
(context is not None and context[0] in _comparison_functions))):
return out_arr.data[()]
else:
return out_arr
@property
def name(self):
"""
The name of this column.
"""
return self._name
@name.setter
def name(self, val):
val = fix_column_name(val)
if self.parent_table is not None:
table = self.parent_table
table.columns._rename_column(self.name, val)
self._name = val
@property
def descr(self):
"""Array-interface compliant full description of the column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
return (self.name, self.dtype.str, self.shape[1:])
def iter_str_vals(self):
"""
Return an iterator that yields the string-formatted values of this
column.
Returns
-------
str_vals : iterator
Column values formatted as strings
"""
# Iterate over formatted values with no max number of lines, no column
# name, no unit, and ignoring the returned header info in outs.
_pformat_col_iter = self._formatter._pformat_col_iter
for str_val in _pformat_col_iter(self, -1, show_name=False, show_unit=False,
show_dtype=False, outs={}):
yield str_val
def attrs_equal(self, col):
"""Compare the column attributes of ``col`` to this object.
The comparison attributes are: ``name``, ``unit``, ``dtype``,
``format``, ``description``, and ``meta``.
Parameters
----------
col : Column
Comparison column
Returns
-------
equal : boolean
True if all attributes are equal
"""
if not isinstance(col, BaseColumn):
raise ValueError('Comparison `col` must be a Column or '
'MaskedColumn object')
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
equal = all(getattr(self, x) == getattr(col, x) for x in attrs)
return equal
@property
def _formatter(self):
return FORMATTER if (self.parent_table is None) else self.parent_table.formatter
def pformat(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False,
html=False):
"""Return a list of formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name (default=True)
show_unit : bool
Include a header row for unit (default=False)
show_dtype : bool
Include column dtype (default=False)
html : bool
Format the output as an HTML table (default=False)
Returns
-------
lines : list
List of lines with header and formatted column values
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(self, max_lines, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype,
html=html)
return lines
def pprint(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False):
"""Print a formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum number of values in output
show_name : bool
Include column name (default=True)
show_unit : bool
Include a header row for unit (default=False)
show_dtype : bool
Include column dtype (default=False)
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(self, max_lines, show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype)
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
def more(self, max_lines=None, show_name=True, show_unit=False):
"""Interactively browse column with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
show_name : bool
Include a header row for column names (default=True)
show_unit : bool
Include a header row for unit (default=False)
"""
_more_tabcol = self._formatter._more_tabcol
_more_tabcol(self, max_lines=max_lines, show_name=show_name,
show_unit=show_unit)
@property
def unit(self):
"""
The unit associated with this column. May be a string or a
`astropy.units.UnitBase` instance.
Setting the ``unit`` property does not change the values of the
data. To perform a unit conversion, use ``convert_unit_to``.
"""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
else:
self._unit = Unit(unit, parse_strict='silent')
@unit.deleter
def unit(self):
self._unit = None
def convert_unit_to(self, new_unit, equivalencies=[]):
"""
Converts the values of the column in-place from the current
unit to the given unit.
To change the unit associated with this column without
actually changing the data values, simply set the ``unit``
property.
Parameters
----------
new_unit : str or `astropy.units.UnitBase` instance
The unit to convert to.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the unit are not
directly convertible. See :ref:`unit_equivalencies`.
Raises
------
astropy.units.UnitsError
If units are inconsistent
"""
if self.unit is None:
raise ValueError("No unit set on column")
self.data[:] = self.unit.to(
new_unit, self.data, equivalencies=equivalencies)
self.unit = new_unit
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.ColumnGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this column by the specified ``keys``
This effectively splits the column into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`Column` or `MaskedColumn` which contains a copy of this column but
sorted by row according to ``keys``.
The ``keys`` input to ``group_by`` must be a numpy array with the
same length as this column.
Parameters
----------
keys : numpy array
Key grouping object
Returns
-------
out : Column
New column with groups attribute set accordingly
"""
return groups.column_group_by(self, keys)
def _copy_groups(self, out):
"""
Copy current groups into a copy of self ``out``
"""
if self.parent_table:
if hasattr(self.parent_table, '_groups'):
out._groups = groups.ColumnGroups(out, indices=self.parent_table._groups._indices)
elif hasattr(self, '_groups'):
out._groups = groups.ColumnGroups(out, indices=self._groups._indices)
# Strip off the BaseColumn-ness for repr and str so that
# MaskedColumn.data __repr__ does not include masked_BaseColumn(data =
# [1 2], ...).
def __repr__(self):
return np.asarray(self).__repr__()
@property
def quantity(self):
"""
A view of this table column as a `~astropy.units.Quantity` object with
units given by the Column's `unit` parameter.
"""
# the Quantity initializer is used here because it correctly fails
# if the column's values are non-numeric (like strings), while .view
# will happily return a quantity with gibberish for numerical values
return Quantity(self, copy=False, dtype=self.dtype, order='A')
def to(self, unit, equivalencies=[], **kwargs):
"""
Converts this table column to a `~astropy.units.Quantity` object with
the requested units.
Parameters
----------
unit : `~astropy.units.Unit` or str
The unit to convert to (i.e., a valid argument to the
:meth:`astropy.units.Quantity.to` method).
equivalencies : list of equivalence pairs, optional
Equivalencies to use for this conversion. See
:meth:`astropy.units.Quantity.to` for more details.
Returns
-------
quantity : `~astropy.units.Quantity`
A quantity object with the contents of this column in the units
``unit``.
"""
return self.quantity.to(unit, equivalencies)
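# Illustrative usage (values assumed):
#
#   >>> c = Column([1.0, 2.0], name='d', unit='m')
#   >>> c.to('cm')            # returns an astropy Quantity of [100., 200.] cm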
def _copy_attrs(self, obj):
"""
Copy key column attributes from ``obj`` to self
"""
for attr in ('name', 'unit', 'format', 'description'):
val = getattr(obj, attr, None)
setattr(self, attr, val)
self.meta = deepcopy(getattr(obj, 'meta', {}))
class Column(BaseColumn):
"""Define a data column for use in a Table object.
Parameters
----------
data : list, ndarray or None
Column data values
name : str
Column name and key for reference within Table
dtype : numpy.dtype compatible value
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str or None or function or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A Column can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = Column(data=[1, 2], name='name') # shape=(2,)
col = Column(data=[[1, 2], [3, 4]], name='name') # shape=(2, 2)
col = Column(data=[1, 2], name='name', dtype=float)
col = Column(data=np.array([1, 2]), name='name')
col = Column(data=['hello', 'world'], name='name')
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = Column(name='name', length=5)
col = Column(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
"""
def __new__(cls, data=None, name=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if isinstance(data, MaskedColumn) and np.any(data.mask):
raise TypeError("Cannot convert a MaskedColumn with masked value to a Column")
self = super(Column, cls).__new__(cls, data=data, name=name, dtype=dtype,
shape=shape, length=length, description=description,
unit=unit, format=format, meta=meta,
copy=copy, copy_indices=copy_indices)
return self
def __setattr__(self, item, value):
if not isinstance(self, MaskedColumn) and item == "mask":
raise AttributeError("cannot set mask value to a column in non-masked Table")
super(Column, self).__setattr__(item, value)
if item == 'unit' and issubclass(self.dtype.type, np.number):
try:
converted = self.parent_table._convert_col_for_table(self)
except AttributeError: # Either no parent table or parent table is None
pass
else:
if converted is not self:
self.parent_table.replace_column(self.name, converted)
def _base_repr_(self, html=False):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return repr(self.item())
descr_vals = [self.__class__.__name__]
unit = None if self.unit is None else str(self.unit)
shape = None if self.ndim <= 1 else self.shape[1:]
for attr, val in (('name', self.name),
('dtype', dtype_info_name(self.dtype)),
('shape', shape),
('unit', unit),
('format', self.format),
('description', self.description),
('length', len(self))):
if val is not None:
descr_vals.append('{0}={1}'.format(attr, repr(val)))
descr = '<' + ' '.join(descr_vals) + '>\n'
if html:
from ..utils.xml.writer import xml_escape
descr = xml_escape(descr)
data_lines, outs = self._formatter._pformat_col(
self, show_name=False, show_unit=False, show_length=False, html=html)
out = descr + '\n'.join(data_lines)
if six.PY2 and isinstance(out, six.text_type):
out = out.encode('utf-8')
return out
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __unicode__(self):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return str(self.item())
lines, outs = self._formatter._pformat_col(self)
return '\n'.join(lines)
if not six.PY2:
__str__ = __unicode__
def __bytes__(self):
return six.text_type(self).encode('utf-8')
if six.PY2:
__str__ = __bytes__
def _check_string_truncate(self, value):
value = np.asanyarray(value, dtype=self.dtype.type)
if value.dtype.itemsize > self.dtype.itemsize:
warnings.warn('truncated right side string(s) longer than {} '
'character(s) during assignment'
.format(self.dtype.str[2:]),
StringTruncateWarning,
stacklevel=3)
def __setitem__(self, index, value):
# Issue warning for string assignment that truncates ``value``
if issubclass(self.dtype.type, np.character):
self._check_string_truncate(value)
# update indices
self.info.adjust_indices(index, value, len(self))
# Set items using a view of the underlying data, as it gives an
# order-of-magnitude speed-up. [#2994]
self.data[index] = value
if six.PY2:
# avoid falling through to ndarray.__setslice__, instead using
# self.__setitem__, which is much faster (see above). [#3020]
def __setslice__(self, start, stop, value):
self.__setitem__(slice(start, stop), value)
def insert(self, obj, values):
"""
Insert values before the given indices in the column and return
a new `~astropy.table.Column` object.
Parameters
----------
obj : int, slice or sequence of ints
Object that defines the index or indices before which ``values`` is
inserted.
values : array_like
Value(s) to insert. If the type of ``values`` is different
from that of the column, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
Returns
-------
out : `~astropy.table.Column`
A copy of column with ``values`` inserted. Note that the
insertion does not occur in-place: a new column is returned.
"""
if self.dtype.kind == 'O':
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
data = np.insert(self, obj, None, axis=0)
data[obj] = values
else:
# Explicitly convert to dtype of this column. Needed because numpy 1.7
# enforces safe casting by default, so the explicit conversion is needed. This isn't the case for 1.6 or 1.8+.
values = np.asarray(values, dtype=self.dtype)
data = np.insert(self, obj, values, axis=0)
out = data.view(self.__class__)
out.__array_finalize__(self)
return out
# We do this to make the methods show up in the API docs
name = BaseColumn.name
unit = BaseColumn.unit
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
quantity = BaseColumn.quantity
to = BaseColumn.to
class MaskedColumn(Column, _MaskedColumnGetitemShim, ma.MaskedArray):
"""Define a masked data column for use in a Table object.
Parameters
----------
data : list, ndarray or None
Column data values
name : str
Column name and key for reference within Table
mask : list, ndarray or None
Boolean mask for which True indicates missing or invalid data
fill_value : float, int, str or None
Value used when filling masked column elements
dtype : numpy.dtype compatible value
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str or None or function or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A MaskedColumn is similar to a Column except that it includes ``mask`` and
``fill_value`` attributes. It can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = MaskedColumn(data=[1, 2], name='name')
col = MaskedColumn(data=[1, 2], name='name', mask=[True, False])
col = MaskedColumn(data=[1, 2], name='name', dtype=float, fill_value=99)
The ``mask`` argument will be cast as a boolean array and specifies
which elements are considered to be missing or invalid.
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``. When ``data`` is provided then the ``shape``
and ``length`` arguments are ignored.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = MaskedColumn(name='name', length=5)
col = MaskedColumn(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
"""
def __new__(cls, data=None, name=None, mask=None, fill_value=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if mask is None and hasattr(data, 'mask'):
mask = data.mask
else:
mask = deepcopy(mask)
# Create self using MaskedArray as a wrapper class, following the example of
# class MSubArray in
# https://github.com/numpy/numpy/blob/maintenance/1.8.x/numpy/ma/tests/test_subclassing.py
# This pattern makes it so that __array_finalize__ is called as expected (e.g. #1471 and
# https://github.com/astropy/astropy/commit/ff6039e8)
# First just pass through all args and kwargs to BaseColumn, then wrap that object
# with MaskedArray.
self_data = BaseColumn(data, dtype=dtype, shape=shape, length=length, name=name,
unit=unit, format=format, description=description,
meta=meta, copy=copy, copy_indices=copy_indices)
self = ma.MaskedArray.__new__(cls, data=self_data, mask=mask)
# Note: do not set fill_value in the MaskedArray constructor because this does not
# go through the fill_value workarounds (see _fix_fill_value below).
if fill_value is None and hasattr(data, 'fill_value') and data.fill_value is not None:
# Coerce the fill_value to the correct type since `data` may be a
# different dtype than self.
fill_value = self.dtype.type(data.fill_value)
self.fill_value = fill_value
self.parent_table = None
# needs to be done here since self doesn't come from BaseColumn.__new__
for index in self.indices:
index.replace_col(self_data, self)
return self
def _fix_fill_value(self, val):
"""Fix a fill value (if needed) to work around a bug with setting the fill
value of a string array in MaskedArray with Python 3.x. See
https://github.com/numpy/numpy/pull/2733. This mimics the check in
numpy.ma.core._check_fill_value() (version < 1.8) which incorrectly sets
fill_value to a default if self.dtype.char is 'U' (which is the case for Python
3). Here we change the string to a byte string so that in Python 3 the
isinstance(val, basestring) part fails.
"""
if (NUMPY_LT_1_8 and isinstance(val, six.string_types) and
(self.dtype.char not in 'SV')):
val = val.encode()
return val
@property
def fill_value(self):
return self.get_fill_value() # defer to native ma.MaskedArray method
@fill_value.setter
def fill_value(self, val):
"""Set fill value both in the masked column view and in the parent table
if it exists. Setting one or the other alone doesn't work."""
val = self._fix_fill_value(val)
# Yet another ma bug workaround: If the value of fill_value for a string array is
# requested but not yet set then it gets created as 'N/A'. From this point onward
# any new fill_values are truncated to 3 characters. Note that this does not
# occur if the masked array is a structured array (as in the previous block that
# deals with the parent table).
#
# >>> x = ma.array(['xxxx'])
# >>> x.fill_value # fill_value now gets represented as an 'S3' array
# 'N/A'
# >>> x.fill_value='yyyy'
# >>> x.fill_value
# 'yyy'
#
# To handle this we are forced to reset a private variable first:
self._fill_value = None
self.set_fill_value(val) # defer to native ma.MaskedArray method
@property
def data(self):
out = self.view(ma.MaskedArray)
# The following is necessary because of a bug in Numpy, which was
# fixed in numpy/numpy#2703. The fix should be included in Numpy 1.8.0.
out.fill_value = self.fill_value
return out
def filled(self, fill_value=None):
"""Return a copy of self, with masked values filled with a given value.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries (`None` by default). If
`None`, the ``fill_value`` attribute of the array is used
instead.
Returns
-------
filled_column : Column
A copy of ``self`` with masked entries replaced by `fill_value`
(be it the function argument or the attribute of ``self``).
"""
if fill_value is None:
fill_value = self.fill_value
fill_value = self._fix_fill_value(fill_value)
data = super(MaskedColumn, self).filled(fill_value)
# Use parent table definition of Column if available
column_cls = self.parent_table.Column if (self.parent_table is not None) else Column
out = column_cls(name=self.name, data=data, unit=self.unit,
format=self.format, description=self.description,
meta=deepcopy(self.meta))
return out
def insert(self, obj, values, mask=None):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.table.MaskedColumn` object.
Parameters
----------
obj : int, slice or sequence of ints
Object that defines the index or indices before which ``values`` is
inserted.
values : array_like
Value(s) to insert. If the type of ``values`` is different
from that of the column, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
mask : boolean array_like
Mask value(s) to insert. If not supplied then False is used.
Returns
-------
out : `~astropy.table.MaskedColumn`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new masked column is returned.
"""
self_ma = self.data # self viewed as MaskedArray
if self.dtype.kind == 'O':
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
new_data = np.insert(self_ma.data, obj, None, axis=0)
new_data[obj] = values
else:
# Explicitly convert to dtype of this column. Needed because numpy 1.7
# enforces safe casting by default, so the explicit conversion is needed. This isn't the case for 1.6 or 1.8+.
values = np.asarray(values, dtype=self.dtype)
new_data = np.insert(self_ma.data, obj, values, axis=0)
if mask is None:
if self.dtype.kind == 'O':
mask = False
else:
mask = np.zeros(values.shape, dtype=np.bool)
new_mask = np.insert(self_ma.mask, obj, mask, axis=0)
new_ma = np.ma.array(new_data, mask=new_mask, copy=False)
out = new_ma.view(self.__class__)
out.parent_table = None
out.indices = []
out._copy_attrs(self)
return out
def _copy_attrs_slice(self, out):
# Fixes issue #3023: when calling getitem with a MaskedArray subclass
# the original object attributes are not copied.
if out.__class__ is self.__class__:
out.parent_table = None
# we need this because __getitem__ does a shallow copy of indices
if out.indices is self.indices:
out.indices = []
out._copy_attrs(self)
return out
def __setitem__(self, index, value):
# update indices
self.info.adjust_indices(index, value, len(self))
ma.MaskedArray.__setitem__(self, index, value)
# We do this to make the methods show up in the API docs
name = BaseColumn.name
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to<|fim▁end|> | super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
super_class.__setstate__(self, state) |
<|file_name|>pastWeekTime.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python<|fim▁hole|>import os
files = [TIME_WORKED]
print get_total_time_by_day_files(files)
raw_input()<|fim▁end|> | from TimeWorked import * |
<|file_name|>test_list_all_files_in_directory.py<|end_file_name|><|fim▁begin|>from raptiformica.utils import list_all_files_in_directory
from tests.testcase import TestCase
class TestListAllFilesInDirectory(TestCase):
def setUp(self):
self.walk = self.set_up_patch('raptiformica.utils.walk')
self.walk.return_value = [
('/tmp/a/directory', ['dir'], ['file.txt', 'file2.txt']),
('/tmp/a/directory/dir', ['dir2'], ['file3.txt']),
('/tmp/a/directory/dir/dir2', [], ['file5.txt', 'file4.txt'])<|fim▁hole|> ]
def test_list_all_files_in_directory_lists_all_files_in_directory_walks_path(self):
list_all_files_in_directory('/tmp/a/directory')
self.walk.assert_called_once_with('/tmp/a/directory')
def test_list_all_files_in_directory_returns_all_files(self):
ret = list_all_files_in_directory('/tmp/a/directory')
expected_list = [
'/tmp/a/directory/file.txt',
'/tmp/a/directory/file2.txt',
'/tmp/a/directory/dir/file3.txt',
'/tmp/a/directory/dir/dir2/file4.txt',
'/tmp/a/directory/dir/dir2/file5.txt'
]
self.assertCountEqual(ret, expected_list)<|fim▁end|> | |
<|file_name|>client.py<|end_file_name|><|fim▁begin|># *****************************************************************************
# Copyright (c) 2014, 2018 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
from datetime import datetime
import json
import logging
import threading
import paho.mqtt.client as paho
import pytz
from wiotp.sdk import (
AbstractClient,
ConfigurationException,
ConnectionException,
MissingMessageEncoderException,
InvalidEventException,
)
from wiotp.sdk.device.command import Command
from wiotp.sdk.device.config import DeviceClientConfig
class DeviceClient(AbstractClient):
"""
Extends #wiotp.common.AbstractClient to implement a device client supporting
messaging over MQTT
# Parameters
options (dict): Configuration options for the client
logHandlers (list<logging.Handler>): Log handlers to configure. Defaults to `None`,
which will result in a default log handler being created.
"""
_COMMAND_TOPIC = "iot-2/cmd/+/fmt/+"
def __init__(self, config, logHandlers=None):
self._config = DeviceClientConfig(**config)
AbstractClient.__init__(
self,
domain=self._config.domain,
organization=self._config.orgId,
clientId=self._config.clientId,
username=self._config.username,
password=self._config.password,
port=self._config.port,
transport=self._config.transport,
cleanStart=self._config.cleanStart,
sessionExpiry=self._config.sessionExpiry,
keepAlive=self._config.keepAlive,
caFile=self._config.caFile,
logLevel=self._config.logLevel,
logHandlers=logHandlers,
)
# Add handler for commands if not connected to QuickStart
if not self._config.isQuickstart():
self.client.message_callback_add("iot-2/cmd/+/fmt/+", self._onCommand)
# Initialize user supplied callback
self.commandCallback = None
# Register startup subscription list (only for non-Quickstart)
if not self._config.isQuickstart():
self._subscriptions[self._COMMAND_TOPIC] = 1
def publishEvent(self, eventId, msgFormat, data, qos=0, onPublish=None):
"""
Publish an event to Watson IoT Platform.
# Parameters
eventId (string): Name of this event
msgFormat (string): Format of the data for this event
data (dict): Data for this event
qos (int): MQTT quality of service level to use (`0`, `1`, or `2`)
onPublish(function): A function that will be called when receipt
of the publication is confirmed.
# Callback and QoS
The use of the optional #onPublish function has different implications depending
on the level of qos used to publish the event:
- qos 0: the client has asynchronously begun to send the event
- qos 1 and 2: the client has confirmation of delivery from the platform
"""
topic = "iot-2/evt/{eventId}/fmt/{msgFormat}".format(eventId=eventId, msgFormat=msgFormat)
return self._publishEvent(topic, eventId, msgFormat, data, qos, onPublish)
def _onCommand(self, client, userdata, pahoMessage):
"""
Internal callback for device command messages, parses source device from topic string and
passes the information on to the registered device command callback
"""
try:
command = Command(pahoMessage, self._messageCodecs)
except InvalidEventException as e:
self.logger.critical(str(e))
else:
self.logger.debug("Received command '%s'" % (command.commandId))
if self.commandCallback:<|fim▁hole|><|fim▁end|> | self.commandCallback(command) |
<|file_name|>currency.rs<|end_file_name|><|fim▁begin|>use params::to_snakecase;
/// Currency is the list of supported currencies.
///
/// For more details see https://support.stripe.com/questions/which-currencies-does-stripe-support.
#[derive(Copy, Clone, Debug, Deserialize, Serialize, Eq, PartialEq, Hash)]
pub enum Currency {
#[serde(rename = "aed")]
AED, // United Arab Emirates Dirham
#[serde(rename = "afn")]
AFN, // Afghan Afghani
#[serde(rename = "all")]
ALL, // Albanian Lek
#[serde(rename = "amd")]
AMD, // Armenian Dram
#[serde(rename = "ang")]
ANG, // Netherlands Antillean Gulden
#[serde(rename = "aoa")]
AOA, // Angolan Kwanza
#[serde(rename = "ars")]
ARS, // Argentine Peso
#[serde(rename = "aud")]
AUD, // Australian Dollar
#[serde(rename = "awg")]
AWG, // Aruban Florin
#[serde(rename = "azn")]
AZN, // Azerbaijani Manat
#[serde(rename = "bam")]
BAM, // Bosnia & Herzegovina Convertible Mark
#[serde(rename = "bbd")]
BBD, // Barbadian Dollar
#[serde(rename = "bdt")]
BDT, // Bangladeshi Taka
#[serde(rename = "bgn")]
BGN, // Bulgarian Lev
#[serde(rename = "bif")]
BIF, // Burundian Franc
#[serde(rename = "bmd")]
BMD, // Bermudian Dollar
#[serde(rename = "bnd")]
BND, // Brunei Dollar
#[serde(rename = "bob")]
BOB, // Bolivian Boliviano
#[serde(rename = "brl")]
BRL, // Brazilian Real
#[serde(rename = "bsd")]
BSD, // Bahamian Dollar
#[serde(rename = "bwp")]
BWP, // Botswana Pula
#[serde(rename = "bzd")]
BZD, // Belize Dollar
#[serde(rename = "cad")]
CAD, // Canadian Dollar
#[serde(rename = "cdf")]
CDF, // Congolese Franc
#[serde(rename = "chf")]
CHF, // Swiss Franc
#[serde(rename = "clp")]
CLP, // Chilean Peso
#[serde(rename = "cny")]
CNY, // Chinese Renminbi Yuan
#[serde(rename = "cop")]
COP, // Colombian Peso
#[serde(rename = "crc")]
CRC, // Costa Rican Colón
#[serde(rename = "cve")]
CVE, // Cape Verdean Escudo
#[serde(rename = "czk")]
CZK, // Czech Koruna
#[serde(rename = "djf")]
DJF, // Djiboutian Franc
#[serde(rename = "dkk")]
DKK, // Danish Krone
#[serde(rename = "dop")]
DOP, // Dominican Peso
#[serde(rename = "dzd")]
DZD, // Algerian Dinar
#[serde(rename = "eek")]
EEK, // Estonian Kroon
#[serde(rename = "egp")]
EGP, // Egyptian Pound
#[serde(rename = "etb")]
ETB, // Ethiopian Birr
#[serde(rename = "eur")]
EUR, // Euro
#[serde(rename = "fjd")]
FJD, // Fijian Dollar
#[serde(rename = "fkp")]
FKP, // Falkland Islands Pound
#[serde(rename = "gbp")]
GBP, // British Pound
#[serde(rename = "gel")]
GEL, // Georgian Lari
#[serde(rename = "gip")]
GIP, // Gibraltar Pound
#[serde(rename = "gmd")]
GMD, // Gambian Dalasi
#[serde(rename = "gnf")]
GNF, // Guinean Franc
#[serde(rename = "gtq")]
GTQ, // Guatemalan Quetzal
#[serde(rename = "gyd")]
GYD, // Guyanese Dollar
#[serde(rename = "hkd")]
HKD, // Hong Kong Dollar
#[serde(rename = "hnl")]
HNL, // Honduran Lempira
#[serde(rename = "hrk")]
HRK, // Croatian Kuna
#[serde(rename = "htg")]
HTG, // Haitian Gourde
#[serde(rename = "huf")]
HUF, // Hungarian Forint
#[serde(rename = "idr")]
IDR, // Indonesian Rupiah
#[serde(rename = "ils")]
ILS, // Israeli New Sheqel
#[serde(rename = "inr")]
INR, // Indian Rupee
#[serde(rename = "isk")]
ISK, // Icelandic Króna
#[serde(rename = "jmd")]
JMD, // Jamaican Dollar
#[serde(rename = "jpy")]
JPY, // Japanese Yen
#[serde(rename = "kes")]
KES, // Kenyan Shilling
#[serde(rename = "kgs")]
KGS, // Kyrgyzstani Som
#[serde(rename = "khr")]
KHR, // Cambodian Riel
#[serde(rename = "kmf")]
KMF, // Comorian Franc
#[serde(rename = "krw")]
KRW, // South Korean Won
#[serde(rename = "kyd")]
KYD, // Cayman Islands Dollar
#[serde(rename = "kzt")]
KZT, // Kazakhstani Tenge
#[serde(rename = "lak")]
LAK, // Lao Kip
#[serde(rename = "lbp")]
LBP, // Lebanese Pound
#[serde(rename = "lkr")]
LKR, // Sri Lankan Rupee
#[serde(rename = "lrd")]
LRD, // Liberian Dollar
#[serde(rename = "lsl")]
LSL, // Lesotho Loti
#[serde(rename = "ltl")]
LTL, // Lithuanian Litas
#[serde(rename = "lvl")]
LVL, // Latvian Lats
#[serde(rename = "mad")]
MAD, // Moroccan Dirham
#[serde(rename = "mdl")]
MDL, // Moldovan Leu
#[serde(rename = "mga")]
MGA, // Malagasy Ariary
#[serde(rename = "mkd")]
MKD, // Macedonian Denar
#[serde(rename = "mnt")]
MNT, // Mongolian Tögrög
#[serde(rename = "mop")]
MOP, // Macanese Pataca
#[serde(rename = "mro")]
MRO, // Mauritanian Ouguiya
#[serde(rename = "mur")]
MUR, // Mauritian Rupee
#[serde(rename = "mvr")]
MVR, // Maldivian Rufiyaa
#[serde(rename = "mwk")]
MWK, // Malawian Kwacha
#[serde(rename = "mxn")]
MXN, // Mexican Peso
#[serde(rename = "myr")]
MYR, // Malaysian Ringgit
#[serde(rename = "mzn")]
MZN, // Mozambican Metical
#[serde(rename = "nad")]
NAD, // Namibian Dollar
#[serde(rename = "ngn")]
NGN, // Nigerian Naira
#[serde(rename = "nio")]
NIO, // Nicaraguan Córdoba
#[serde(rename = "nok")]
NOK, // Norwegian Krone
#[serde(rename = "npr")]
NPR, // Nepalese Rupee
#[serde(rename = "nzd")]
NZD, // New Zealand Dollar
#[serde(rename = "pab")]
PAB, // Panamanian Balboa
#[serde(rename = "pen")]
PEN, // Peruvian Nuevo Sol
#[serde(rename = "pgk")]
PGK, // Papua New Guinean Kina
#[serde(rename = "php")]
PHP, // Philippine Peso
#[serde(rename = "pkr")]
PKR, // Pakistani Rupee
#[serde(rename = "pln")]
PLN, // Polish Złoty
#[serde(rename = "pyg")]
PYG, // Paraguayan Guaraní
#[serde(rename = "qar")]
QAR, // Qatari Riyal
#[serde(rename = "ron")]
RON, // Romanian Leu
#[serde(rename = "rsd")]
RSD, // Serbian Dinar
#[serde(rename = "rub")]
RUB, // Russian Ruble
#[serde(rename = "rwf")]
RWF, // Rwandan Franc
#[serde(rename = "sar")]
SAR, // Saudi Riyal
#[serde(rename = "sbd")]
SBD, // Solomon Islands Dollar
#[serde(rename = "scr")]
SCR, // Seychellois Rupee
#[serde(rename = "sek")]
SEK, // Swedish Krona
#[serde(rename = "sgd")]
SGD, // Singapore Dollar
#[serde(rename = "shp")]
SHP, // Saint Helenian Pound
#[serde(rename = "sll")]
SLL, // Sierra Leonean Leone
#[serde(rename = "sos")]
SOS, // Somali Shilling
#[serde(rename = "srd")]
SRD, // Surinamese Dollar
#[serde(rename = "std")]
STD, // São Tomé and Príncipe Dobra
#[serde(rename = "svc")]
SVC, // Salvadoran Colón
#[serde(rename = "szl")]
SZL, // Swazi Lilangeni
#[serde(rename = "thb")]
THB, // Thai Baht
#[serde(rename = "tjs")]
TJS, // Tajikistani Somoni
#[serde(rename = "top")]
TOP, // Tongan Paʻanga
#[serde(rename = "try")]
TRY, // Turkish Lira
#[serde(rename = "ttd")]
TTD, // Trinidad and Tobago Dollar
#[serde(rename = "twd")]
TWD, // New Taiwan Dollar
#[serde(rename = "tzs")]
TZS, // Tanzanian Shilling
#[serde(rename = "uah")]
UAH, // Ukrainian Hryvnia
#[serde(rename = "ugx")]
UGX, // Ugandan Shilling
#[serde(rename = "usd")]
USD, // United States Dollar
#[serde(rename = "uyu")]
UYU, // Uruguayan Peso
#[serde(rename = "uzs")]
UZS, // Uzbekistani Som
#[serde(rename = "vef")]
VEF, // Venezuelan Bolívar
#[serde(rename = "vnd")]
VND, // Vietnamese Đồng
#[serde(rename = "vuv")]
VUV, // Vanuatu Vatu
#[serde(rename = "wst")]
WST, // Samoan Tala
#[serde(rename = "xaf")]
XAF, // Central African Cfa Franc
#[serde(rename = "xcd")]
XCD, // East Caribbean Dollar
#[serde(rename = "xof")]
XOF, // West African Cfa Franc
#[serde(rename = "xpf")]
XPF, // Cfp Franc
#[serde(rename = "yer")]
YER, // Yemeni Rial
#[serde(rename = "zar")]
ZAR, // South African Rand
#[serde(rename = "zmw")]
ZMW, // Zambian Kwacha
}
impl Default for Currency {
fn default() -> Self {
Currency::USD
}
}
impl ::std::fmt::Display for Currency {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "{}", to_snakecase(&format!("{:?}", self)))
}
}
impl ::std::str::FromStr for Currency {
type Err = ParseCurrencyError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"aed" => Ok(Currency::AED),
"afn" => Ok(Currency::AFN),
"all" => Ok(Currency::ALL),
"amd" => Ok(Currency::AMD),
"ang" => Ok(Currency::ANG),
"aoa" => Ok(Currency::AOA),
"ars" => Ok(Currency::ARS),
"aud" => Ok(Currency::AUD),
"awg" => Ok(Currency::AWG),
"azn" => Ok(Currency::AZN),
"bam" => Ok(Currency::BAM),
"bbd" => Ok(Currency::BBD),
"bdt" => Ok(Currency::BDT),
"bgn" => Ok(Currency::BGN),
"bif" => Ok(Currency::BIF),
"bmd" => Ok(Currency::BMD),
"bnd" => Ok(Currency::BND),
"bob" => Ok(Currency::BOB),
"brl" => Ok(Currency::BRL),
"bsd" => Ok(Currency::BSD),
"bwp" => Ok(Currency::BWP),
"bzd" => Ok(Currency::BZD),
"cad" => Ok(Currency::CAD),
"cdf" => Ok(Currency::CDF),
"chf" => Ok(Currency::CHF),
"clp" => Ok(Currency::CLP),
"cny" => Ok(Currency::CNY),
"cop" => Ok(Currency::COP),
"crc" => Ok(Currency::CRC),
"cve" => Ok(Currency::CVE),
"czk" => Ok(Currency::CZK),
"djf" => Ok(Currency::DJF),
"dkk" => Ok(Currency::DKK),
"dop" => Ok(Currency::DOP),
"dzd" => Ok(Currency::DZD),
"eek" => Ok(Currency::EEK),
"egp" => Ok(Currency::EGP),
"etb" => Ok(Currency::ETB),
"eur" => Ok(Currency::EUR),
"fjd" => Ok(Currency::FJD),
"fkp" => Ok(Currency::FKP),
"gbp" => Ok(Currency::GBP),
"gel" => Ok(Currency::GEL),
"gip" => Ok(Currency::GIP),
"gmd" => Ok(Currency::GMD),
"gnf" => Ok(Currency::GNF),
"gtq" => Ok(Currency::GTQ),
"gyd" => Ok(Currency::GYD),
"hkd" => Ok(Currency::HKD),
"hnl" => Ok(Currency::HNL),
"hrk" => Ok(Currency::HRK),
"htg" => Ok(Currency::HTG),
"huf" => Ok(Currency::HUF),
"idr" => Ok(Currency::IDR),
"ils" => Ok(Currency::ILS),
"inr" => Ok(Currency::INR),
"isk" => Ok(Currency::ISK),
"jmd" => Ok(Currency::JMD),
"jpy" => Ok(Currency::JPY),
"kes" => Ok(Currency::KES),
"kgs" => Ok(Currency::KGS),
"khr" => Ok(Currency::KHR),
"kmf" => Ok(Currency::KMF),
"krw" => Ok(Currency::KRW),
"kyd" => Ok(Currency::KYD),
"kzt" => Ok(Currency::KZT),
"lak" => Ok(Currency::LAK),
"lbp" => Ok(Currency::LBP),
"lkr" => Ok(Currency::LKR),
"lrd" => Ok(Currency::LRD),
"lsl" => Ok(Currency::LSL),
"ltl" => Ok(Currency::LTL),
"lvl" => Ok(Currency::LVL),
"mad" => Ok(Currency::MAD),
"mdl" => Ok(Currency::MDL),
"mga" => Ok(Currency::MGA),
"mkd" => Ok(Currency::MKD),
"mnt" => Ok(Currency::MNT),
"mop" => Ok(Currency::MOP),
"mro" => Ok(Currency::MRO),
"mur" => Ok(Currency::MUR),
"mvr" => Ok(Currency::MVR),
"mwk" => Ok(Currency::MWK),
"mxn" => Ok(Currency::MXN),
"myr" => Ok(Currency::MYR),
"mzn" => Ok(Currency::MZN),
"nad" => Ok(Currency::NAD),
"ngn" => Ok(Currency::NGN),
"nio" => Ok(Currency::NIO),
"nok" => Ok(Currency::NOK),
"npr" => Ok(Currency::NPR),
"nzd" => Ok(Currency::NZD),
"pab" => Ok(Currency::PAB),
"pen" => Ok(Currency::PEN),
"pgk" => Ok(Currency::PGK),
"php" => Ok(Currency::PHP),
"pkr" => Ok(Currency::PKR),
"pln" => Ok(Currency::PLN),
"pyg" => Ok(Currency::PYG),
"qar" => Ok(Currency::QAR),
"ron" => Ok(Currency::RON),
"rsd" => Ok(Currency::RSD),
"rub" => Ok(Currency::RUB),
"rwf" => Ok(Currency::RWF),
"sar" => Ok(Currency::SAR),
"sbd" => Ok(Currency::SBD),
"scr" => Ok(Currency::SCR),
"sek" => Ok(Currency::SEK),
"sgd" => Ok(Currency::SGD),
"shp" => Ok(Currency::SHP),
"sll" => Ok(Currency::SLL),
"sos" => Ok(Currency::SOS),
"srd" => Ok(Currency::SRD),
"std" => Ok(Currency::STD),
"svc" => Ok(Currency::SVC),
"szl" => Ok(Currency::SZL),<|fim▁hole|> "top" => Ok(Currency::TOP),
"try" => Ok(Currency::TRY),
"ttd" => Ok(Currency::TTD),
"twd" => Ok(Currency::TWD),
"tzs" => Ok(Currency::TZS),
"uah" => Ok(Currency::UAH),
"ugx" => Ok(Currency::UGX),
"usd" => Ok(Currency::USD),
"uyu" => Ok(Currency::UYU),
"uzs" => Ok(Currency::UZS),
"vef" => Ok(Currency::VEF),
"vnd" => Ok(Currency::VND),
"vuv" => Ok(Currency::VUV),
"wst" => Ok(Currency::WST),
"xaf" => Ok(Currency::XAF),
"xcd" => Ok(Currency::XCD),
"xof" => Ok(Currency::XOF),
"xpf" => Ok(Currency::XPF),
"yer" => Ok(Currency::YER),
"zar" => Ok(Currency::ZAR),
"zmw" => Ok(Currency::ZMW),
_ => Err(ParseCurrencyError(()))
}
}
}
#[derive(Debug)]
pub struct ParseCurrencyError(/* private */ ());
impl ::std::fmt::Display for ParseCurrencyError {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
fmt.write_str(::std::error::Error::description(self))
}
}
impl ::std::error::Error for ParseCurrencyError {
fn description(&self) -> &str {
"unknown currency code"
}
}<|fim▁end|> | "thb" => Ok(Currency::THB),
"tjs" => Ok(Currency::TJS), |
<|file_name|>DoiFetcher.java<|end_file_name|><|fim▁begin|>package org.jabref.logic.importer.fetcher;
import java.io.IOException;
import java.net.URL;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import org.jabref.logic.formatter.bibtexfields.ClearFormatter;
import org.jabref.logic.formatter.bibtexfields.NormalizePagesFormatter;
import org.jabref.logic.help.HelpFile;
import org.jabref.logic.importer.EntryBasedFetcher;
import org.jabref.logic.importer.FetcherException;<|fim▁hole|>import org.jabref.logic.importer.IdBasedFetcher;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.fileformat.BibtexParser;
import org.jabref.logic.l10n.Localization;
import org.jabref.logic.net.URLDownload;
import org.jabref.model.cleanup.FieldFormatterCleanup;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.identifier.DOI;
import org.jabref.model.util.DummyFileUpdateMonitor;
import org.jabref.model.util.OptionalUtil;
public class DoiFetcher implements IdBasedFetcher, EntryBasedFetcher {
public static final String NAME = "DOI";
private final ImportFormatPreferences preferences;
public DoiFetcher(ImportFormatPreferences preferences) {
this.preferences = preferences;
}
@Override
public String getName() {
return DoiFetcher.NAME;
}
@Override
public Optional<HelpFile> getHelpPage() {
return Optional.of(HelpFile.FETCHER_DOI);
}
@Override
public Optional<BibEntry> performSearchById(String identifier) throws FetcherException {
Optional<DOI> doi = DOI.parse(identifier);
try {
if (doi.isPresent()) {
URL doiURL = new URL(doi.get().getURIAsASCIIString());
// BibTeX data
URLDownload download = new URLDownload(doiURL);
download.addHeader("Accept", "application/x-bibtex");
String bibtexString = download.asString();
// BibTeX entry
Optional<BibEntry> fetchedEntry = BibtexParser.singleFromString(bibtexString, preferences, new DummyFileUpdateMonitor());
fetchedEntry.ifPresent(this::doPostCleanup);
return fetchedEntry;
} else {
throw new FetcherException(Localization.lang("Invalid DOI: '%0'.", identifier));
}
} catch (IOException e) {
throw new FetcherException(Localization.lang("Connection error"), e);
} catch (ParseException e) {
throw new FetcherException("Could not parse BibTeX entry", e);
}
}
private void doPostCleanup(BibEntry entry) {
new FieldFormatterCleanup(StandardField.PAGES, new NormalizePagesFormatter()).cleanup(entry);
new FieldFormatterCleanup(StandardField.URL, new ClearFormatter()).cleanup(entry);
}
@Override
public List<BibEntry> performSearch(BibEntry entry) throws FetcherException {
Optional<String> doi = entry.getField(StandardField.DOI);
if (doi.isPresent()) {
return OptionalUtil.toList(performSearchById(doi.get()));
} else {
return Collections.emptyList();
}
}
}<|fim▁end|> | |
<|file_name|>window.py<|end_file_name|><|fim▁begin|>from gi.repository import Gtk, Gdk, GdkPixbuf
(TARGET_ENTRY_TEXT, TARGET_ENTRY_PIXBUF) = range(2)
(COLUMN_TEXT, COLUMN_PIXBUF) = range(2)
DRAG_ACTION = Gdk.DragAction.COPY
class DragDropWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Drag and Drop")
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
self.add(vbox)
hbox = Gtk.Box(spacing=12)
vbox.pack_start(hbox, True, True, 0)
self.iconview = DragSourceIconView()
self.drop_area = DropArea()
hbox.pack_start(self.iconview, True, True, 0)
hbox.pack_start(self.drop_area, True, True, 0)
button_box = Gtk.Box(spacing=6)
vbox.pack_start(button_box, True, False, 0)
image_button = Gtk.RadioButton.new_with_label_from_widget(None,
"Images")
image_button.connect("toggled", self.add_image_targets)
button_box.pack_start(image_button, True, False, 0)
text_button = Gtk.RadioButton.new_with_label_from_widget(image_button,
"Text")
text_button.connect("toggled", self.add_text_targets)
button_box.pack_start(text_button, True, False, 0)
self.add_image_targets()
def add_image_targets(self, button=None):
targets = Gtk.TargetList.new([])
targets.add_image_targets(TARGET_ENTRY_PIXBUF, True)
self.drop_area.drag_dest_set_target_list(targets)
self.iconview.drag_source_set_target_list(targets)
def add_text_targets(self, button=None):
self.drop_area.drag_dest_set_target_list(None)
self.iconview.drag_source_set_target_list(None)
self.drop_area.drag_dest_add_text_targets()
self.iconview.drag_source_add_text_targets()
<|fim▁hole|> self.set_text_column(COLUMN_TEXT)
self.set_pixbuf_column(COLUMN_PIXBUF)
model = Gtk.ListStore(str, GdkPixbuf.Pixbuf)
self.set_model(model)
self.add_item("Item 1", "image-missing")
self.add_item("Item 2", "help-about")
self.add_item("Item 3", "edit-copy")
self.enable_model_drag_source(Gdk.ModifierType.BUTTON1_MASK, [],
DRAG_ACTION)
self.connect("drag-data-get", self.on_drag_data_get)
def on_drag_data_get(self, widget, drag_context, data, info, time):
selected_path = self.get_selected_items()[0]
selected_iter = self.get_model().get_iter(selected_path)
if info == TARGET_ENTRY_TEXT:
text = self.get_model().get_value(selected_iter, COLUMN_TEXT)
data.set_text(text, -1)
elif info == TARGET_ENTRY_PIXBUF:
pixbuf = self.get_model().get_value(selected_iter, COLUMN_PIXBUF)
data.set_pixbuf(pixbuf)
def add_item(self, text, icon_name):
pixbuf = Gtk.IconTheme.get_default().load_icon(icon_name, 16, 0)
self.get_model().append([text, pixbuf])
class DropArea(Gtk.Label):
def __init__(self):
Gtk.Label.__init__(self, "Drop something on me!")
self.drag_dest_set(Gtk.DestDefaults.ALL, [], DRAG_ACTION)
self.connect("drag-data-received", self.on_drag_data_received)
def on_drag_data_received(self, widget, drag_context, x, y, data, info, time):
if info == TARGET_ENTRY_TEXT:
text = data.get_text()
print("Received text: %s" % text)
elif info == TARGET_ENTRY_PIXBUF:
pixbuf = data.get_pixbuf()
width = pixbuf.get_width()
height = pixbuf.get_height()
print("Received pixbuf with width %spx and height %spx" % (width,
height))
win = DragDropWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()<|fim▁end|> | class DragSourceIconView(Gtk.IconView):
def __init__(self):
Gtk.IconView.__init__(self) |
<|file_name|>plot_string_subst_bar.py<|end_file_name|><|fim▁begin|><|fim▁hole|># Plotting performance of string_subst_.py scripts
# bar chart of relative comparison with variances as error bars
import numpy as np
import matplotlib.pyplot as plt
performance = [10.3882388499416,1,10.3212281215746]
variance = [0.790435196936213,0,0.827207394592818]
scripts = ['string_subst_1.py', 'string_subst_2.py', 'string_subst_3.py']
x_pos = np.arange(len(scripts))
plt.bar(x_pos, performance, yerr=variance, align='center', alpha=0.5)
plt.xticks(x_pos, scripts)
plt.axhline(y=1, linestyle='--', color='black')
plt.ylim([0,12])
plt.ylabel('rel. performance gain')
plt.title('String substitution - Speed improvements')
#plt.show()
plt.savefig('PNGs/string_subst_bar.png')<|fim▁end|> | |
<|file_name|>unwind-match.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license<|fim▁hole|>// Issue #945
// error-pattern:non-exhaustive match failure
fn test_box() {
@0;
}
fn test_str() {
let res = match false { true => { ~"happy" },
_ => fail!("non-exhaustive match failure") };
assert_eq!(res, ~"happy");
}
fn main() {
test_box();
test_str();
}<|fim▁end|> | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
<|file_name|>defines_69.js<|end_file_name|><|fim▁begin|>var searchData=<|fim▁hole|><|fim▁end|> | [
['img',['IMG',['../define_8h.html#a116f6464c8184676310301dc13ed1dd5',1,'define.h']]],
['items',['ITEMS',['../define_8h.html#a8e3d0b04841186d4f38b7880a9e4b5c6',1,'define.h']]]
]; |
<|file_name|>StringConverter.test.ts<|end_file_name|><|fim▁begin|>let assert = require('chai').assert;
import { StringConverter } from '../../src/convert/StringConverter';
suite('StringConverter', ()=> {
<|fim▁hole|> assert.equal('123', StringConverter.toString(123));
assert.equal('true', StringConverter.toString(true));
assert.equal('[object Object]', StringConverter.toStringWithDefault({ prop: 'xyz' }, 'xyz'));
assert.equal('xyz', StringConverter.toStringWithDefault(null, 'xyz'));
});
});<|fim▁end|> | test('To String', () => {
assert.equal(null, StringConverter.toNullableString(null));
assert.equal('xyz', StringConverter.toString('xyz'));
|
<|file_name|>bootstrap.js<|end_file_name|><|fim▁begin|>/**
* Bootstrap
* (sails.config.bootstrap)
*
* An asynchronous bootstrap function that runs before your Sails app gets lifted.
* This gives you an opportunity to set up your data model, run jobs, or perform some special logic.
*
* For more information on bootstrapping your app, check out:
* http://sailsjs.org/#!/documentation/reference/sails.config/sails.config.bootstrap.html
*/
module.exports.bootstrap = function(cb) {
// It's very important to trigger this callback method when you are finished
// with the bootstrap! (otherwise your server will never lift, since it's waiting on the bootstrap)
sails.services.passport.loadStrategies();
// CRON JOBS FOR INFLUENCERS, HASHTAGS, MENTIONS
// Runs every 15 minutes
const TIMEZONE = 'America/Los_Angeles';
<|fim▁hole|> var value = sails.config.cron[key];
new CronJob(key, value, null, true, TIMEZONE);
})
sails.config.twitterstream();
// new CronJob('00 * * * * *', function() {
// console.log(new Date(), 'You will see this message every minute.');
// }, null, true, TIMEZONE);
cb();
};<|fim▁end|> | var CronJob = require('cron').CronJob;
var cronJobs = Object.keys(sails.config.cron);
cronJobs.forEach(function(key) { |
<|file_name|>player.rs<|end_file_name|><|fim▁begin|>use crate::{
entity::{Character, Entity, EntityId, EntityPersistence, EntityRef, EntityType, StatsItem},
entity_copy_prop, entity_string_prop,
sessions::SignUpData,
};
use pbkdf2::{pbkdf2_check, pbkdf2_simple};
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Player {
#[serde(skip)]
id: EntityId,
#[serde(flatten)]
pub character: Character,
description: String,
#[serde(default)]
is_admin: bool,
name: String,
#[serde(skip)]
needs_sync: bool,
password: String,
#[serde(skip)]
session_id: Option<u64>,
#[serde(flatten)]
stats_item: StatsItem,
}
impl Player {
entity_copy_prop!(pub, is_admin, set_is_admin, bool);
entity_copy_prop!(
pub,
session_id,
set_session_id,
Option<u64>,
EntityPersistence::DontSync
);
pub fn hydrate(id: EntityId, json: &str) -> Result<Box<dyn Entity>, String> {
let mut player = serde_json::from_str::<Player>(json)
.map_err(|error| format!("parse error: {}", error))?;
player.id = id;
Ok(Box::new(player))
}
pub fn matches_password(&self, password: &str) -> bool {
matches!(pbkdf2_check(password, &self.password), Ok(()))
}
pub fn new(id: EntityId, sign_up_data: &SignUpData) -> Self {
let character = Character::from_sign_up_data(sign_up_data);
Self {
id,
character,
description: String::new(),
is_admin: false,
name: sign_up_data.user_name.clone(),
needs_sync: true,
password: match pbkdf2_simple(&sign_up_data.password, 10) {
Ok(password) => password,
Err(error) => panic!("Cannot create password hash: {:?}", error),
},
session_id: None,
stats_item: StatsItem::from_stats(sign_up_data.stats.clone()),
}
}
pub fn set_password(&mut self, password: &str) {
match pbkdf2_simple(password, 10) {
Ok(password) => {
self.password = password;
self.set_needs_sync(true);
}
Err(error) => panic!("Cannot create password hash: {:?}", error),
}
}
}
impl Entity for Player {
entity_string_prop!(name, set_name);
entity_string_prop!(description, set_description);
fn as_character(&self) -> Option<&Character> {
Some(&self.character)
}
fn as_character_mut(&mut self) -> Option<&mut Character> {
Some(&mut self.character)
}
fn as_player(&self) -> Option<&Self> {
Some(self)
}
fn as_player_mut(&mut self) -> Option<&mut Self> {<|fim▁hole|> Some(self)
}
fn dehydrate(&self) -> String {
serde_json::to_string_pretty(self).unwrap_or_else(|error| {
panic!(
"Failed to serialize entity {:?}: {:?}",
self.entity_ref(),
error
)
})
}
fn entity_ref(&self) -> EntityRef {
EntityRef::new(EntityType::Player, self.id)
}
fn id(&self) -> EntityId {
self.id
}
fn needs_sync(&self) -> bool {
self.needs_sync || self.character.needs_sync() || self.stats_item.needs_sync()
}
fn set_needs_sync(&mut self, needs_sync: bool) {
self.needs_sync = needs_sync;
if !needs_sync {
self.character.set_needs_sync(needs_sync);
self.stats_item.set_needs_sync(needs_sync);
}
}
fn set_property(&mut self, prop_name: &str, value: &str) -> Result<(), String> {
match prop_name {
"description" => self.set_description(value.to_owned()),
"isAdmin" => self.set_is_admin(value == "true"),
"name" => self.set_name(value.to_owned()),
"password" => self.set_password(value),
_ => {
return self
.character
.set_property(prop_name, value)
.or_else(|_| self.stats_item.set_property(prop_name, value))
}
}
Ok(())
}
fn to_json_value(&self) -> serde_json::Result<serde_json::Value> {
serde_json::to_value(self)
}
}<|fim▁end|> | |
<|file_name|>suggest.ts<|end_file_name|><|fim▁begin|>/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { first } from 'vs/base/common/async';
import { isFalsyOrEmpty } from 'vs/base/common/arrays';
import { assign } from 'vs/base/common/objects';
import { onUnexpectedExternalError, canceled } from 'vs/base/common/errors';
import { IEditorContribution } from 'vs/editor/common/editorCommon';
import { ITextModel } from 'vs/editor/common/model';
import { registerDefaultLanguageCommand } from 'vs/editor/browser/editorExtensions';
import { CompletionList, CompletionItemProvider, CompletionItem, CompletionProviderRegistry, CompletionContext, CompletionTriggerKind, CompletionItemKind } from 'vs/editor/common/modes';
import { Position, IPosition } from 'vs/editor/common/core/position';
import { RawContextKey } from 'vs/platform/contextkey/common/contextkey';
import { ICodeEditor } from 'vs/editor/browser/editorBrowser';
import { CancellationToken } from 'vs/base/common/cancellation';
import { Range } from 'vs/editor/common/core/range';
export const Context = {
Visible: new RawContextKey<boolean>('suggestWidgetVisible', false),
MultipleSuggestions: new RawContextKey<boolean>('suggestWidgetMultipleSuggestions', false),
MakesTextEdit: new RawContextKey('suggestionMakesTextEdit', true),
AcceptOnKey: new RawContextKey<boolean>('suggestionSupportsAcceptOnKey', true),
AcceptSuggestionsOnEnter: new RawContextKey<boolean>('acceptSuggestionOnEnter', true)
};
export interface ISuggestionItem {
position: IPosition;
suggestion: CompletionItem;
container: CompletionList;
support: CompletionItemProvider;
resolve(token: CancellationToken): Thenable<void>;
}
export type SnippetConfig = 'top' | 'bottom' | 'inline' | 'none';
let _snippetSuggestSupport: CompletionItemProvider;
export function getSnippetSuggestSupport(): CompletionItemProvider {
return _snippetSuggestSupport;
}
export function setSnippetSuggestSupport(support: CompletionItemProvider): CompletionItemProvider {
const old = _snippetSuggestSupport;
_snippetSuggestSupport = support;
return old;
}
export function provideSuggestionItems(
model: ITextModel,
position: Position,
snippetConfig: SnippetConfig = 'bottom',
onlyFrom?: CompletionItemProvider[],
context?: CompletionContext,
token: CancellationToken = CancellationToken.None
): Promise<ISuggestionItem[]> {
const allSuggestions: ISuggestionItem[] = [];
const acceptSuggestion = createSuggesionFilter(snippetConfig);
const wordUntil = model.getWordUntilPosition(position);
const defaultRange = new Range(position.lineNumber, wordUntil.startColumn, position.lineNumber, wordUntil.endColumn);
position = position.clone();
// get provider groups, always add snippet suggestion provider
const supports = CompletionProviderRegistry.orderedGroups(model);
// add snippets provider unless turned off
if (snippetConfig !== 'none' && _snippetSuggestSupport) {
supports.unshift([_snippetSuggestSupport]);
}
const suggestContext = context || { triggerKind: CompletionTriggerKind.Invoke };
// add suggestions from contributed providers - providers are ordered in groups of
// equal score and once a group produces a result the process stops
let hasResult = false;
const factory = supports.map(supports => () => {
// for each support in the group ask for suggestions
return Promise.all(supports.map(support => {
if (!isFalsyOrEmpty(onlyFrom) && onlyFrom.indexOf(support) < 0) {
return undefined;
}
return Promise.resolve(support.provideCompletionItems(model, position, suggestContext, token)).then(container => {
const len = allSuggestions.length;
if (container && !isFalsyOrEmpty(container.suggestions)) {
for (let suggestion of container.suggestions) {
if (acceptSuggestion(suggestion)) {
// fill in default range when missing
if (!suggestion.range) {
suggestion.range = defaultRange;
}
// fill in lower-case text
ensureLowerCaseVariants(suggestion);
allSuggestions.push({
position,
container,
suggestion,
support,
resolve: createSuggestionResolver(support, suggestion, model, position)
});
}
}
}
if (len !== allSuggestions.length && support !== _snippetSuggestSupport) {
hasResult = true;
}
}, onUnexpectedExternalError);
}));
});
const result = first(factory, () => {
// stop on result or cancellation
return hasResult || token.isCancellationRequested;
}).then(() => {
if (token.isCancellationRequested) {
return Promise.reject(canceled());
}
return allSuggestions.sort(getSuggestionComparator(snippetConfig));
});
// result.then(items => {
// console.log(model.getWordUntilPosition(position), items.map(item => `${item.suggestion.label}, type=${item.suggestion.type}, incomplete?${item.container.incomplete}, overwriteBefore=${item.suggestion.overwriteBefore}`));
// return items;
// }, err => {
// console.warn(model.getWordUntilPosition(position), err);
// });
return result;
}
export function ensureLowerCaseVariants(suggestion: CompletionItem) {
if (!suggestion._labelLow) {
suggestion._labelLow = suggestion.label.toLowerCase();
}
if (suggestion.sortText && !suggestion._sortTextLow) {
suggestion._sortTextLow = suggestion.sortText.toLowerCase();
}
if (suggestion.filterText && !suggestion._filterTextLow) {
suggestion._filterTextLow = suggestion.filterText.toLowerCase();
}
}
function createSuggestionResolver(provider: CompletionItemProvider, suggestion: CompletionItem, model: ITextModel, position: Position): (token: CancellationToken) => Promise<void> {
return (token) => {
if (typeof provider.resolveCompletionItem === 'function') {
return Promise.resolve(provider.resolveCompletionItem(model, position, suggestion, token)).then(value => { assign(suggestion, value); });
} else {
return Promise.resolve(void 0);
}
};
}
function createSuggesionFilter(snippetConfig: SnippetConfig): (candidate: CompletionItem) => boolean {
if (snippetConfig === 'none') {
return suggestion => suggestion.kind !== CompletionItemKind.Snippet;
} else {
return () => true;
}
}
function defaultComparator(a: ISuggestionItem, b: ISuggestionItem): number {
// check with 'sortText'
if (a.suggestion._sortTextLow && b.suggestion._sortTextLow) {
if (a.suggestion._sortTextLow < b.suggestion._sortTextLow) {
return -1;
} else if (a.suggestion._sortTextLow > b.suggestion._sortTextLow) {
return 1;
}
}
// check with 'label'
if (a.suggestion.label < b.suggestion.label) {
return -1;
} else if (a.suggestion.label > b.suggestion.label) {
return 1;
}
// check with 'type'
return a.suggestion.kind - b.suggestion.kind;
}
function snippetUpComparator(a: ISuggestionItem, b: ISuggestionItem): number {
if (a.suggestion.kind !== b.suggestion.kind) {
if (a.suggestion.kind === CompletionItemKind.Snippet) {
return -1;
} else if (b.suggestion.kind === CompletionItemKind.Snippet) {
return 1;
}
}
return defaultComparator(a, b);
}
function snippetDownComparator(a: ISuggestionItem, b: ISuggestionItem): number {
if (a.suggestion.kind !== b.suggestion.kind) {
if (a.suggestion.kind === CompletionItemKind.Snippet) {
return 1;
} else if (b.suggestion.kind === CompletionItemKind.Snippet) {
return -1;
}
}
return defaultComparator(a, b);
}
export function getSuggestionComparator(snippetConfig: SnippetConfig): (a: ISuggestionItem, b: ISuggestionItem) => number {<|fim▁hole|> } else if (snippetConfig === 'bottom') {
return snippetDownComparator;
} else {
return defaultComparator;
}
}
registerDefaultLanguageCommand('_executeCompletionItemProvider', (model, position, args) => {
const result: CompletionList = {
incomplete: false,
suggestions: []
};
let resolving: Thenable<any>[] = [];
let maxItemsToResolve = args['maxItemsToResolve'] || 0;
return provideSuggestionItems(model, position).then(items => {
for (const item of items) {
if (resolving.length < maxItemsToResolve) {
resolving.push(item.resolve(CancellationToken.None));
}
result.incomplete = result.incomplete || item.container.incomplete;
result.suggestions.push(item.suggestion);
}
}).then(() => {
return Promise.all(resolving);
}).then(() => {
return result;
});
});
interface SuggestController extends IEditorContribution {
triggerSuggest(onlyFrom?: CompletionItemProvider[]): void;
}
let _provider = new class implements CompletionItemProvider {
onlyOnceSuggestions: CompletionItem[] = [];
provideCompletionItems(): CompletionList {
let suggestions = this.onlyOnceSuggestions.slice(0);
let result = { suggestions };
this.onlyOnceSuggestions.length = 0;
return result;
}
};
CompletionProviderRegistry.register('*', _provider);
export function showSimpleSuggestions(editor: ICodeEditor, suggestions: CompletionItem[]) {
setTimeout(() => {
_provider.onlyOnceSuggestions.push(...suggestions);
editor.getContribution<SuggestController>('editor.contrib.suggestController').triggerSuggest([_provider]);
}, 0);
}<|fim▁end|> | if (snippetConfig === 'top') {
return snippetUpComparator; |
<|file_name|>upload.js<|end_file_name|><|fim▁begin|>'use strict'
// untuk status uploader
const STATUS_INITIAL = 0
const STATUS_SAVING = 1
const STATUS_SUCCESS = 2
const STATUS_FAILED = 3
// base url api
const BASE_URL = 'http://localhost:3000'
const app = new Vue({
el: '#app',
data: {
message: 'Hello',
currentStatus: null,
uploadError: null,
uploadFieldName: 'image',
uploadedFiles: [],
files:[]
},
computed: {
isInitial() {
return this.currentStatus === STATUS_INITIAL;
},
isSaving() {
return this.currentStatus === STATUS_SAVING;
},
isSuccess() {
return this.currentStatus === STATUS_SUCCESS;
},
isFailed() {
return this.currentStatus === STATUS_FAILED;
}
}, // computed
methods: {
// reset form to initial state
getAllFile () {
let self = this
axios.get(`${BASE_URL}/files`)
.then(response => {
self.files = response.data
})
}, // getAllFile()
deleteFile (id) {
axios.delete(`${BASE_URL}/files/${id}`)
.then(r => {
this.files.splice(id, 1)
})
},
reset() {
this.currentStatus = STATUS_INITIAL;
this.uploadedFiles = [];
this.uploadError = null;
}, // reset()
// upload data to the server
save(formData) {
console.log('save')
console.log('form data', formData)
this.currentStatus = STATUS_SAVING;
axios.post(`${BASE_URL}/files/add`, formData)
.then(up => {
// console.log('up', up)<|fim▁hole|> this.currentStatus = STATUS_SUCCESS
})
.catch(e => {
console.log(e.response.data)
this.currentStatus = STATUS_FAILED
})
}, // save()
filesChange(fieldName, fileList) {
// handle file changes
const formData = new FormData();
formData.append('image', fileList[0])
console.log(formData)
this.save(formData);
} // filesChange()
}, // methods
mounted () {
this.reset()
},
created () {
this.getAllFile()
}
})<|fim▁end|> | this.uploadedFiles = [].concat(up) |
<|file_name|>g++.py<|end_file_name|><|fim▁begin|>"""SCons.Tool.g++
Tool-specific initialization for g++.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE<|fim▁hole|># NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/g++.py 3842 2008/12/20 22:59:52 scons"
import os.path
import re
import subprocess
import SCons.Tool
import SCons.Util
cplusplus = __import__('c++', globals(), locals(), [])
compilers = ['g++']
def generate(env):
"""Add Builders and construction variables for g++ to an Environment."""
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
cplusplus.generate(env)
env['CXX'] = env.Detect(compilers)
# platform specific settings
if env['PLATFORM'] == 'aix':
env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS -mminimal-toc')
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
env['SHOBJSUFFIX'] = '$OBJSUFFIX'
elif env['PLATFORM'] == 'hpux':
env['SHOBJSUFFIX'] = '.pic.o'
elif env['PLATFORM'] == 'sunos':
env['SHOBJSUFFIX'] = '.pic.o'
# determine compiler version
if env['CXX']:
#pipe = SCons.Action._subproc(env, [env['CXX'], '-dumpversion'],
pipe = SCons.Action._subproc(env, [env['CXX'], '--version'],
stdin = 'devnull',
stderr = 'devnull',
stdout = subprocess.PIPE)
if pipe.wait() != 0: return
# -dumpversion was added in GCC 3.0. As long as we're supporting
# GCC versions older than that, we should use --version and a
# regular expression.
#line = pipe.stdout.read().strip()
#if line:
# env['CXXVERSION'] = line
line = pipe.stdout.readline()
match = re.search(r'[0-9]+(\.[0-9]+)+', line)
if match:
env['CXXVERSION'] = match.group(0)
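        # Illustrative example (hypothetical compiler output): if the first line of
        # `g++ --version` were "g++ (GCC) 4.2.1", the regex above would set
        # env['CXXVERSION'] to '4.2.1'.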
def exists(env):
return env.Detect(compilers)<|fim▁end|> | # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
<|file_name|>issue-5100.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum A { B, C }
fn main() {
match (true, false) {
B => (), //~ ERROR expected `(bool,bool)` but found an enum or structure pattern
_ => ()
}
match (true, false) {
(true, false, false) => () //~ ERROR mismatched types: expected `(bool,bool)` but found tuple (expected a tuple with 2 elements but found one with 3 elements)
}
match (true, false) {
@(true, false) => () //~ ERROR mismatched types: expected `(bool,bool)` but found an @-box pattern
}
match (true, false) {
~(true, false) => () //~ ERROR mismatched types: expected `(bool,bool)` but found a ~-box pattern
}
match (true, false) {
&(true, false) => () //~ ERROR mismatched types: expected `(bool,bool)` but found an &-pointer pattern
}
let v = [('a', 'b') //~ ERROR expected function but found `(char,char)`
('c', 'd'),
('e', 'f')];
for &(x,y) in v.iter() {} // should be OK
// Make sure none of the errors above were fatal
let x: char = true; //~ ERROR expected `char` but found `bool`
}<|fim▁end|> | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT. |
<|file_name|>phage_annotation_table.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# vim: set fileencoding=utf-8
import os
import argparse
from gff3 import genes, get_gff3_id, get_rbs_from, feature_test_true, feature_lambda, feature_test_type
from cpt_gffParser import gffParse, gffWrite
from Bio import SeqIO
from jinja2 import Environment, FileSystemLoader
import logging
from math import floor
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(name="pat")
# Path to script, required because of Galaxy.
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
# Path to the HTML template for the report
def genes_all(feature_list, feature_type=["gene"], sort=False):
"""
Simple filter to extract gene features from the feature set.
"""
if not sort:
for x in feature_lambda(
feature_list, feature_test_type, {"types": feature_type}, subfeatures=True
):
yield x
else:
data = list(genes_all(feature_list, feature_type, sort=False))
data = sorted(data, key=lambda feature: feature.location.start)
for x in data:
yield x
def checkSubs(feature, qualName):
subFeats = []
res = ""
subFeats = feature.sub_features
while (len(subFeats) > 0):
for feat in subFeats:
for i in feat.qualifiers.keys():
for j in qualName:
if i == j:
if res == "":
res = feat.qualifiers[i][0]
else:
res += "; " + feat.qualifiers[i][0]
if res != "":
return res
tempFeats = []
for feat in subFeats: # Should be breadth-first results
for x in feat.sub_features:
tempFeats.append(x)
subFeats = tempFeats
return res
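# Illustrative example (hypothetical feature layout): for a gene whose mRNA
# sub-feature carries product="portal protein", checkSubs(gene, ["product",
# "Product"]) walks the sub-feature tree breadth-first and returns
# "portal protein"; it returns "" when no sub-feature carries the qualifier.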
def annotation_table_report(record, types, wanted_cols, gaf_data, searchSubs):
getTypes = []
for x in [y.strip() for y in types.split(",")]:
getTypes.append(x)
getTypes.append("gene")
sorted_features = list(genes_all(record.features, getTypes, sort=True))
if wanted_cols is None or len(wanted_cols.strip()) == 0:
return [], []
useSubs = searchSubs
def rid(record, feature):
"""Organism ID
"""
return record.id
def id(record, feature):
"""ID
"""
return feature.id
def featureType(record, feature):
"""Type
"""
return feature.type
def name(record, feature):
"""Name
"""
for x in ["Name", "name"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
if useSubs:
res = checkSubs(feature, ["Name", "name"])
if res != "":
return res
return "None"
def start(record, feature):
"""Boundary
"""
return str(feature.location.start + 1)
def end(record, feature):
"""Boundary
"""
return str(feature.location.end)
def location(record, feature):
"""Location
"""
return str(feature.location.start + 1) + "..{0.end}".format(feature.location)
def length(record, feature):
"""CDS Length (AA)
"""
if feature.type == "CDS":
cdss = [feature]
else:
cdss = list(genes(feature.sub_features, feature_type="CDS", sort=True))
if cdss == []:
return "None"
res = (sum([len(cds) for cds in cdss]) / 3) - 1
if floor(res) == res:
res = int(res)
return str(res)
def notes(record, feature):
"""User entered Notes"""
for x in ["Note", "note", "Notes", "notes"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
if useSubs:
res = checkSubs(feature, ["Note", "note", "Notes", "notes"])
if res != "":
return res
return "None"
def date_created(record, feature):
"""Created"""
return feature.qualifiers.get("date_creation", ["None"])[0]
def date_last_modified(record, feature):
"""Last Modified"""
res = feature.qualifiers.get("date_last_modified", ["None"])[0]
if res != "None":
return res
if useSubs:
res = checkSubs(feature, ["date_last_modified"])
if res != "":
return res
return "None"
def description(record, feature):
"""Description"""
res = feature.qualifiers.get("description", ["None"])[0]
if res != "None":
return res
if useSubs:
res = checkSubs(feature, ["description"])
if res != "":
return res
return "None"
def owner(record, feature):
"""Owner
User who created the feature. In a 464 scenario this may be one of
the TAs."""
for x in ["Owner", "owner"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
if useSubs:
res = checkSubs(feature, ["Owner", "owner"])
if res != "":
return res
return "None"
def product(record, feature):
"""Product
User entered product qualifier (collects "Product" and "product"
entries)"""
"""User entered Notes"""
for x in ["product", "Product"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
if useSubs:
res = checkSubs(feature, ["product", "Product"])
if res != "":
return res
return "None"
def note(record, feature):
"""Note
User entered Note qualifier(s)"""
return feature.qualifiers.get("Note", [])
def strand(record, feature):
"""Strand
"""
return "+" if feature.location.strand > 0 else "-"
def sd_spacing(record, feature):
"""Shine-Dalgarno spacing
"""
rbss = get_rbs_from(gene)
if len(rbss) == 0:
return "None"
else:
resp = []
for rbs in rbss:
cdss = list(genes(feature.sub_features, feature_type="CDS", sort=True))
if len(cdss) == 0:
return "No CDS"
if rbs.location.strand > 0:
distance = min(
cdss, key=lambda x: x.location.start - rbs.location.end
)
distance_val = str(distance.location.start - rbs.location.end)
resp.append(distance_val)
else:
distance = min(
cdss, key=lambda x: x.location.end - rbs.location.start
)
distance_val = str(rbs.location.start - distance.location.end)
resp.append(distance_val)
if len(resp) == 1:
return str(resp[0])
return resp
def sd_seq(record, feature):
"""Shine-Dalgarno sequence
"""
rbss = get_rbs_from(gene)
if len(rbss) == 0:
return "None"
else:
resp = []
for rbs in rbss:
resp.append(str(rbs.extract(record).seq))
if len(resp) == 1:
return str(resp[0])
else:
return resp
def start_codon(record, feature):
"""Start Codon
"""
if feature.type == "CDS":
cdss = [feature]
else:
cdss = list(genes(feature.sub_features, feature_type="CDS", sort=True))
data = [x for x in cdss]
if len(data) == 1:
return str(data[0].extract(record).seq[0:3])
else:
return [
"{0} ({1.location.start}..{1.location.end}:{1.location.strand})".format(
x.extract(record).seq[0:3], x
)
for x in data
]
def stop_codon(record, feature):
"""Stop Codon
"""
return str(feature.extract(record).seq[-3:])
def dbxrefs(record, feature):
"""DBxrefs
"""
"""User entered Notes"""
for x in ["Dbxref", "db_xref", "DB_xref", "DBxref", "DB_Xref", "DBXref"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
return "None"
def upstream_feature(record, feature):
"""Next gene upstream"""
if feature.strand > 0:
upstream_features = [
x for x in sorted_features if (x.location.start < feature.location.start and x.type == "gene" and x.strand == feature.strand)
]
if len(upstream_features) > 0:
foundSelf = False
featCheck = upstream_features[-1].sub_features
for x in featCheck:
if x == feature:
foundSelf = True
break
featCheck = featCheck + x.sub_features
if foundSelf:
if len(upstream_features) > 1:
return upstream_features[-2]
return None
return upstream_features[-1]
else:
return None
else:
upstream_features = [
x for x in sorted_features if (x.location.end > feature.location.end and x.type == "gene" and x.strand == feature.strand)
]
if len(upstream_features) > 0:
foundSelf = False
featCheck = upstream_features[0].sub_features
for x in featCheck:
if x == feature:
foundSelf = True
break
featCheck = featCheck + x.sub_features
if foundSelf:
if len(upstream_features) > 1:
return upstream_features[1]
return None
return upstream_features[0]
else:
return None
def upstream_feature__name(record, feature):
"""Next gene upstream"""
up = upstream_feature(record, feature)
if up:
return str(up.id)
return "None"
def ig_dist(record, feature):
"""Distance to next upstream gene on same strand"""
up = upstream_feature(record, feature)
if up:
dist = None
if feature.strand > 0:
dist = feature.location.start - up.location.end
else:
dist = up.location.start - feature.location.end
return str(dist)
else:
return "None"
def _main_gaf_func(record, feature, gaf_data, attr):
if feature.id in gaf_data:
return [x[attr] for x in gaf_data[feature.id]]
return []
def gaf_annotation_extension(record, feature, gaf_data):
"""GAF Annotation Extension
Contains cross references to other ontologies that can be used
to qualify or enhance the annotation. The cross-reference is
prefaced by an appropriate GO relationship; references to
multiple ontologies can be entered. For example, if a gene
product is localized to the mitochondria of lymphocytes, the GO
ID (column 5) would be mitochondrion ; GO:0005439, and the
annotation extension column would contain a cross-reference to
the term lymphocyte from the Cell Type Ontology.
"""
return _main_gaf_func(record, feature, gaf_data, "annotation_extension")
def gaf_aspect(record, feature, gaf_data):
"""GAF Aspect code
E.g. P (biological process), F (molecular function) or C (cellular component)
"""
return _main_gaf_func(record, feature, gaf_data, "aspect")<|fim▁hole|> """GAF Creating Organisation
"""
return _main_gaf_func(record, feature, gaf_data, "assigned_by")
def gaf_date(record, feature, gaf_data):
"""GAF Creation Date
"""
return _main_gaf_func(record, feature, gaf_data, "date")
def gaf_db(record, feature, gaf_data):
"""GAF DB
"""
return _main_gaf_func(record, feature, gaf_data, "db")
def gaf_db_reference(record, feature, gaf_data):
"""GAF DB Reference
"""
return _main_gaf_func(record, feature, gaf_data, "db_reference")
def gaf_evidence_code(record, feature, gaf_data):
"""GAF Evidence Code
"""
return _main_gaf_func(record, feature, gaf_data, "evidence_code")
def gaf_go_id(record, feature, gaf_data):
"""GAF GO ID
"""
return _main_gaf_func(record, feature, gaf_data, "go_id")
def gaf_go_term(record, feature, gaf_data):
"""GAF GO Term
"""
return _main_gaf_func(record, feature, gaf_data, "go_term")
def gaf_id(record, feature, gaf_data):
"""GAF ID
"""
return _main_gaf_func(record, feature, gaf_data, "id")
def gaf_notes(record, feature, gaf_data):
"""GAF Notes
"""
return _main_gaf_func(record, feature, gaf_data, "notes")
def gaf_owner(record, feature, gaf_data):
"""GAF Creator
"""
return _main_gaf_func(record, feature, gaf_data, "owner")
def gaf_with_or_from(record, feature, gaf_data):
"""GAF With/From
"""
return _main_gaf_func(record, feature, gaf_data, "with_or_from")
cols = []
data = []
funcs = []
lcl = locals()
for x in [y.strip().lower() for y in wanted_cols.split(",")]:
if not x:
continue
if x == "type":
x = "featureType"
if x in lcl:
funcs.append(lcl[x])
# Keep track of docs
func_doc = lcl[x].__doc__.strip().split("\n\n")
# If there's a double newline, assume following text is the
# "help" and the first part is the "name". Generate empty help
# if not provided
if len(func_doc) == 1:
func_doc += [""]
cols.append(func_doc)
elif "__" in x:
chosen_funcs = [lcl[y] for y in x.split("__")]
func_doc = [
" of ".join(
[y.__doc__.strip().split("\n\n")[0] for y in chosen_funcs[::-1]]
)
]
cols.append(func_doc)
funcs.append(chosen_funcs)
for gene in genes_all(record.features, getTypes, sort=True):
row = []
for func in funcs:
if isinstance(func, list):
# If we have a list of functions, repeatedly apply them
value = gene
for f in func:
if value is None:
value = "None"
break
value = f(record, value)
else:
# Otherwise just apply the lone function
if func.__name__.startswith("gaf_"):
value = func(record, gene, gaf_data)
else:
value = func(record, gene)
if isinstance(value, list):
collapsed_value = ", ".join(value)
value = [str(collapsed_value)]#.encode("unicode_escape")]
else:
value = str(value)#.encode("unicode_escape")
row.append(value)
# print row
data.append(row)
return data, cols
def parseGafData(file):
cols = []
data = {}
# '10d04a01-5ed8-49c8-b724-d6aa4df5a98d': {
# 'annotation_extension': '',
# 'aspect': '',
# 'assigned_by': 'CPT',
# 'date': '2017-05-04T16:25:22.161916Z',
# 'db': 'UniProtKB',
# 'db_reference': 'GO_REF:0000100',
# 'evidence_code': 'ISA',
# 'gene': '0d307196-833d-46e8-90e9-d80f7a041d88',
# 'go_id': 'GO:0039660',
# 'go_term': 'structural constituent of virion',
# 'id': '10d04a01-5ed8-49c8-b724-d6aa4df5a98d',
# 'notes': 'hit was putative minor structural protein',
# 'owner': '[email protected]',
# 'with_or_from': 'UNIREF90:B2ZYZ7'
# },
for row in file:
if row.startswith("#"):
# Header
cols = (
row.strip().replace("# ", "").replace("GO Term", "go_term").split("\t")
)
else:
line = row.strip().split("\t")
tmp = dict(zip(cols, line))
if "gene" not in tmp.keys():
continue
if tmp["gene"] not in data:
data[tmp["gene"]] = []
data[tmp["gene"]].append(tmp)
return data
def evaluate_and_report(
annotations,
genome,
types="gene",
reportTemplateName="phage_annotation_validator.html",
annotationTableCols="",
gafData=None,
searchSubs = False,
):
"""
Generate our HTML evaluation of the genome
"""
# Get features from GFF file
seq_dict = SeqIO.to_dict(SeqIO.parse(genome, "fasta"))
# Get the first GFF3 record
# TODO: support multiple GFF3 files.
at_table_data = []
gaf = {}
if gafData:
gaf = parseGafData(gafData)
for record in gffParse(annotations, base_dict=seq_dict):
if reportTemplateName.endswith(".html"):
record.id = record.id.replace(".", "-")
log.info("Producing an annotation table for %s" % record.id)
annotation_table_data, annotation_table_col_names = annotation_table_report(
record, types, annotationTableCols, gaf, searchSubs
)
at_table_data.append((record, annotation_table_data))
# break
# This is data that will go into our HTML template
kwargs = {
"annotation_table_data": at_table_data,
"annotation_table_col_names": annotation_table_col_names,
}
env = Environment(
loader=FileSystemLoader(SCRIPT_PATH), trim_blocks=True, lstrip_blocks=True
)
if reportTemplateName.endswith(".html"):
env.filters["nice_id"] = str(get_gff3_id).replace(".", "-")
else:
env.filters["nice_id"] = get_gff3_id
def join(listy):
return "\n".join(listy)
env.filters.update({"join": join})
tpl = env.get_template(reportTemplateName)
return tpl.render(**kwargs).encode("utf-8")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="rebase gff3 features against parent locations", epilog=""
)
parser.add_argument(
"annotations", type=argparse.FileType("r"), help="Parent GFF3 annotations"
)
parser.add_argument("genome", type=argparse.FileType("r"), help="Genome Sequence")
parser.add_argument(
"--types",
help="Select extra types to display in output (Will always include gene)",
)
parser.add_argument(
"--reportTemplateName",
help="Report template file name",
default="phageqc_report_full.html",
)
parser.add_argument(
"--annotationTableCols",
help="Select columns to report in the annotation table output format",
)
parser.add_argument(
"--gafData", help="CPT GAF-like table", type=argparse.FileType("r")
)
parser.add_argument(
"--searchSubs", help="Attempt to populate fields from sub-features if qualifier is empty", action="store_true"
)
args = parser.parse_args()
print(evaluate_and_report(**vars(args)).decode("utf-8"))<|fim▁end|> |
def gaf_assigned_by(record, feature, gaf_data): |
<|file_name|>speed_limit_exceeded_by_hour.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
from scripts.db_api import accident
def usa_query(hour):
return '''
SELECT count(*), (select count(*) from accident
join vehicle on(acc_id = accident.id)
where country = 'USA'
and vehicle.speed > accident.speed_limit<|fim▁hole|>where country = 'USA' and date_part('hour', timestamp) = {0};
'''.format(hour)
def get_value(age, dictionary):
if age not in dictionary:
return 0
return dictionary[age]
if __name__ == '__main__':
print('HOUR\tALL\tEXCEEDED')
for i in xrange(0, 24):
usa_count = accident.execute_query(usa_query(i))
print('{0}\t{1}\t{2}'.format(i, usa_count[0][0], usa_count[0][1]))<|fim▁end|> | and vehicle.speed > -1
and accident.speed_limit > 0
and date_part('hour', timestamp) = {0}) as exceeded
from accident |
<|file_name|>t242.py<|end_file_name|><|fim▁begin|>class O(object): pass
class A(O): pass
class B(O): pass
class C(O): pass<|fim▁hole|>class D(O): pass
class E(O): pass
class K1(A,B,C): pass
class K2(D,B,E): pass
class K3(D,A): pass
class Z(K1,K2,K3): pass
print K1.__mro__
print K2.__mro__
print K3.__mro__
print Z.__mro__<|fim▁end|> | |
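# For reference, C3 linearization should give (assuming new-style class MRO):
#   K1: K1, A, B, C, O, object
#   K2: K2, D, B, E, O, object
#   K3: K3, D, A, O, object
#   Z:  Z, K1, K2, K3, D, A, B, C, E, O, object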
<|file_name|>virtual_machine_scale_set_extension_profile.go<|end_file_name|><|fim▁begin|>package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/errors"
)
// VirtualMachineScaleSetExtensionProfile Describes a virtual machine scale set extension profile.
// swagger:model VirtualMachineScaleSetExtensionProfile
type VirtualMachineScaleSetExtensionProfile struct {
// The virtual machine scale set child extension resources.
Extensions []*VirtualMachineScaleSetExtension `json:"extensions"`
}
// Validate validates this virtual machine scale set extension profile
func (m *VirtualMachineScaleSetExtensionProfile) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateExtensions(formats); err != nil {
// prop
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *VirtualMachineScaleSetExtensionProfile) validateExtensions(formats strfmt.Registry) error {
if swag.IsZero(m.Extensions) { // not required
return nil
}
for i := 0; i < len(m.Extensions); i++ {
if swag.IsZero(m.Extensions[i]) { // not required
continue
}
if m.Extensions[i] != nil {
if err := m.Extensions[i].Validate(formats); err != nil {
return err
}
}<|fim▁hole|>
return nil
}<|fim▁end|> |
} |
<|file_name|>compaction_filter.rs<|end_file_name|><|fim▁begin|>//! `CompactionFilter` allows an application to modify/delete a key-value at
//! the time of compaction.
use std::os::raw::{c_char, c_int};
use rocks_sys as ll;
#[repr(C)]
#[derive(Debug, PartialEq, PartialOrd, Eq, Ord, Copy, Clone)]
pub enum ValueType {
Value = 0,
MergeOperand = 1,
}
#[derive(Debug)]
pub enum Decision {
Keep,
Remove,
ChangeValue(Vec<u8>),
RemoveAndSkipUntil(Vec<u8>),
}
impl Decision {
// to C Decision type
fn to_c(&self) -> c_int {
match *self {
Decision::Keep => 0,
Decision::Remove => 1,
Decision::ChangeValue(_) => 2,
Decision::RemoveAndSkipUntil(_) => 3,
}
}
}
/// `CompactionFilter` allows an application to modify/delete a key-value at
/// the time of compaction.
pub trait CompactionFilter {
// The compaction process invokes this
// method for kv that is being compacted. A return value
// of false indicates that the kv should be preserved in the
// output of this compaction run and a return value of true
// indicates that this key-value should be removed from the
// output of the compaction. The application can inspect
// the existing value of the key and make decision based on it.
//
// Key-Values that are results of merge operation during compaction are not
// passed into this function. Currently, when you have a mix of Put()s and
// Merge()s on a same key, we only guarantee to process the merge operands
// through the compaction filters. Put()s might be processed, or might not.
//
// When the value is to be preserved, the application has the option
// to modify the existing_value and pass it back through new_value.
// value_changed needs to be set to true in this case.
//
// If you use snapshot feature of RocksDB (i.e. call GetSnapshot() API on a
// DB* object), CompactionFilter might not be very useful for you. Due to
// guarantees we need to maintain, compaction process will not call Filter()
// on any keys that were written before the latest snapshot. In other words,
// compaction will only call Filter() on keys written after your most recent
// call to GetSnapshot(). In most cases, Filter() will not be called very
// often. This is something we're fixing. See the discussion at:
// https://www.facebook.com/groups/mysqlonrocksdb/permalink/999723240091865/
//
// If multithreaded compaction is being used *and* a single CompactionFilter
// instance was supplied via Options::compaction_filter, this method may be
// called from different threads concurrently. The application must ensure
// that the call is thread-safe.
//
// If the CompactionFilter was created by a factory, then it will only ever
// be used by a single thread that is doing the compaction run, and this
// call does not need to be thread-safe. However, multiple filters may be
// in existence and operating concurrently.
//
// The last paragraph is not true if you set max_subcompactions to more than
// 1. In that case, subcompaction from multiple threads may call a single
// CompactionFilter concurrently.
//
// For rust:
// - None: false, indicates that the kv should be preserved in the output of this compaction run.
// - Some(None): true, indicates that this key-value should be removed from the output of the
// compaction.
// - Some(Some(vec![])): modify the existing_value and pass it back through new_value.
// fn filter(&self, level: u32, key: &[u8], existing_value: &[u8]) -> Option<Option<Vec<u8>>> {
// None
// }
//
// The compaction process invokes this method on every merge operand. If this
// method returns true, the merge operand will be ignored and not written out
// in the compaction output
//
// Note: If you are using a TransactionDB, it is not recommended to implement
// FilterMergeOperand(). If a Merge operation is filtered out, TransactionDB
// may not realize there is a write conflict and may allow a Transaction to
// Commit that should have failed. Instead, it is better to implement any
// Merge filtering inside the MergeOperator.
// fn filter_merge_operand(&self, level: u32, key: &[u8], operand: &[u8]) -> bool {
// false
// }
//
/// An extended API. Called for both values and merge operands.
/// Allows changing value and skipping ranges of keys.
/// The default implementation uses Filter() and FilterMergeOperand().
/// If you're overriding this method, no need to override the other two.
/// `value_type` indicates whether this key-value corresponds to a normal
/// value (e.g. written with Put()) or a merge operand (written with Merge()).
///
/// Possible return values:
/// * kKeep - keep the key-value pair.
/// * kRemove - remove the key-value pair or merge operand.
/// * kChangeValue - keep the key and change the value/operand to *new_value.
/// * kRemoveAndSkipUntil - remove this key-value pair, and also remove all key-value pairs
/// with key in [key, *skip_until). This range of keys will be skipped without reading,
/// potentially saving some IO operations compared to removing the keys one by one.
///
/// *skip_until <= key is treated the same as Decision::kKeep
/// (since the range [key, *skip_until) is empty).
///
/// The keys are skipped even if there are snapshots containing them,
/// as if IgnoreSnapshots() was true; i.e. values removed
/// by kRemoveAndSkipUntil can disappear from a snapshot - beware
/// if you're using TransactionDB or DB::GetSnapshot().
///
/// Another warning: if value for a key was overwritten or merged into
/// (multiple Put()s or Merge()s), and compaction filter skips this key
/// with kRemoveAndSkipUntil, it's possible that it will remove only
/// the new value, exposing the old value that was supposed to be
/// overwritten.
///
/// If you use kRemoveAndSkipUntil, consider also reducing
/// compaction_readahead_size option.
///
/// Note: If you are using a TransactionDB, it is not recommended to filter
/// out or modify merge operands (ValueType::kMergeOperand).
/// If a merge operation is filtered out, TransactionDB may not realize there
/// is a write conflict and may allow a Transaction to Commit that should have
/// failed. Instead, it is better to implement any Merge filtering inside the
/// MergeOperator.
///
/// Rust:
/// Decision for detailed return type.
fn filter(&mut self, level: i32, key: &[u8], value_type: ValueType, existing_value: &[u8]) -> Decision {
Decision::Keep
}
/// This function is deprecated. Snapshots will always be ignored for
/// compaction filters, because we realized that not ignoring snapshots doesn't
    /// provide the guarantee we initially thought it would provide. Repeatable
    /// reads will not be guaranteed anyway. If you override the function and
    /// return false, we will fail the compaction.
fn ignore_snapshots(&self) -> bool {
true
}
/// Returns a name that identifies this compaction filter.
/// The name will be printed to LOG file on start up for diagnosis.
fn name(&self) -> &str {
"RustCompactionFilterV2\0"
}
}
/// Each compaction will create a new `CompactionFilter` allowing the
/// application to know about different compactions
pub trait CompactionFilterFactory {
fn create_compaction_filter(&self, context: &Context) -> Box<dyn CompactionFilter>;
/// Returns a name that identifies this compaction filter factory.
fn name(&self) -> &str {
"RustCompactionFilterFactory\0"
}
}
/// Context information of a compaction run
#[repr(C)]
pub struct Context {
/// Does this compaction run include all data files
pub is_full_compaction: bool,
/// Is this compaction requested by the client (true),
/// or is it occurring as an automatic compaction process
pub is_manual_compaction: bool,
/// Which column family this compaction is for.
pub column_family_id: u32,
}
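// Illustrative sketch only (not part of the original module): a minimal no-op
// filter plus a factory that hands out one instance per compaction run. The
// type names are hypothetical; the empty impl relies on the default trait
// methods defined above, so every key is kept.
#[allow(dead_code)]
struct NoopFilter;
impl CompactionFilter for NoopFilter {}
#[allow(dead_code)]
struct NoopFilterFactory;
impl CompactionFilterFactory for NoopFilterFactory {
    fn create_compaction_filter(&self, _context: &Context) -> Box<dyn CompactionFilter> {
        // each compaction run gets a fresh boxed filter
        Box::new(NoopFilter)
    }
}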
// call rust fn in C
#[doc(hidden)]
pub mod c {
use super::*;
#[no_mangle]
#[allow(mutable_transmutes)]
pub unsafe extern "C" fn rust_compaction_filter_call(
f: *mut (),
level: c_int,
key: &&[u8], // *Slice
value_type: ValueType,
existing_value: &&[u8], // *Slice
new_value: *mut (), // *std::string
skip_until: *mut (),
) -> c_int {
assert!(!f.is_null());
// FIXME: borrow as mutable
let filter = f as *mut &mut (dyn CompactionFilter + Sync);
// must be the same as C part
match (*filter).filter(level, key, value_type, existing_value) {
Decision::Keep => 0,
Decision::Remove => 1,
Decision::ChangeValue(nval) => {
ll::cxx_string_assign(new_value as *mut _, nval.as_ptr() as *const _, nval.len());
2
},
Decision::RemoveAndSkipUntil(skip) => {
ll::cxx_string_assign(skip_until as *mut _, skip.as_ptr() as *const _, skip.len());
3
},
}
}
#[no_mangle]
pub unsafe extern "C" fn rust_compaction_filter_drop(f: *mut ()) {
assert!(!f.is_null());
let filter = f as *mut &(dyn CompactionFilter + Sync);
Box::from_raw(filter);
}
#[no_mangle]
pub unsafe extern "C" fn rust_compaction_filter_name(f: *mut ()) -> *const c_char {
assert!(!f.is_null());
let filter = f as *mut &(dyn CompactionFilter + Sync);
(*filter).name().as_ptr() as _
}
#[no_mangle]
pub unsafe extern "C" fn rust_compaction_filter_ignore_snapshots(f: *mut ()) -> c_char {
assert!(!f.is_null());
let filter = f as *mut &(dyn CompactionFilter + Sync);
(*filter).ignore_snapshots() as _
}
}<|fim▁hole|> use crate::rocksdb::*;
use super::*;
use lazy_static::lazy_static;
pub struct MyCompactionFilter;
impl CompactionFilter for MyCompactionFilter {
fn filter(&mut self, level: i32, key: &[u8], value_type: ValueType, existing_value: &[u8]) -> Decision {
assert_eq!(value_type, ValueType::Value); // haven't set up merge test
if existing_value == b"TO-BE-DELETED" {
Decision::Remove
} else if existing_value == b"an-typo-in-value" {
Decision::ChangeValue(b"a-typo-not-in-value".to_vec())
} else if key == b"key-0" {
Decision::RemoveAndSkipUntil(b"key-5".to_vec())
} else {
Decision::Keep
}
}
}
lazy_static! {
static ref MY_COMPACTION_FILTER: MyCompactionFilter = MyCompactionFilter;
}
#[test]
fn compaction_filter() {
let tmp_dir = ::tempdir::TempDir::new_in(".", "rocks").unwrap();
let db = DB::open(
Options::default()
.map_db_options(|db| db.create_if_missing(true))
.map_cf_options(|cf| cf.compaction_filter(&*MY_COMPACTION_FILTER)),
&tmp_dir,
)
.unwrap();
println!("compact and try remove range");
assert!(db.put(&WriteOptions::default(), b"key-0", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-1", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-2", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-3", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-4", b"23333").is_ok());
// following will be reserved
assert!(db.put(&WriteOptions::default(), b"key-5", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-6", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-7", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-8", b"23333").is_ok());
println!("compact and delete");
assert!(db
.put(&WriteOptions::default(), b"will-delete-me", b"TO-BE-DELETED")
.is_ok());
println!("compact and change value");
assert!(db
.put(&WriteOptions::default(), b"will-fix-me", b"an-typo-in-value")
.is_ok());
// now compact full range
let ret = db.compact_range(&Default::default(), ..);
assert!(ret.is_ok(), "error: {:?}", ret);
assert!(db.get(&ReadOptions::default(), b"will-delete-me").is_err());
assert!(db
.get(&ReadOptions::default(), b"will-delete-me")
.unwrap_err()
.is_not_found());
assert!(db.get(&ReadOptions::default(), b"key-0").is_err());
assert!(db.get(&ReadOptions::default(), b"key-0").unwrap_err().is_not_found());
assert!(db.get(&ReadOptions::default(), b"key-4").is_err());
assert!(db.get(&ReadOptions::default(), b"key-4").unwrap_err().is_not_found());
assert_eq!(db.get(&ReadOptions::default(), b"key-5").unwrap(), b"23333");
assert_eq!(
db.get(&ReadOptions::default(), b"will-fix-me").unwrap(),
b"a-typo-not-in-value"
);
drop(db);
drop(tmp_dir);
}
}<|fim▁end|> |
#[cfg(test)]
mod tests { |
<|file_name|>mangle.cpp<|end_file_name|><|fim▁begin|>// RUN: %clang_cc1 -emit-llvm %s -o - -triple=x86_64-apple-darwin9 -fblocks -std=c++11 | FileCheck %s
struct X { };
struct Y { };
// CHECK: @unmangled_variable = global
// CHECK: @_ZN1N1iE = global
// CHECK: @_ZZN1N1fEiiE1b = internal global
// CHECK: @_ZZN1N1gEvE1a = internal global
// CHECK: @_ZGVZN1N1gEvE1a = internal global
//CHECK: @pr5966_i = external global
//CHECK: @_ZL8pr5966_i = internal global
// CHECK: define zeroext i1 @_ZplRK1YRA100_P1X
bool operator+(const Y&, X* (&xs)[100]) { return false; }
// CHECK: define void @_Z1f1s
typedef struct { int a; } s;
void f(s) { }
// CHECK: define void @_Z1f1e
typedef enum { foo } e;
void f(e) { }
// CHECK: define void @_Z1f1u
typedef union { int a; } u;
void f(u) { }
// CHECK: define void @_Z1f1x
typedef struct { int a; } x,y;
void f(y) { }
// CHECK: define void @_Z1fv
void f() { }
// CHECK: define void @_ZN1N1fEv
namespace N { void f() { } }
// CHECK: define void @_ZN1N1N1fEv
namespace N { namespace N { void f() { } } }
// CHECK: define void @unmangled_function
extern "C" { namespace N { void unmangled_function() { } } }
extern "C" { namespace N { int unmangled_variable = 10; } }
namespace N { int i; }
namespace N { int f(int, int) { static int b; return b; } }
namespace N { int h(); void g() { static int a = h(); } }
// CHECK: define void @_Z1fno
void f(__int128_t, __uint128_t) { }
template <typename T> struct S1 {};
// CHECK: define void @_Z1f2S1IiE
void f(S1<int>) {}
// CHECK: define void @_Z1f2S1IdE
void f(S1<double>) {}
template <int N> struct S2 {};
// CHECK: define void @_Z1f2S2ILi100EE
void f(S2<100>) {}
// CHECK: define void @_Z1f2S2ILin100EE
void f(S2<-100>) {}
template <bool B> struct S3 {};
// CHECK: define void @_Z1f2S3ILb1EE
void f(S3<true>) {}
// CHECK: define void @_Z1f2S3ILb0EE
void f(S3<false>) {}
struct S;
// CHECK: define void @_Z1fM1SKFvvE
void f(void (S::*)() const) {}
// CHECK: define void @_Z1fM1SFvvE
void f(void (S::*)()) {}
// CHECK: define void @_Z1fi
void f(const int) { }
template<typename T, typename U> void ft1(U u, T t) { }
template<typename T> void ft2(T t, void (*)(T), void (*)(T)) { }
template<typename T, typename U = S1<T> > struct S4 { };
template<typename T> void ft3(S4<T>*) { }
namespace NS {
template<typename T> void ft1(T) { }
}
void g1() {
// CHECK: @_Z3ft1IidEvT0_T_
ft1<int, double>(1, 0);
// CHECK: @_Z3ft2IcEvT_PFvS0_ES2_
ft2<char>(1, 0, 0);
// CHECK: @_Z3ft3IiEvP2S4IT_2S1IS1_EE
ft3<int>(0);
// CHECK: @_ZN2NS3ft1IiEEvT_
NS::ft1<int>(1);
}
// Expressions
template<int I> struct S5 { };
template<int I> void ft4(S5<I>) { }
void g2() {
// CHECK: @_Z3ft4ILi10EEv2S5IXT_EE
ft4(S5<10>());
// CHECK: @_Z3ft4ILi20EEv2S5IXT_EE
ft4(S5<20>());
}
extern "C++" {
// CHECK: @_Z1hv
void h() { }
}
// PR5019
extern "C" { struct a { int b; }; }
// CHECK: @_Z1fP1a
int f(struct a *x) {
return x->b;
}
// PR5017
extern "C" {
struct Debug {
const Debug& operator<< (unsigned a) const { return *this; }
};
Debug dbg;
// CHECK: @_ZNK5DebuglsEj
int main(void) { dbg << 32 ;}
}
template<typename T> struct S6 {
typedef int B;
};
template<typename T> void ft5(typename S6<T>::B) { }
// CHECK: @_Z3ft5IiEvN2S6IT_E1BE
template void ft5<int>(int);
template<typename T> class A {};
namespace NS {
template<typename T> bool operator==(const A<T>&, const A<T>&) { return true; }
}
// CHECK: @_ZN2NSeqIcEEbRK1AIT_ES5_
template bool NS::operator==(const ::A<char>&, const ::A<char>&);
namespace std {
template<typename T> bool operator==(const A<T>&, const A<T>&) { return true; }
}
// CHECK: @_ZSteqIcEbRK1AIT_ES4_
template bool std::operator==(const ::A<char>&, const ::A<char>&);
struct S {
typedef int U;
};
template <typename T> typename T::U ft6(const T&) { return 0; }
// CHECK: @_Z3ft6I1SENT_1UERKS1_
template int ft6<S>(const S&);
template<typename> struct __is_scalar_type {
enum { __value = 1 };
};
template<bool, typename> struct __enable_if { };
template<typename T> struct __enable_if<true, T> {
typedef T __type;
};
// PR5063
template<typename T> typename __enable_if<__is_scalar_type<T>::__value, void>::__type ft7() { }
// CHECK: @_Z3ft7IiEN11__enable_ifIXsr16__is_scalar_typeIT_EE7__valueEvE6__typeEv
template void ft7<int>();
// CHECK: @_Z3ft7IPvEN11__enable_ifIXsr16__is_scalar_typeIT_EE7__valueEvE6__typeEv
template void ft7<void*>();
// PR5144
extern "C" {
void extern_f(void);
};
// CHECK: @extern_f
void extern_f(void) { }
struct S7 {
S7();
struct S { S(); };
struct {
S s;
} a;
};
// PR5139
// CHECK: @_ZN2S7C1Ev
// CHECK: @_ZN2S7C2Ev
// CHECK: @_ZN2S7Ut_C1Ev
S7::S7() {}
// PR5063
template<typename T> typename __enable_if<(__is_scalar_type<T>::__value), void>::__type ft8() { }
// CHECK: @_Z3ft8IiEN11__enable_ifIXsr16__is_scalar_typeIT_EE7__valueEvE6__typeEv
template void ft8<int>();
// CHECK: @_Z3ft8IPvEN11__enable_ifIXsr16__is_scalar_typeIT_EE7__valueEvE6__typeEv
template void ft8<void*>();
// PR5796
namespace PR5796 {
template<typename> struct __is_scalar_type {
enum { __value = 0 };
};
template<bool, typename> struct __enable_if {};
template<typename T> struct __enable_if<true, T> { typedef T __type; };
template<typename T>
// CHECK: define linkonce_odr void @_ZN6PR57968__fill_aIiEENS_11__enable_ifIXntsr16__is_scalar_typeIT_EE7__valueEvE6__typeEv
typename __enable_if<!__is_scalar_type<T>::__value, void>::__type __fill_a() { };
void f() { __fill_a<int>(); }
}
namespace Expressions {
// Unary operators.
// CHECK: define weak_odr void @_ZN11Expressions2f1ILi1EEEvPAplngT_Li2E_i
template <int i> void f1(int (*)[(-i) + 2]) { };
template void f1<1>(int (*)[1]);
// CHECK: define weak_odr void @_ZN11Expressions2f2ILi1EEEvPApsT__i
template <int i> void f2(int (*)[+i]) { };
template void f2<1>(int (*)[1]);
// Binary operators.
// CHECK: define weak_odr void @_ZN11Expressions2f3ILi1EEEvPAplT_T__i
template <int i> void f3(int (*)[i+i]) { };
template void f3<1>(int (*)[2]);
// CHECK: define weak_odr void @_ZN11Expressions2f4ILi1EEEvPAplplLi2ET_T__i
template <int i> void f4(int (*)[2 + i+i]) { };
template void f4<1>(int (*)[4]);
// The ternary operator.
// CHECK: define weak_odr void @_ZN11Expressions2f4ILb1EEEvPAquT_Li1ELi2E_i
template <bool b> void f4(int (*)[b ? 1 : 2]) { };
template void f4<true>(int (*)[1]);
}
struct Ops {
Ops& operator+(const Ops&);
Ops& operator-(const Ops&);
Ops& operator&(const Ops&);
Ops& operator*(const Ops&);
void *v;
};
// CHECK: define %struct.Ops* @_ZN3OpsplERKS_
Ops& Ops::operator+(const Ops&) { return *this; }
// CHECK: define %struct.Ops* @_ZN3OpsmiERKS_
Ops& Ops::operator-(const Ops&) { return *this; }
// CHECK: define %struct.Ops* @_ZN3OpsanERKS_
Ops& Ops::operator&(const Ops&) { return *this; }
// CHECK: define %struct.Ops* @_ZN3OpsmlERKS_
Ops& Ops::operator*(const Ops&) { return *this; }
// PR5861
namespace PR5861 {
template<bool> class P;
template<> class P<true> {};
template<template <bool> class, bool>
struct Policy { };
template<typename T, typename = Policy<P, true> > class Alloc
{
T *allocate(int, const void*) { return 0; }
};
// CHECK: define weak_odr i8* @_ZN6PR58615AllocIcNS_6PolicyINS_1PELb1EEEE8allocateEiPKv
template class Alloc<char>;
}
// CHECK: define void @_Z1fU13block_pointerFiiiE
void f(int (^)(int, int)) { }
void pr5966_foo() {
extern int pr5966_i;
pr5966_i = 0;
}
static int pr5966_i;
void pr5966_bar() {
pr5966_i = 0;
}
namespace test0 {
int ovl(int x);
char ovl(double x);
template <class T> void f(T, char (&buffer)[sizeof(ovl(T()))]) {}
void test0() {
char buffer[1];
f(0.0, buffer);
}
// CHECK: define void @_ZN5test05test0Ev()
// CHECK: define linkonce_odr void @_ZN5test01fIdEEvT_RAszcl3ovlcvS1__EE_c(
void test1() {
char buffer[sizeof(int)];
f(1, buffer);
}
// CHECK: define void @_ZN5test05test1Ev()
// CHECK: define linkonce_odr void @_ZN5test01fIiEEvT_RAszcl3ovlcvS1__EE_c(
template <class T> void g(char (&buffer)[sizeof(T() + 5.0f)]) {}
void test2() {
char buffer[sizeof(float)];
g<float>(buffer);
}
// CHECK: define linkonce_odr void @_ZN5test01gIfEEvRAszplcvT__ELf40a00000E_c(
template <class T> void h(char (&buffer)[sizeof(T() + 5.0)]) {}
void test3() {
char buffer[sizeof(double)];
h<float>(buffer);
}
// CHECK: define linkonce_odr void @_ZN5test01hIfEEvRAszplcvT__ELd4014000000000000E_c(
template <class T> void j(char (&buffer)[sizeof(T().buffer)]) {}
struct A { double buffer[128]; };
void test4() {
char buffer[1024];
j<A>(buffer);
}
// CHECK: define linkonce_odr void @_ZN5test01jINS_1AEEEvRAszdtcvT__E6buffer_c(
template <class T> void k(char (&buffer)[sizeof(T() + 0.0f)]) {}
void test5() {
char buffer[sizeof(float)];
k<float>(buffer);
}
// CHECK: define linkonce_odr void @_ZN5test01kIfEEvRAszplcvT__ELf00000000E_c(
}
namespace test1 {
template<typename T> struct X { };
template<template<class> class Y, typename T> void f(Y<T>) { }
// CHECK: define weak_odr void @_ZN5test11fINS_1XEiEEvT_IT0_E
template void f(X<int>);
}
// CHECK: define internal void @_ZL27functionWithInternalLinkagev()
static void functionWithInternalLinkage() { }
void g() { functionWithInternalLinkage(); }
namespace test2 {
template <class T> decltype(((T*) 0)->member) read_member(T& obj) {
return obj.member;
}
struct A { int member; } obj;
int test() {
return read_member(obj);
}
// CHECK: define linkonce_odr i32 @_ZN5test211read_memberINS_1AEEEDtptcvPT_Li0E6memberERS2_(
}
// rdar://problem/9280586
namespace test3 {
struct AmbiguousBase { int ab; };
struct Path1 : AmbiguousBase { float p; };
struct Path2 : AmbiguousBase { double p; };
struct Derived : Path1, Path2 { };
// CHECK: define linkonce_odr i32 @_ZN5test38get_ab_1INS_7DerivedEEEDtptcvPT_Li0Esr5Path1E2abERS2_(
template <class T> decltype(((T*) 0)->Path1::ab) get_ab_1(T &ref) { return ref.Path1::ab; }
// CHECK: define linkonce_odr i32 @_ZN5test38get_ab_2INS_7DerivedEEEDtptcvPT_Li0Esr5Path2E2abERS2_(
template <class T> decltype(((T*) 0)->Path2::ab) get_ab_2(T &ref) { return ref.Path2::ab; }
// CHECK: define linkonce_odr float @_ZN5test37get_p_1INS_7DerivedEEEDtptcvPT_Li0Esr5Path1E1pERS2_(
template <class T> decltype(((T*) 0)->Path1::p) get_p_1(T &ref) { return ref.Path1::p; }
// CHECK: define linkonce_odr double @_ZN5test37get_p_2INS_7DerivedEEEDtptcvPT_Li0Esr5Path2E1pERS2_(
template <class T> decltype(((T*) 0)->Path2::p) get_p_2(T &ref) { return ref.Path2::p; }
Derived obj;
void test() {
get_ab_1(obj);
get_ab_2(obj);
get_p_1(obj);
get_p_2(obj);
}
}
// CHECK: define void @_ZN5test41gEPNS_3zedIXadL_ZNS_3foo3barEEEEE
namespace test4 {
struct foo { int bar; };
template <int (foo::*)>
struct zed {};
void g(zed<&foo::bar>*)
{}
}
// CHECK: define void @_ZN5test51gEPNS_3zedIXadL_ZNS_3foo3barEEEEE
namespace test5 {
struct foo { static int bar; };
template <int *>
struct zed {};
void g(zed<&foo::bar>*)
{}
}
// CHECK: define void @_ZN5test61gEPNS_3zedIXadL_ZNS_3foo3barEvEEEE
namespace test6 {
struct foo { int bar(); };
template <int (foo::*)()>
struct zed {};
void g(zed<&foo::bar>*)
{}
}
// CHECK: define void @_ZN5test71gEPNS_3zedIXadL_ZNS_3foo3barEvEEEE
namespace test7 {
struct foo { static int bar(); };
template <int (*f)()>
struct zed {};
void g(zed<&foo::bar>*)
{}
}
// CHECK: define weak_odr void @_ZN5test81AILZNS_1B5valueEEE3incEv
namespace test8 {
template <int &counter> class A { void inc() { counter++; } };
class B { public: static int value; };
template class A<B::value>;
}
// CHECK: declare void @_ZN5test91fIiNS_3barEEEvRKNT0_3baz1XE
namespace test9 {
template<class T>
struct foo {
typedef T X;
};
struct bar {
typedef foo<int> baz;
};
template <class zaz, class zed>
void f(const typename zed::baz::X&);
void g() {
f<int, bar>( 0);
}
}
// <rdar://problem/7825453>
namespace test10 {
template <char P1> struct S {};
template <char P2> void f(struct S<false ? 'a' : P2> ) {}
// CHECK: define weak_odr void @_ZN6test101fILc3EEEvNS_1SIXquLb0ELc97ET_EEE(
template void f<(char) 3>(struct S<3>);
}
namespace test11 {
// CHECK: @_ZN6test111fEz
void f(...) { }
struct A {
void f(...);
};
// CHECK: @_ZN6test111A1fEz
void A::f(...) { }
}
namespace test12 {
// CHECK: _ZN6test121fENS_1AILt33000EEE
template <unsigned short> struct A { };
void f(A<33000>) { }
}
// PR7446
namespace test13 {
template <template <class> class T> class A {};
template <class U> class B {};
template <template<class> class T> void foo(const A<T> &a) {}
// CHECK: define weak_odr void @_ZN6test133fooINS_1BEEEvRKNS_1AIT_EE(
template void foo(const A<B> &a);
}
namespace test14 {
extern "C" {
struct S {
static int a(), x;
};
// CHECK: define i32 @_ZN6test141S1aEv
// CHECK: load i32* @_ZN6test141S1xE
int S::a() { return S::x; }
}
}
// rdar://problem/8204122
namespace test15 {
enum E { e = 3 };
template <int I> struct S {};
template <int I> void f(S<I + e>) {}
// CHECK: define weak_odr void @_ZN6test151fILi7EEEvNS_1SIXplT_LNS_1EE3EEEE(
template void f<7>(S<7 + e>);
}
// rdar://problem/8302148
namespace test17 {
template <int N> struct A {};
struct B {
static int foo(void);
};
template <class T> A<sizeof(T::foo())> func(void);
// CHECK: define void @_ZN6test174testEv()
// CHECK: call {{.*}} @_ZN6test174funcINS_1BEEENS_1AIXszclsrT_3fooEEEEv()
void test() {
func<B>();
}
}
// PR7891
namespace test18 {
struct A {
int operator+();
int operator-();
int operator*();
int operator&();
};
template <int (A::*)()> struct S {};
template <typename T> void f(S<&T::operator+>) {}
template void f<A>(S<&A::operator+>);
template <typename T> void f(S<&T::operator- >) {}
template void f<A>(S<&A::operator- >);
template <typename T> void f(S<&T::operator*>) {}
template void f<A>(S<&A::operator*>);
template <typename T> void f(S<&T::operator&>) {}
template void f<A>(S<&A::operator&>);
// CHECK: define weak_odr void @_ZN6test181fINS_1AEEEvNS_1SIXadsrT_plEEE
// CHECK: define weak_odr void @_ZN6test181fINS_1AEEEvNS_1SIXadsrT_miEEE
// CHECK: define weak_odr void @_ZN6test181fINS_1AEEEvNS_1SIXadsrT_mlEEE
// CHECK: define weak_odr void @_ZN6test181fINS_1AEEEvNS_1SIXadsrT_anEEE
}
// rdar://problem/8332117
namespace test19 {
struct A {
template <typename T> int f();
int operator+();
operator int();
template <typename T> int operator-();
};
template <int (A::*)()> struct S {};
template <typename T> void g (S<&T::template f<int> >) {}
template <typename T> void g (S<&T::operator+ >) {}
template <typename T> void g (S<&T::operator int>) {}
template <typename T> void g (S<&T::template operator- <double> >) {}
// CHECK: define weak_odr void @_ZN6test191gINS_1AEEEvNS_1SIXadsrT_1fIiEEEE(
template void g<A>(S<&A::f<int> >);
// CHECK: define weak_odr void @_ZN6test191gINS_1AEEEvNS_1SIXadsrT_plEEE(
template void g<A>(S<&A::operator+>);
// CHECK: define weak_odr void @_ZN6test191gINS_1AEEEvNS_1SIXadsrT_cviEEE(
template void g<A>(S<&A::operator int>);
// CHECK: define weak_odr void @_ZN6test191gINS_1AEEEvNS_1SIXadsrT_miIdEEEE(
template void g<A>(S<&A::operator-<double> >);
}
namespace test20 {
template <class T> T *f(const T&);
template <class T> T *f(T*);
// CHECK: define weak_odr void @_ZN6test205test0IiEEvDTcl1fIPT_ELi0EEE(
template <class T> void test0(decltype(f<T*>(0))) {}
template void test0<int>(decltype(f<int*>(0)));
// CHECK: define weak_odr void @_ZN6test205test1IiEEvDTcl1fIEcvT__EEE(
template <class T> void test1(decltype(f<>(T()))) {}
template void test1<int>(decltype(f<>(int())));
}
// rdar:// 8620510
namespace test21 {
// CHECK: define void @_ZN6test2112vla_arg_funcEiPA_i(
void vla_arg_func(int X, int a[X][X]) {}
}
namespace test22 {
// CHECK: define void @_ZN6test221fEDn(
void f(decltype(nullptr)) { }
}
// rdar://problem/8913416
namespace test23 {
typedef void * const vpc;
// CHECK: define void @_ZN6test231fERA10_KPv(
void f(vpc (&)[10]) {}
typedef vpc vpca5[5];
void f(vpca5 volatile (&)[10]) {}
// CHECK: define void @_ZN6test231fERA10_A5_VKPv(
}
namespace test24 {
void test0() {
extern int foo();
// CHECK: call i32 @_ZN6test243fooEv()
foo();
}
static char foo() {}
void test1() {
// CHECK: call signext i8 @_ZN6test24L3fooEv()
foo();
}
}
// rdar://problem/8806641
namespace test25 {
template <void (*fn)()> struct A {
static void call() { fn(); }
};
void foo();
void test() {
// CHECK: call void @_ZN6test251AIXadL_ZNS_3fooEvEEE4callEv()
A<foo>::call();
}
}
namespace test26 {
template <template <class> class T> void foo(decltype(T<float>::object) &object) {}
template <class T> struct holder { static T object; };
void test() {
float f;
// CHECK: call void @_ZN6test263fooINS_6holderEEEvRDtsrT_IfE6objectE(
foo<holder>(f);
}
}
namespace test27 {
struct A {
struct inner {
float object;
};
float meth();
};
typedef A Alias;
template <class T> void a(decltype(T::inner::object) &object) {}
template <class T> void b(decltype(T().Alias::meth()) &object) {}
void test() {
float f;
// CHECK: call void @_ZN6test271aINS_1AEEEvRDtsrNT_5innerE6objectE(
a<A>(f);
// CHECK: call void @_ZN6test271bINS_1AEEEvRDTcldtcvT__Esr5AliasE4methEE(
b<A>(f);<|fim▁hole|>}
// An injected class name type in a unresolved-name.
namespace test28 {
template <class T> struct A {
enum { bit };
};
template <class T> void foo(decltype(A<T>::A::bit) x);
void test() {
foo<char>(A<char>::bit);
// CHECK: call void @_ZN6test283fooIcEEvDtsr1AIT_E1AE3bitE(
}
}
// An enclosing template type parameter in an unresolved-name.
namespace test29 {
template <class T> struct A {
template <class U> static void foo(decltype(T::fn(U())) x);
};
struct B { static int fn(int); static long fn(long); };
void test() {
A<B>::foo<int>(0);
// CHECK: call void @_ZN6test291AINS_1BEE3fooIiEEvDTclsrS1_2fncvT__EEE(
}
}
// An enclosing template template parameter in an unresolved-name.
namespace test30 {
template <template <class> class T> struct A {
template <class U> static void foo(decltype(T<U>::fn()) x);
};
template <class T> struct B { static T fn(); };
void test() {
A<B>::foo<int>(0);
// CHECK: call void @_ZN6test301AINS_1BEE3fooIiEEvDTclsrS1_IT_EE2fnEE(
}
}
namespace test31 { // instantiation-dependent mangling of decltype
int x;
template<class T> auto f1(T p)->decltype(x) { return 0; }
// The return type in the mangling of the template signature
// is encoded as "i".
template<class T> auto f2(T p)->decltype(p) { return 0; }
// The return type in the mangling of the template signature
// is encoded as "Dtfp_E".
void g(int);
template<class T> auto f3(T p)->decltype(g(p)) {}
// CHECK: define weak_odr i32 @_ZN6test312f1IiEEiT_(
template int f1(int);
// CHECK: define weak_odr i32 @_ZN6test312f2IiEEDtfp_ET_
template int f2(int);
// CHECK: define weak_odr void @_ZN6test312f3IiEEDTcl1gfp_EET_
template void f3(int);
}
// PR10205
namespace test32 {
template<typename T, int=T::value> struct A {
typedef int type;
};
struct B { enum { value = 4 }; };
template <class T> typename A<T>::type foo() { return 0; }
void test() {
foo<B>();
// CHECK: call i32 @_ZN6test323fooINS_1BEEENS_1AIT_XsrS3_5valueEE4typeEv()
}
}
namespace test33 {
template <class T> struct X {
enum { value = T::value };
};
template<typename T, int=X<T>::value> struct A {
typedef int type;
};
struct B { enum { value = 4 }; };
template <class T> typename A<T>::type foo() { return 0; }
void test() {
foo<B>();
// CHECK: call i32 @_ZN6test333fooINS_1BEEENS_1AIT_Xsr1XIS3_EE5valueEE4typeEv()
}
}
namespace test34 {
// Mangling for instantiation-dependent decltype expressions.
template<typename T>
void f(decltype(sizeof(decltype(T() + T())))) {}
// CHECK: define weak_odr void @_ZN6test341fIiEEvDTstDTplcvT__EcvS1__EEE
template void f<int>(decltype(sizeof(1)));
// Mangling for non-instantiation-dependent sizeof expressions.
template<unsigned N>
void f2(int (&)[N + sizeof(int*)]) {}
// CHECK: define weak_odr void @_ZN6test342f2ILj4EEEvRAplT_Lm8E_i
template void f2<4>(int (&)[4 + sizeof(int*)]);
// Mangling for non-instantiation-dependent sizeof expressions
// involving an implicit conversion of the result of the sizeof.
template<unsigned long long N>
void f3(int (&)[N + sizeof(int*)]) {}
// CHECK: define weak_odr void @_ZN6test342f3ILy4EEEvRAplT_Ly8E_i
template void f3<4>(int (&)[4 + sizeof(int*)]);
// Mangling for instantiation-dependent sizeof() expressions as
// template arguments.
template<unsigned> struct A { };
template<typename T> void f4(::test34::A<sizeof(sizeof(decltype(T() + T())))>) { }
// CHECK: define weak_odr void @_ZN6test342f4IiEEvNS_1AIXszstDTplcvT__EcvS2__EEEEE
template void f4<int>(A<sizeof(sizeof(int))>);
}
namespace test35 {
// Dependent operator names of unknown arity.
struct A {
template<typename U> A operator+(U) const;
};
template<typename T>
void f1(decltype(sizeof(&T::template operator+<int>))) {}
// CHECK: define weak_odr void @_ZN6test352f1INS_1AEEEvDTszadsrT_plIiEE
template void f1<A>(__SIZE_TYPE__);
}
namespace test36 {
template<unsigned> struct A { };
template<typename ...Types>
auto f1(Types... values) -> A<sizeof...(values)> { }
// CHECK: define weak_odr {{.*}} @_ZN6test362f1IJifEEENS_1AIXsZfp_EEEDpT_
template A<2> f1(int, float);
}
namespace test37 {
struct foo {
struct {
} a;
typedef struct { } b;
typedef struct { } *c;
struct {
} d;
};
template<typename T> void func(T) { }
void test() {
// CHECK: define linkonce_odr void @_ZN6test374funcINS_3fooUt_EEEvT_
func(foo().a);
// CHECK: define linkonce_odr void @_ZN6test374funcINS_3fooUt0_EEEvT_
func(*foo::c());
// CHECK: define linkonce_odr void @_ZN6test374funcINS_3fooUt1_EEEvT_
func(foo().d);
}
}
// CHECK: define void @_Z6ASfuncPU3AS3i
void ASfunc(__attribute__((address_space(3))) int* x) {}
namespace test38 {
// CHECK: define linkonce_odr void @_ZN6test384funcINS_3fooUt_EEEvT_
typedef struct {
struct {
} a;
} foo;
template <typename T> void func(T) {}
void test() { func(foo().a); }
}
namespace test39 {
// CHECK: define internal void @"_ZN6test394funcINS_3$_03$_1EEEvT_"
typedef struct {
struct {} a;
} *foo;
template<typename T> void func(T) {}
void test(foo x) {
func(x->a);
}
}
namespace test40 {
// CHECK: i32* @_ZZN6test401fEvE1a_0
void h(int&);
inline void f() {
if (0) {
static int a;
}
static int a;
h(a);
};
void g() { f(); }
}<|fim▁end|> | } |
<|file_name|>archive.go<|end_file_name|><|fim▁begin|>// Copyright 2014 Docker authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the DOCKER-LICENSE file.
package archive
import (
"archive/tar"
"bufio"
"compress/gzip"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
"github.com/sirupsen/logrus"
)
const (
	// Uncompressed represents no compression.
Uncompressed Compression = iota
// Bzip2 is bzip2 compression algorithm.
Bzip2
// Gzip is gzip compression algorithm.
Gzip
// Xz is xz compression algorithm.
Xz
)
const (
modeISDIR = 040000 // Directory
modeISFIFO = 010000 // FIFO
modeISREG = 0100000 // Regular file
modeISLNK = 0120000 // Symbolic link
modeISBLK = 060000 // Block special file
modeISCHR = 020000 // Character special file
modeISSOCK = 0140000 // Socket
)
// Compression indicates which compression algorithm, if any, is applied to a stream.
type Compression int
// Extension returns the extension of a file that uses the specified compression algorithm.
func (compression *Compression) Extension() string {
switch *compression {
case Uncompressed:
return "tar"
case Bzip2:
return "tar.bz2"
case Gzip:
return "tar.gz"
case Xz:
return "tar.xz"
}
return ""
}
// WhiteoutFormat is the format of whiteouts unpacked
type WhiteoutFormat int
// TarOptions wraps the tar options.
type TarOptions struct {
IncludeFiles []string
ExcludePatterns []string
Compression Compression
NoLchown bool
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
ChownOpts *idtools.Identity
IncludeSourceDir bool
// WhiteoutFormat is the expected on disk format for whiteout files.
// This format will be converted to the standard format on pack
// and from the standard format on unpack.
WhiteoutFormat WhiteoutFormat
// When unpacking, specifies whether overwriting a directory with a
// non-directory is allowed and vice versa.
NoOverwriteDirNonDir bool
// For each include when creating an archive, the included name will be
// replaced with the matching name from this map.
RebaseNames map[string]string
InUserNS bool
}
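// An illustrative sketch of driving TarWithOptions below; the source path and
// patterns are placeholders, not part of this package:
//
//	opts := &TarOptions{
//		Compression:     Gzip,
//		ExcludePatterns: []string{"*.tmp"},
//		IncludeFiles:    []string{"."},
//	}
//	rc, err := TarWithOptions("/var/lib/app/data", opts)
//	if err != nil {
//		// handle the error
//	}
//	defer rc.Close()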
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
// Fix the source path to work with long path names. This is a no-op
// on platforms other than Windows.
srcPath = fixVolumePathPrefix(srcPath)
pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
if err != nil {
return nil, err
}
pipeReader, pipeWriter := io.Pipe()
compressWriter, err := CompressStream(pipeWriter, options.Compression)
if err != nil {
return nil, err
}
go func() {
ta := newTarAppender(
idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
compressWriter,
options.ChownOpts,
)
ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat)
defer func() {
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Errorf("Can't close tar writer: %s", err)
}
if err := compressWriter.Close(); err != nil {
logrus.Errorf("Can't close compress writer: %s", err)
}
if err := pipeWriter.Close(); err != nil {
logrus.Errorf("Can't close pipe writer: %s", err)
}
}()
// this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer)
// In general we log errors here but ignore them because
// during e.g. a diff operation the container can continue
// mutating the filesystem and we can see transient errors
// from this
stat, err := os.Lstat(srcPath)
if err != nil {
return
}
if !stat.IsDir() {
// We can't later join a non-dir with any includes because the
// 'walk' will error if "file/." is stat-ed and "file" is not a
// directory. So, we must split the source path and use the
// basename as the include.
if len(options.IncludeFiles) > 0 {
logrus.Warn("Tar: Can't archive a file with includes")
}
dir, base := SplitPathDirEntry(srcPath)
srcPath = dir
options.IncludeFiles = []string{base}
}
if len(options.IncludeFiles) == 0 {
options.IncludeFiles = []string{"."}
}
seen := make(map[string]bool)
for _, include := range options.IncludeFiles {
rebaseName := options.RebaseNames[include]
walkRoot := getWalkRoot(srcPath, include)
filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
if err != nil {
logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
return nil
}
relFilePath, err := filepath.Rel(srcPath, filePath)
if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
// Error getting relative path OR we are looking
// at the source directory path. Skip in both situations.
return nil
}
if options.IncludeSourceDir && include == "." && relFilePath != "." {
relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
}
skip := false
// If "include" is an exact match for the current file
// then even if there's an "excludePatterns" pattern that
// matches it, don't skip it. IOW, assume an explicit 'include'
// is asking for that file no matter what - which is true
// for some files, like .dockerignore and Dockerfile (sometimes)
if include != relFilePath {
skip, err = pm.Matches(relFilePath)
if err != nil {
logrus.Errorf("Error matching %s: %v", relFilePath, err)
return err
}
}
if skip {
// If we want to skip this file and it's a directory
// then we should first check to see if there's an
// excludes pattern (e.g. !dir/file) that starts with this
// dir. If so then we can't skip this dir.
// If it's not a dir then we can just return/skip it.
if !f.IsDir() {
return nil
}
// No exceptions (!...) in patterns so just skip dir
if !pm.Exclusions() {
return filepath.SkipDir
}
dirSlash := relFilePath + string(filepath.Separator)
for _, pat := range pm.Patterns() {
if !pat.Exclusion() {
continue
}
if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
// found a match - so can't skip this dir
return nil
}
}
// No matching exclusion dir so just skip dir
return filepath.SkipDir
}
if seen[relFilePath] {
return nil
}
seen[relFilePath] = true
// Rename the base resource.
if rebaseName != "" {
var replacement string
if rebaseName != string(filepath.Separator) {
// Special case the root directory to replace with an
// empty string instead so that we don't end up with
// double slashes in the paths.<|fim▁hole|> replacement = rebaseName
}
relFilePath = strings.Replace(relFilePath, include, replacement, 1)
}
if err := ta.addTarFile(filePath, relFilePath); err != nil {
logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
// if pipe is broken, stop writing tar stream to it
if err == io.ErrClosedPipe {
return err
}
}
return nil
})
}
}()
return pipeReader, nil
}
// CompressStream returns a WriteCloser that compresses data written to it with the specified algorithm and writes the result to dest.
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
p := pools.BufioWriter32KPool
buf := p.Get(dest)
switch compression {
case Uncompressed:
writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
return writeBufWrapper, nil
case Gzip:
gzWriter := gzip.NewWriter(dest)
writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
return writeBufWrapper, nil
case Bzip2, Xz:
// archive/bzip2 does not support writing, and there is no xz support at all
// However, this is not a problem as docker only currently generates gzipped tars
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
default:
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
}
}
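// Illustrative usage sketch (not part of the original file): wrap a file in a
// gzip-compressing WriteCloser and copy an existing tar stream into it. The
// reader "tarStream" is an assumed value.
//
//	f, err := os.Create("/tmp/archive.tar.gz")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	w, err := CompressStream(f, Gzip)
//	if err != nil {
//		return err
//	}
//	defer w.Close()
//	_, err = io.Copy(w, tarStream)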
type tarWhiteoutConverter interface {
ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
ConvertRead(*tar.Header, string) (bool, error)
}
type tarAppender struct {
TarWriter *tar.Writer
Buffer *bufio.Writer
// for hardlink mapping
SeenFiles map[uint64]string
IdentityMapping *idtools.IdentityMapping
ChownOpts *idtools.Identity
// For packing and unpacking whiteout files in the
// non standard format. The whiteout files defined
// by the AUFS standard are used as the tar whiteout
// standard.
WhiteoutConverter tarWhiteoutConverter
}
func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender {
return &tarAppender{
SeenFiles: make(map[uint64]string),
TarWriter: tar.NewWriter(writer),
Buffer: pools.BufioWriter32KPool.Get(nil),
IdentityMapping: idMapping,
ChownOpts: chownOpts,
}
}
// addTarFile adds to the tar archive a file from `path` as `name`
func (ta *tarAppender) addTarFile(path, name string) error {
fi, err := os.Lstat(path)
if err != nil {
return err
}
var link string
if fi.Mode()&os.ModeSymlink != 0 {
var err error
link, err = os.Readlink(path)
if err != nil {
return err
}
}
hdr, err := FileInfoHeader(name, fi, link)
if err != nil {
return err
}
if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
return err
}
// if it's not a directory and has more than 1 link,
// it's hard linked, so set the type flag accordingly
if !fi.IsDir() && hasHardlinks(fi) {
inode, err := getInodeFromStat(fi.Sys())
if err != nil {
return err
}
// a link should have a name that it links to
// and that linked name should be first in the tar archive
if oldpath, ok := ta.SeenFiles[inode]; ok {
hdr.Typeflag = tar.TypeLink
hdr.Linkname = oldpath
hdr.Size = 0 // This Must be here for the writer math to add up!
} else {
ta.SeenFiles[inode] = name
}
}
// check whether the file is an overlayfs whiteout
// if yes, skip re-mapping container ID mappings.
isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0
// handle re-mapping container ID mappings back to host ID mappings before
// writing tar headers/files. We skip whiteout files because they were written
// by the kernel and already have proper ownership relative to the host
if !isOverlayWhiteout &&
!strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) &&
!ta.IdentityMapping.Empty() {
fileIdentity, err := getFileIdentity(fi.Sys())
if err != nil {
return err
}
hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIdentity)
if err != nil {
return err
}
}
// explicitly override with ChownOpts
if ta.ChownOpts != nil {
hdr.Uid = ta.ChownOpts.UID
hdr.Gid = ta.ChownOpts.GID
}
if ta.WhiteoutConverter != nil {
wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
if err != nil {
return err
}
// If a new whiteout file exists, write the original hdr, then
// replace hdr with wo to be written after. Whiteouts should
// always be written after the original. Note the original
// hdr may itself have been updated to be a whiteout when a
// whiteout header is returned.
if wo != nil {
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
return err
}
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
return fmt.Errorf("tar: cannot use whiteout for non-empty file")
}
hdr = wo
}
}
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
return err
}
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
// We use system.OpenSequential to ensure we use sequential file
// access on Windows to avoid depleting the standby list.
// On Linux, this equates to a regular os.Open.
file, err := system.OpenSequential(path)
if err != nil {
return err
}
ta.Buffer.Reset(ta.TarWriter)
defer ta.Buffer.Reset(nil)
_, err = io.Copy(ta.Buffer, file)
file.Close()
if err != nil {
return err
}
err = ta.Buffer.Flush()
if err != nil {
return err
}
}
return nil
}
// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
// to a tar header
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
capability, _ := system.Lgetxattr(path, "security.capability")
if capability != nil {
hdr.Xattrs = make(map[string]string)
hdr.Xattrs["security.capability"] = string(capability)
}
return nil
}
// FileInfoHeader creates a populated Header from fi.
// Compared to archive pkg this function fills in more information.
// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
// which have been deleted since Go 1.9 archive/tar.
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
hdr, err := tar.FileInfoHeader(fi, link)
if err != nil {
return nil, err
}
hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
name, err = canonicalTarName(name, fi.IsDir())
if err != nil {
return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err)
}
hdr.Name = name
if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
return nil, err
}
return hdr, nil
}
// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar
// https://github.com/golang/go/commit/66b5a2f
func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
fm := fi.Mode()
switch {
case fm.IsRegular():
mode |= modeISREG
case fi.IsDir():
mode |= modeISDIR
case fm&os.ModeSymlink != 0:
mode |= modeISLNK
case fm&os.ModeDevice != 0:
if fm&os.ModeCharDevice != 0 {
mode |= modeISCHR
} else {
mode |= modeISBLK
}
case fm&os.ModeNamedPipe != 0:
mode |= modeISFIFO
case fm&os.ModeSocket != 0:
mode |= modeISSOCK
}
return mode
}
// canonicalTarName provides a platform-independent and consistent POSIX-style
// path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) (string, error) {
name, err := CanonicalTarNameForPath(name)
if err != nil {
return "", err
}
// suffix with '/' for directories
if isDir && !strings.HasSuffix(name, "/") {
name += "/"
}
return name, nil
}<|fim▁end|> | |
<|file_name|>AddListener.java<|end_file_name|><|fim▁begin|>/*
* RapidMiner
*
* Copyright (C) 2001-2011 by Rapid-I and the contributors
*
* Complete list of developers available at our web site:
*
* http://rapid-i.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see http://www.gnu.org/licenses/.
*/
package com.rapidminer.operator;
/**
* These listeners will be notified after a new operator was added to a chain.
*
* @author Ingo Mierswa
*/
public interface AddListener {
public void operatorAdded(Operator newChild);
<|fim▁hole|><|fim▁end|> | } |
<|file_name|>map2tmx.js<|end_file_name|><|fim▁begin|>"use strict"
const process = require(`process`)
const fs = require(`fs`)
const path = require(`path`)
const js2xmlparser = require(`js2xmlparser`)
const abbrevJson = require(`../abbrevJson`)
const asset = require(`../asset.js`)
const loadMappoMap = require(`../loadMappoMap`)
const mapFilename = process.argv[2]
const mappoMap = loadMappoMap({mapFilename})
console.log(abbrevJson(mappoMap))
const tileset = mappoMap.tileset
const tileWidth = tileset.tileWidth
const tileHeight = tileset.tileHeight
const vspFilename = path.basename(tileset.imageFilename)
const tileColumns = 20
const tileRows = ~~((tileset.tileCount + 19) / 20)
const vspPngWidth = tileWidth * tileColumns
const vspPngHeight = tileHeight * tileRows
const obj = {
'@': {
version: `1.0`,
orientation: `orthogonal`,
width: mappoMap.tileLayers[0].width,
height: mappoMap.tileLayers[0].height,
tilewidth: tileWidth,
tileheight: tileHeight,
},
tileset: {
'@': {<|fim▁hole|> firstgid: 1,
name: vspFilename,
tilewidth: tileWidth,
tileheight: tileHeight,
spacing: 0,
margin: 0,
tilecount: tileset.tileCount,
columns: tileColumns,
},
image: {
'@': {
source: vspFilename,
width: vspPngWidth,
height: vspPngHeight,
}
},
}
}
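// Each Mappo tile layer below becomes a TMX <layer> element with CSV-encoded
// data. TMX reserves gid 0 for "empty" and the tileset above uses firstgid 1,
// so every tile index is shifted up by one (the `++v` in the map) on export.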
obj.layer = mappoMap.mapLayerOrder.map(layerIndex => {
const tileLayer = mappoMap.tileLayers[layerIndex]
return {
'@': {
name: tileLayer.description,
width: tileLayer.width,
height: tileLayer.height,
},
data: {
'@': {
encoding: `csv`,
},
'#': tileLayer.tileIndexGrid.map(v => ++v).join(`,`),
}
}
})
const targetFilename = mapFilename + `.tmx`
const xml = js2xmlparser.parse(`map`, obj)
fs.writeFileSync(targetFilename, xml)
console.log(`converted`, mapFilename, `to`, targetFilename)<|fim▁end|> | |
<|file_name|>ClientInfo.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*<|fim▁hole|> * software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* ClientInfo.java
*
* This file was auto-generated from WSDL
* by the Apache Axis2 version: #axisVersion# #today#
*/
package org.apache.axis2.databinding;
import org.apache.axiom.om.OMFactory;
import org.apache.axis2.databinding.utils.writer.MTOMAwareXMLStreamWriter;
import javax.xml.namespace.QName;
import javax.xml.stream.XMLStreamException;
/** ClientInfo bean class */
public class ClientInfo
implements org.apache.axis2.databinding.ADBBean {
/* This type was generated from the piece of schema that had
name = ClientInfo
Namespace URI = http://www.wso2.com/types
Namespace Prefix = ns1
*/
public ClientInfo(String localName, String localSsn) {
this.localName = localName;
this.localSsn = localSsn;
}
public ClientInfo() {
}
/** field for Name */
protected java.lang.String localName;
/**
* Auto generated getter method
*
* @return java.lang.String
*/
public java.lang.String getName() {
return localName;
}
/**
* Auto generated setter method
*
* @param param Name
*/
public void setName(java.lang.String param) {
this.localName = param;
}
/** field for Ssn */
protected java.lang.String localSsn;
/**
* Auto generated getter method
*
* @return java.lang.String
*/
public java.lang.String getSsn() {
return localSsn;
}
/**
* Auto generated setter method
*
* @param param Ssn
*/
public void setSsn(java.lang.String param) {
this.localSsn = param;
}
/** databinding method to get an XML representation of this object */
public javax.xml.stream.XMLStreamReader getPullParser(javax.xml.namespace.QName qName) {
java.util.ArrayList elementList = new java.util.ArrayList();
java.util.ArrayList attribList = new java.util.ArrayList();
elementList.add(new javax.xml.namespace.QName("http://www.wso2.com/types",
"name"));
elementList
.add(org.apache.axis2.databinding.utils.ConverterUtil.convertToString(localName));
elementList.add(new javax.xml.namespace.QName("http://www.wso2.com/types",
"ssn"));
elementList.add(org.apache.axis2.databinding.utils.ConverterUtil.convertToString(localSsn));
return new org.apache.axis2.databinding.utils.reader.ADBXMLStreamReaderImpl
(qName, elementList.toArray(), attribList.toArray());
}
public void serialize(final QName parentQName,
final OMFactory factory,
MTOMAwareXMLStreamWriter xmlWriter)
throws XMLStreamException, ADBException {
serialize(parentQName,factory,xmlWriter,false);
}
public void serialize(final QName parentQName,
final OMFactory factory,
MTOMAwareXMLStreamWriter xmlWriter,
boolean serializeType)
throws XMLStreamException, ADBException {
throw new UnsupportedOperationException("Un implemented method");
}
/** Factory class that keeps the parse method */
public static class Factory {
/** static method to create the object */
public static ClientInfo parse(javax.xml.stream.XMLStreamReader reader)
throws java.lang.Exception {
ClientInfo object = new ClientInfo();
try {
int event = reader.getEventType();
int count = 0;
int argumentCount = 2;
boolean done = false;
//event better be a START_ELEMENT. if not we should go up to the start element here
while (!reader.isStartElement()) {
event = reader.next();
}
while (!done) {
if (javax.xml.stream.XMLStreamConstants.START_ELEMENT == event) {
if ("name".equals(reader.getLocalName())) {
String content = reader.getElementText();
object.setName(
org.apache.axis2.databinding.utils.ConverterUtil.convertToString(
content));
count++;
}
if ("ssn".equals(reader.getLocalName())) {
String content = reader.getElementText();
object.setSsn(
org.apache.axis2.databinding.utils.ConverterUtil.convertToString(
content));
count++;
}
}
if (argumentCount == count) {
done = true;
}
if (!done) {
event = reader.next();
}
}
} catch (javax.xml.stream.XMLStreamException e) {
throw new java.lang.Exception(e);
}
return object;
}
}//end of factory class
}<|fim▁end|> | * Unless required by applicable law or agreed to in writing, |
<|file_name|>controllers.js<|end_file_name|><|fim▁begin|>var phonecatControllers = angular.module('phonecatControllers', []);
phonecatControllers.controller('PhoneListCtrl', ['$scope', '$http',
function ($scope, $http) {
$http.get('phones/phones.json').success(function(data) {
$scope.phones = data;
});
$scope.orderProp = 'age';
}]);
phonecatControllers.controller('PhoneDetailCtrl', ['$scope', '$routeParams', '$http',
function($scope, $routeParams, $http) {
$http.get('phones/' + $routeParams.phoneId + '.json').success(function(data) {
$scope.phone = data;
$scope.mainImageUrl = data.images[0];
});
<|fim▁hole|> }]);<|fim▁end|> | $scope.setImage = function(imageUrl) {
$scope.mainImageUrl = imageUrl;
} |
<|file_name|>jquery.ui.datepicker-no.js<|end_file_name|><|fim▁begin|>/* Norwegian initialisation for the jQuery UI date picker plugin. */
/* Written by Naimdjon Takhirov ([email protected]). */
jQuery(function($){
$.datepicker.regional['no'] = {
closeText: 'Lukk',
prevText: '«Forrige',
nextText: 'Neste»',
<|fim▁hole|> dayNames: ['søndag','mandag','tirsdag','onsdag','torsdag','fredag','lørdag'],
dayNamesMin: ['sø','ma','ti','on','to','fr','lø'],
weekHeader: 'Uke',
dateFormat: 'dd.mm.yy',
firstDay: 1,
isRTL: false,
showMonthAfterYear: false,
yearSuffix: ''
};
$.datepicker.setDefaults($.datepicker.regional['no']);
});<|fim▁end|> | currentText: 'I dag',
monthNames: ['januar','februar','mars','april','mai','juni','juli','august','september','oktober','november','desember'],
monthNamesShort: ['jan','feb','mar','apr','mai','jun','jul','aug','sep','okt','nov','des'],
dayNamesShort: ['søn','man','tir','ons','tor','fre','lør'],
|
<|file_name|>projmgr.py<|end_file_name|><|fim▁begin|>import tangelo
import pymongo
import bson.json_util
from ArborFileManagerAPI import ArborFileManager
api = ArborFileManager()
api.initDatabaseConnection()
@tangelo.restful
def get(*pargs, **query_args):
if len(pargs) == 0:
return tangelo.HTTPStatusCode(400, "Missing resource type")
resource_type = pargs[0]
allowed = ["project", "analysis","collection", "workflow"]
if resource_type == "project":
if len(pargs) == 1:
return api.getListOfProjectNames()
elif len(pargs) == 2:
project = pargs[1]
return api.getListOfTypesForProject(project)
elif len(pargs) == 3:
project = pargs[1]
datatype = pargs[2]
return api.getListOfDatasetsByProjectAndType(project, datatype)
elif len(pargs) == 4:
project = pargs[1]
datatype = pargs[2]
dataset = pargs[3]
coll = api.db[api.returnCollectionForObjectByName(project, datatype, dataset)]
return bson.json_util.dumps(list(coll.find()))
elif len(pargs) == 5:
project = pargs[1]
datatype = pargs[2]
dataset = pargs[3]
stringFormat = pargs[4]
string = api.getDatasetAsTextString(project, datatype, dataset, stringFormat)
return string
else:
return tangelo.HTTPStatusCode(400, "Bad request - got %d parameter(s), was expecting between 1 and 5")
elif resource_type == "analysis":
if len(pargs) == 1:
return api.getListOfAnalysisNames()
elif len(pargs) == 2:<|fim▁hole|> analysis_name = pargs[1]
coll = api.db[api.returnCollectionForAnalysisByName(analysis_name)]
return bson.json_util.dumps(list(coll.find()))
elif len(pargs) == 3:
analysis_name = pargs[1]
coll = api.db[api.returnCollectionForAnalysisByName(analysis_name)]
return coll.find_one()["analysis"]["script"]
# add a collection option to return the database and collection name for an object in the
# Arbor treestore. This 'information hiding violation' of the treestore allows for low-level
# clients to connect and work directly with the mongo database, should it be needed. This level
# is used in the phylomap application.
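# Illustrative example (project/dataset names and returned values are hypothetical):
#   GET <service>/collection/MyProject/PhyloTree/anolis
#   -> {"host": ..., "port": ..., "db": ..., "collection": ...}
# so a low-level client can open its own mongo connection to that collection.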
elif resource_type == "collection":
if len(pargs) == 4:
project = pargs[1]
datatype = pargs[2]
dataset = pargs[3]
collname = api.returnCollectionForObjectByName(project, datatype, dataset)
dbname = api.getMongoDatabase()
dbhost = api.getMongoHost()
dbport = api.getMongoPort()
return bson.json_util.dumps({'host':dbhost,'port':dbport,'db': dbname,'collection': collname})
# if workflow is specified as the resource type, then list the workflows in a project or display the
# information about a particular workflow
elif resource_type == "workflow":
if len(pargs) == 2:
project = pargs[1]
return api.getListOfDatasetsByProjectAndType(project,"Workflow")
if len(pargs) == 3:
project = pargs[1]
workflowName = pargs[2]
print("REST: getting status of workflow:",workflowName)
return bson.json_util.dumps(api.getStatusOfWorkflow(workflowName,project))
else:
return tangelo.HTTPStatusCode(400, "Workflow resource requires 2 or 3 positional arguments")
else:
return tangelo.HTTPStatusCode(400, "Bad resource type '%s' - allowed types are: %s" % (resource_type, ", ".join(allowed)))
# Jan 2014 - added support for workflows as a datatype inside projects. new workflow-only named types are
# defined here to allow workflows to be created and run through the REST interface
#
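# Illustrative calls (paths assume this module is mounted as a Tangelo service;
# project, dataset, and workflow names here are hypothetical):
#   PUT <service>/project/MyProject
#       -> creates an empty project named "MyProject"
#   PUT <service>/project/MyProject?datasetname=anolis&filename=anolis.phy&filetype=newick&data=<newick text>
#       -> stores a tree dataset in the project
#   PUT <service>/workflow/MyProject?workflowName=wf1&operation=newWorkflow
#       -> creates an empty workflow named "wf1"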
@tangelo.restful
def put(resource, projname, datasetname=None, data=None, filename=None, filetype=None,
workflowName = None, stepName=None, stepType=None, inputStepName=None, outputStepName=None,
inPortName=None,outPortName=None,operation=None, parameterName=None, parameterValue=None,
parameterValueNumber=None,flowType=None,dataType=None, **kwargs):
if (resource != "project") and (resource != "workflow"):
return tangelo.HTTPStatusCode(400, "Bad resource type '%s' - allowed types are: project")
if resource == "project":
if datasetname is None:
api.newProject(projname)
else:
if filename is None:
return tangelo.HTTPStatusCode(400, "Missing argument 'filename'")
if filetype is None:
return tangelo.HTTPStatusCode(400, "Missing argument 'filetype'")
if data is None:
return tangelo.HTTPStatusCode(400, "Missing argument 'data'")
if datasetname is None:
return tangelo.HTTPStatusCode(400, "Missing argument 'datasetname'")
# user wants to upload a tree or a character matrix
if filetype == "newick" or filetype == "phyloxml":
api.newTreeInProjectFromString(datasetname, data, projname, filename, filetype)
if (filetype == "csv" and dataType is None) or (filetype == "csv" and dataType=='CharacterMatrix'):
api.newCharacterMatrixInProjectFromString(datasetname, data, projname, filename)
if filetype == "csv" and dataType=="Occurrences":
api.newOccurrencesInProjectFromString(datasetname, data, projname)
# workflow creation
# arborapi: /workflow/projname/workflowname - creates new empty workflow
# arborapi: /workflow/projname/workflowname//
if resource == "workflow":
# the user wants to create a new, empty workflow
if operation == "newWorkflow":
api.newWorkflowInProject(workflowName, projname)
if operation == "newWorkstepInWorkflow":
api.newWorkstepInWorkflow(workflowName, stepType, stepName, projname)
# allow user to add a parameter to a workstep or update the value of the parameter. There
# is currently a limitation that all values are strings, e.g. "2.4" instead of 2.4.
if operation == "updateWorkstepParameter":
# if a float argument is sent, use this as the value for the parameter, instead of the
# string. A conversion is done to float to assure numberic values
if parameterValueNumber != None:
print "found number filter value"
parameterValue = float(parameterValueNumber)
api.updateWorkstepParameter(workflowName, stepName, parameterName, parameterValue, projname)
if operation == "connectWorksteps":
#api.connectStepsInWorkflow(workflowName,outStepName,outPortName,inStepName,inPortName,projname)
api.connectStepsInWorkflow(workflowName,outputStepName,inputStepName,projname)
if operation == "executeWorkflow":
api.executeWorkflowInProject(workflowName,projname)
if operation == "updateWorkflowFromString":
print "received request to update workflow: ",workflowName
api.updateExistingWorkflowInProject(workflowName,data,projname)
return "OK"
@tangelo.restful
def post(*pargs, **kwargs):
return "projmgr.post()"
@tangelo.restful
def delete(resource, projname, datatype=None, dataset=None):
if resource != "project":
return tangelo.HTTPStatusCode(400, "Bad resource type '%s' - allowed types are: project" % resource)
# (This is expressing xor)
if (datatype is None) != (dataset is None):
return tangelo.HTTPStatusCode(400, "Bad arguments - 'datatype' and 'dataset' must both be specified if either one is specified")
if datatype is None:
api.deleteProjectNamed(projname)
else:
api.deleteDataset(projname, datatype, dataset)
return "OK"<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals
from .abc import ABCIE
from .abc7news import Abc7NewsIE
from .academicearth import AcademicEarthCourseIE
from .addanime import AddAnimeIE
from .adobetv import (
AdobeTVIE,
AdobeTVVideoIE,
)
from .adultswim import AdultSwimIE
from .aftenposten import AftenpostenIE
from .aftonbladet import AftonbladetIE
from .airmozilla import AirMozillaIE
from .aljazeera import AlJazeeraIE
from .alphaporno import AlphaPornoIE
from .anitube import AnitubeIE
from .anysex import AnySexIE
from .aol import AolIE
from .allocine import AllocineIE
from .aparat import AparatIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
from .ard import ARDIE, ARDMediathekIE
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
ArteTVCreativeIE,
ArteTVConcertIE,
ArteTVFutureIE,
ArteTVDDCIE,
ArteTVEmbedIE,
)
from .atresplayer import AtresPlayerIE
from .atttechchannel import ATTTechChannelIE
from .audiomack import AudiomackIE, AudiomackAlbumIE
from .azubu import AzubuIE
from .baidu import BaiduVideoIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbccouk import BBCCoUkIE
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .beatportpro import BeatportProIE
from .bet import BetIE
from .bild import BildIE
from .bilibili import BiliBiliIE
from .blinkx import BlinkxIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .bloomberg import BloombergIE
from .bpb import BpbIE
from .br import BRIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .camdemy import (
CamdemyIE,
CamdemyFolderIE
)
from .canal13cl import Canal13clIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .cbs import CBSIE
from .cbsnews import CBSNewsIE
from .cbssports import CBSSportsIE
from .ccc import CCCIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
from .chilloutzone import ChilloutzoneIE
from .chirbit import (
ChirbitIE,
ChirbitProfileIE,
)
from .cinchcast import CinchcastIE
from .cinemassacre import CinemassacreIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
from .cmt import CMTIE
from .cnet import CNETIE
from .cnn import (
CNNIE,
CNNBlogsIE,
CNNArticleIE,
)
from .collegehumor import CollegeHumorIE
from .collegerama import CollegeRamaIE
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .comcarcoff import ComCarCoffIE
from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
from .condenast import CondeNastIE
from .cracked import CrackedIE
from .criterion import CriterionIE
from .crooksandliars import CrooksAndLiarsIE
from .crunchyroll import (
CrunchyrollIE,
CrunchyrollShowPlaylistIE
)
from .cspan import CSpanIE
from .ctsnews import CtsNewsIE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
DailymotionCloudIE,
)
from .daum import DaumIE
from .dbtv import DBTVIE
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
from .dfb import DFBIE
from .dhm import DHMIE
from .dotsub import DotsubIE
from .douyutv import DouyuTVIE
from .dramafever import (
DramaFeverIE,
DramaFeverSeriesIE,
)
from .dreisat import DreiSatIE
from .drbonanza import DRBonanzaIE
from .drtuber import DrTuberIE
from .drtv import DRTVIE
from .dvtv import DVTVIE
from .dump import DumpIE
from .dumpert import DumpertIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .divxstage import DivxStageIE
from .dropbox import DropboxIE
from .eagleplatform import EaglePlatformIE
from .ebaumsworld import EbaumsWorldIE
from .echomsk import EchoMskIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .ellentv import (
EllenTVIE,
EllenTVClipsIE,
)
from .elpais import ElPaisIE
from .embedly import EmbedlyIE
from .engadget import EngadgetIE
from .eporner import EpornerIE
from .eroprofile import EroProfileIE
from .escapist import EscapistIE
from .espn import ESPNIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .fc2 import FC2IE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fivetv import FiveTVIE
from .fktv import (
FKTVIE,
FKTVPosteckeIE,
)
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
from .fourtube import FourTubeIE
from .foxgay import FoxgayIE
from .foxnews import FoxNewsIE
from .foxsports import FoxSportsIE
from .franceculture import FranceCultureIE
from .franceinter import FranceInterIE
from .francetv import (
PluzzIE,
FranceTvInfoIE,
FranceTVIE,
GenerationQuoiIE,
CultureboxIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freevideo import FreeVideoIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gameone import (
GameOneIE,
GameOnePlaylistIE,
)
from .gamersyde import GamersydeIE
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gametrailers import GametrailersIE
from .gazeta import GazetaIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .gfycat import GfycatIE
from .giantbomb import GiantBombIE
from .giga import GigaIE
from .glide import GlideIE
from .globo import GloboIE
from .godtube import GodTubeIE
from .goldenmoustache import GoldenMoustacheIE
from .golem import GolemIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .gorillavid import GorillaVidIE
from .goshgay import GoshgayIE
from .groupon import GrouponIE
from .hark import HarkIE
from .hearthisat import HearThisAtIE
from .heise import HeiseIE
from .hellporno import HellPornoIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .historicfilms import HistoricFilmsIE
from .history import HistoryIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hornbunny import HornBunnyIE
from .hostingbulk import HostingBulkIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
from .huffpost import HuffPostIE
from .hypem import HypemIE
from .iconosquare import IconosquareIE
from .ign import IGNIE, OneUPIE
from .imdb import (
ImdbIE,
ImdbListIE
)
from .imgur import ImgurIE
from .ina import InaIE
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .iqiyi import IqiyiIE
from .ivi import (
IviIE,
IviCompilationIE
)
from .izlesene import IzleseneIE
from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .jukebox import JukeboxIE
from .jpopsukitv import JpopsukiIE
from .kaltura import KalturaIE
from .kanalplay import KanalPlayIE
from .kankan import KankanIE
from .karaoketv import KaraoketvIE
from .karrierevideos import KarriereVideosIE
from .keezmovies import KeezMoviesIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
from .kontrtube import KontrTubeIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
from .letv import (
LetvIE,
LetvTvIE,
LetvPlaylistIE
)
from .libsyn import LibsynIE
from .lifenews import (
LifeNewsIE,
LifeEmbedIE,
)
from .liveleak import LiveLeakIE
from .livestream import (
LivestreamIE,
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lnkgo import LnkGoIE
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
)
from .m6 import M6IE
from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
from .malemotion import MalemotionIE
from .mdr import MDRIE
from .megavideoz import MegaVideozIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
from .minhateca import MinhatecaIE
from .ministrygrid import MinistryGridIE
from .miomio import MioMioIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
from .mitele import MiTeleIE
from .mixcloud import MixcloudIE
from .mlb import MLBIE
from .mpora import MporaIE
from .moevideo import MoeVideoIE
from .mofosex import MofosexIE
from .mojvideo import MojvideoIE
from .moniker import MonikerIE
from .mooshare import MooshareIE
from .morningstar import MorningstarIE
from .motherless import MotherlessIE
from .motorsport import MotorsportIE
from .movieclips import MovieClipsIE
from .moviezine import MoviezineIE
from .movshare import MovShareIE
from .mtv import (
MTVIE,
MTVServicesEmbeddedIE,
MTVIggyIE,
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
from .musicvault import MusicVaultIE
from .muzu import MuzuTVIE
from .myspace import MySpaceIE, MySpaceAlbumIE
from .myspass import MySpassIE
from .myvideo import MyVideoIE
from .myvidster import MyVidsterIE
from .nationalgeographic import NationalGeographicIE
from .naver import NaverIE
from .nba import NBAIE
from .nbc import (
NBCIE,
NBCNewsIE,
NBCSportsIE,
NBCSportsVPlayerIE,
)
from .ndr import (
NDRIE,
NJoyIE,
)
from .ndtv import NDTVIE
from .netzkino import NetzkinoIE
from .nerdcubed import NerdCubedFeedIE
from .nerdist import NerdistIE
from .newgrounds import NewgroundsIE
from .newstube import NewstubeIE
from .nextmedia import (
NextMediaIE,
NextMediaActionNewsIE,
AppleDailyIE,
)
from .nfb import NFBIE
from .nfl import NFLIE
from .nhl import (
NHLIE,
NHLNewsIE,
NHLVideocenterIE,
)
from .niconico import NiconicoIE, NiconicoPlaylistIE
from .ninegag import NineGagIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .nova import NovaIE
from .novamov import NovaMovIE
from .nowness import NownessIE
from .nowtv import NowTVIE
from .nowvideo import NowVideoIE
from .npo import (
NPOIE,
NPOLiveIE,
NPORadioIE,
NPORadioFragmentIE,
TegenlichtVproIE,
)
from .nrk import (
NRKIE,
NRKPlaylistIE,
NRKTVIE,
)
from .ntvde import NTVDeIE
from .ntvru import NTVRuIE
from .nytimes import (
NYTimesIE,
NYTimesArticleIE,
)
from .nuvid import NuvidIE
from .odnoklassniki import OdnoklassnikiIE
from .oktoberfesttv import OktoberfestTVIE
from .onionstudios import OnionStudiosIE
from .ooyala import (
OoyalaIE,
OoyalaExternalIE,
)
from .openfilm import OpenFilmIE
from .orf import (
ORFTVthekIE,
ORFOE1IE,
ORFFM4IE,
ORFIPTVIE,
)
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .philharmoniedeparis import PhilharmonieDeParisIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .pinkbike import PinkbikeIE
from .planetaplay import PlanetaPlayIE
from .pladform import PladformIE
from .played import PlayedIE
from .playfm import PlayFMIE
from .playvid import PlayvidIE
from .playwire import PlaywireIE
from .podomatic import PodomaticIE
from .porn91 import Porn91IE
from .pornhd import PornHdIE
from .pornhub import (
PornHubIE,
PornHubPlaylistIE,
)
from .pornotube import PornotubeIE
from .pornovoisines import PornoVoisinesIE
from .pornoxo import PornoXOIE
from .primesharetv import PrimeShareTVIE
from .promptfile import PromptFileIE
from .prosiebensat1 import ProSiebenSat1IE
from .puls4 import Puls4IE
from .pyvideo import PyvideoIE
from .qqmusic import (
QQMusicIE,
QQMusicSingerIE,
QQMusicAlbumIE,
QQMusicToplistIE,
)
from .quickvid import QuickVidIE
from .r7 import R7IE
from .radiode import RadioDeIE
from .radiojavan import RadioJavanIE
from .radiobremen import RadioBremenIE
from .radiofrance import RadioFranceIE
from .rai import RaiIE
from .rbmaradio import RBMARadioIE
from .redtube import RedTubeIE
from .restudy import RestudyIE
from .reverbnation import ReverbNationIE
from .ringtv import RingTVIE
from .ro220 import Ro220IE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtbf import RTBFIE
from .rte import RteIE
from .rtlnl import RtlNlIE
from .rtl2 import RTL2IE
from .rtp import RTPIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE
from .ruhd import RUHDIE
from .rutube import (
RutubeIE,
RutubeChannelIE,
RutubeEmbedIE,
RutubeMovieIE,
RutubePersonIE,
)
from .rutv import RUTVIE
from .ruutu import RuutuIE
from .sandia import SandiaIE
from .safari import (
SafariIE,
SafariCourseIE,
)
from .sapo import SapoIE
from .savefrom import SaveFromIE
from .sbs import SBSIE
from .scivee import SciVeeIE
from .screencast import ScreencastIE
from .screencastomatic import ScreencastOMaticIE
from .screenwavemedia import ScreenwaveMediaIE, TeamFourIE
from .senateisvp import SenateISVPIE
from .servingsys import ServingSysIE
from .sexu import SexuIE
from .sexykarma import SexyKarmaIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
from .slideshare import SlideshareIE
from .slutload import SlutloadIE
from .smotri import (
SmotriIE,
SmotriCommunityIE,
SmotriUserIE,
SmotriBroadcastIE,
)
from .snagfilms import (
SnagFilmsIE,
SnagFilmsEmbedIE,
)
from .snotr import SnotrIE
from .sohu import SohuIE
from .soompi import (
SoompiIE,
SoompiShowIE,
)
from .soundcloud import (
SoundcloudIE,
SoundcloudSetIE,
SoundcloudUserIE,
SoundcloudPlaylistIE
)
from .soundgasm import (
SoundgasmIE,
SoundgasmProfileIE
)
from .southpark import (
SouthParkIE,
SouthParkDeIE,
SouthParkDkIE,
SouthParkEsIE,
SouthParkNlIE
)
from .space import SpaceIE
from .spankbang import SpankBangIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .sport5 import Sport5IE
from .sportbox import (
SportBoxIE,
SportBoxEmbedIE,
)
from .sportdeutschland import SportDeutschlandIE
from .srf import SrfIE
from .srmediathek import SRMediathekIE
from .ssa import SSAIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .streetvoice import StreetVoiceIE
from .sunporno import SunPornoIE
from .svt import (
SVTIE,
SVTPlayIE,
)
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import TagesschauIE
from .tapely import TapelyIE
from .tass import TassIE
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
)
from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
from .telemb import TeleMBIE
from .teletask import TeleTaskIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .testtube import TestTubeIE
from .tf1 import TF1IE
from .theonion import TheOnionIE
from .theplatform import ThePlatformIE
from .thesixtyone import TheSixtyOneIE
from .thisamericanlife import ThisAmericanLifeIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .tlc import TlcIE, TlcDeIE
from .tmz import (
TMZIE,
TMZArticleIE,
)
from .tnaflix import (
TNAFlixIE,
EMPFlixIE,
MovieFapIE,
)
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
)
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tubitv import TubiTvIE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tunein import TuneInIE
from .turbo import TurboIE
from .tutv import TutvIE
from .tv2 import (
TV2IE,
TV2ArticleIE,
)
from .tv4 import TV4IE
from .tvc import (
TVCIE,
TVCArticleIE,<|fim▁hole|>)
from .tvigle import TvigleIE
from .tvp import TvpIE, TvpSeriesIE
from .tvplay import TVPlayIE
from .tweakers import TweakersIE
from .twentyfourvideo import TwentyFourVideoIE
from .twentytwotracks import (
TwentyTwoTracksIE,
TwentyTwoTracksGenreIE
)
from .twitch import (
TwitchVideoIE,
TwitchChapterIE,
TwitchVodIE,
TwitchProfileIE,
TwitchPastBroadcastsIE,
TwitchBookmarksIE,
TwitchStreamIE,
)
from .twitter import TwitterCardIE
from .ubu import UbuIE
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .udn import UDNEmbedIE
from .ultimedia import UltimediaIE
from .unistra import UnistraIE
from .urort import UrortIE
from .ustream import UstreamIE, UstreamChannelIE
from .varzesh3 import Varzesh3IE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vessel import VesselIE
from .vesti import VestiIE
from .vevo import VevoIE
from .vgtv import (
BTArticleIE,
BTVestlendingenIE,
VGTVIE,
)
from .vh1 import VH1IE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
from .videomega import VideoMegaIE
from .videopremium import VideoPremiumIE
from .videott import VideoTtIE
from .videoweed import VideoWeedIE
from .vidme import VidmeIE
from .vidzi import VidziIE
from .vier import VierIE, VierVideosIE
from .viewster import ViewsterIE
from .vimeo import (
VimeoIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
)
from .vimple import VimpleIE
from .vine import (
VineIE,
VineUserIE,
)
from .viki import (
VikiIE,
VikiChannelIE,
)
from .vk import (
VKIE,
VKUserVideosIE,
)
from .vodlocker import VodlockerIE
from .voicerepublic import VoiceRepublicIE
from .vporn import VpornIE
from .vrt import VRTIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vulture import VultureIE
from .walla import WallaIE
from .washingtonpost import WashingtonPostIE
from .wat import WatIE
from .wayofthemaster import WayOfTheMasterIE
from .wdr import (
WDRIE,
WDRMobileIE,
WDRMausIE,
)
from .webofstories import WebOfStoriesIE
from .weibo import WeiboIE
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
from .wrzuta import WrzutaIE
from .wsj import WSJIE
from .xbef import XBefIE
from .xboxclips import XboxClipsIE
from .xhamster import (
XHamsterIE,
XHamsterEmbedIE,
)
from .xminus import XMinusIE
from .xnxx import XNXXIE
from .xstream import XstreamIE
from .xtube import XTubeUserIE, XTubeIE
from .xuite import XuiteIE
from .xvideos import XVideosIE
from .xxxymovies import XXXYMoviesIE
from .yahoo import (
YahooIE,
YahooSearchIE,
)
from .yam import YamIE
from .yandexmusic import (
YandexMusicTrackIE,
YandexMusicAlbumIE,
YandexMusicPlaylistIE,
)
from .yesjapan import YesJapanIE
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeTruncatedIDIE,
YoutubeTruncatedURLIE,
YoutubeUserIE,
YoutubeWatchLaterIE,
)
from .zapiks import ZapiksIE
from .zdf import ZDFIE, ZDFChannelIE
from .zingmp3 import (
ZingMp3SongIE,
ZingMp3AlbumIE,
)
_ALL_CLASSES = [
klass
for name, klass in globals().items()
if name.endswith('IE') and name != 'GenericIE'
]
_ALL_CLASSES.append(GenericIE)
def gen_extractors():
""" Return a list of an instance of every supported extractor.
The order does matter; the first extractor matched is the one handling the URL.
"""
return [klass() for klass in _ALL_CLASSES]
def list_extractors(age_limit):
"""
Return a list of extractors that are suitable for the given age,
sorted by extractor ID.
"""
return sorted(
filter(lambda ie: ie.is_suitable(age_limit), gen_extractors()),
key=lambda ie: ie.IE_NAME.lower())
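# Illustrative usage sketch (not part of the original module; the variable
# `url` is an assumed value):
#
#   from youtube_dl.extractor import gen_extractors, get_info_extractor
#
#   for ie in gen_extractors():
#       if ie.suitable(url):               # first matching extractor wins
#           print(ie.IE_NAME)
#           break
#
#   YoutubeIE = get_info_extractor('Youtube')  # returns the class, not an instance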
def get_info_extractor(ie_name):
"""Returns the info extractor class with the given ie_name"""
return globals()[ie_name + 'IE']<|fim▁end|> | |
<|file_name|>headers.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::HeadersBinding::{HeadersInit, HeadersMethods, HeadersWrap};
use dom::bindings::error::{Error, ErrorResult, Fallible};
use dom::bindings::iterable::Iterable;
use dom::bindings::js::Root;
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::bindings::str::{ByteString, is_token};
use dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
use hyper::header::Headers as HyperHeaders;
use mime::{Mime, TopLevel, SubLevel};
use std::cell::Cell;
use std::result::Result;
use std::str;
#[dom_struct]
pub struct Headers {
reflector_: Reflector,
guard: Cell<Guard>,
#[ignore_heap_size_of = "Defined in hyper"]
header_list: DOMRefCell<HyperHeaders>
}
// https://fetch.spec.whatwg.org/#concept-headers-guard
#[derive(Copy, Clone, JSTraceable, HeapSizeOf, PartialEq)]
pub enum Guard {
Immutable,
Request,
RequestNoCors,
Response,
None,
}
impl Headers {
pub fn new_inherited() -> Headers {
Headers {
reflector_: Reflector::new(),
guard: Cell::new(Guard::None),
header_list: DOMRefCell::new(HyperHeaders::new()),
}
}
pub fn new(global: &GlobalScope) -> Root<Headers> {
reflect_dom_object(box Headers::new_inherited(), global, HeadersWrap)<|fim▁hole|> pub fn Constructor(global: &GlobalScope, init: Option<HeadersInit>)
-> Fallible<Root<Headers>> {
let dom_headers_new = Headers::new(global);
dom_headers_new.fill(init)?;
Ok(dom_headers_new)
}
}
impl HeadersMethods for Headers {
// https://fetch.spec.whatwg.org/#concept-headers-append
fn Append(&self, name: ByteString, value: ByteString) -> ErrorResult {
// Step 1
let value = normalize_value(value);
// Step 2
let (mut valid_name, valid_value) = validate_name_and_value(name, value)?;
valid_name = valid_name.to_lowercase();
// Step 3
if self.guard.get() == Guard::Immutable {
return Err(Error::Type("Guard is immutable".to_string()));
}
// Step 4
if self.guard.get() == Guard::Request && is_forbidden_header_name(&valid_name) {
return Ok(());
}
// Step 5
if self.guard.get() == Guard::RequestNoCors && !is_cors_safelisted_request_header(&valid_name, &valid_value) {
return Ok(());
}
// Step 6
if self.guard.get() == Guard::Response && is_forbidden_response_header(&valid_name) {
return Ok(());
}
// Step 7
let mut combined_value: Vec<u8> = vec![];
if let Some(v) = self.header_list.borrow().get_raw(&valid_name) {
combined_value = v[0].clone();
combined_value.push(b',');
}
combined_value.extend(valid_value.iter().cloned());
self.header_list.borrow_mut().set_raw(valid_name, vec![combined_value]);
Ok(())
}
// https://fetch.spec.whatwg.org/#dom-headers-delete
fn Delete(&self, name: ByteString) -> ErrorResult {
// Step 1
let valid_name = validate_name(name)?;
// Step 2
if self.guard.get() == Guard::Immutable {
return Err(Error::Type("Guard is immutable".to_string()));
}
// Step 3
if self.guard.get() == Guard::Request && is_forbidden_header_name(&valid_name) {
return Ok(());
}
// Step 4
if self.guard.get() == Guard::RequestNoCors &&
!is_cors_safelisted_request_header(&valid_name, &b"invalid".to_vec()) {
return Ok(());
}
// Step 5
if self.guard.get() == Guard::Response && is_forbidden_response_header(&valid_name) {
return Ok(());
}
// Step 6
self.header_list.borrow_mut().remove_raw(&valid_name);
Ok(())
}
// https://fetch.spec.whatwg.org/#dom-headers-get
fn Get(&self, name: ByteString) -> Fallible<Option<ByteString>> {
// Step 1
let valid_name = &validate_name(name)?;
Ok(self.header_list.borrow().get_raw(&valid_name).map(|v| {
ByteString::new(v[0].clone())
}))
}
// https://fetch.spec.whatwg.org/#dom-headers-has
fn Has(&self, name: ByteString) -> Fallible<bool> {
// Step 1
let valid_name = validate_name(name)?;
// Step 2
Ok(self.header_list.borrow_mut().get_raw(&valid_name).is_some())
}
// https://fetch.spec.whatwg.org/#dom-headers-set
fn Set(&self, name: ByteString, value: ByteString) -> Fallible<()> {
// Step 1
let value = normalize_value(value);
// Step 2
let (mut valid_name, valid_value) = validate_name_and_value(name, value)?;
valid_name = valid_name.to_lowercase();
// Step 3
if self.guard.get() == Guard::Immutable {
return Err(Error::Type("Guard is immutable".to_string()));
}
// Step 4
if self.guard.get() == Guard::Request && is_forbidden_header_name(&valid_name) {
return Ok(());
}
// Step 5
if self.guard.get() == Guard::RequestNoCors && !is_cors_safelisted_request_header(&valid_name, &valid_value) {
return Ok(());
}
// Step 6
if self.guard.get() == Guard::Response && is_forbidden_response_header(&valid_name) {
return Ok(());
}
// Step 7
// https://fetch.spec.whatwg.org/#concept-header-list-set
self.header_list.borrow_mut().set_raw(valid_name, vec![valid_value]);
Ok(())
}
}
impl Headers {
// https://fetch.spec.whatwg.org/#concept-headers-fill
pub fn fill(&self, filler: Option<HeadersInit>) -> ErrorResult {
match filler {
// Step 1
Some(HeadersInit::Headers(h)) => {
for header in h.header_list.borrow().iter() {
self.Append(
ByteString::new(Vec::from(header.name())),
ByteString::new(Vec::from(header.value_string().into_bytes()))
)?;
}
Ok(())
},
// Step 2
Some(HeadersInit::ByteStringSequenceSequence(v)) => {
for mut seq in v {
if seq.len() == 2 {
let val = seq.pop().unwrap();
let name = seq.pop().unwrap();
self.Append(name, val)?;
} else {
return Err(Error::Type(
format!("Each header object must be a sequence of length 2 - found one with length {}",
seq.len())));
}
}
Ok(())
},
Some(HeadersInit::StringByteStringRecord(m)) => {
for (key, value) in m.iter() {
let key_vec = key.as_ref().to_string().into();
let headers_key = ByteString::new(key_vec);
self.Append(headers_key, value.clone())?;
}
Ok(())
},
None => Ok(()),
}
}
pub fn for_request(global: &GlobalScope) -> Root<Headers> {
let headers_for_request = Headers::new(global);
headers_for_request.guard.set(Guard::Request);
headers_for_request
}
pub fn for_response(global: &GlobalScope) -> Root<Headers> {
let headers_for_response = Headers::new(global);
headers_for_response.guard.set(Guard::Response);
headers_for_response
}
pub fn set_guard(&self, new_guard: Guard) {
self.guard.set(new_guard)
}
pub fn get_guard(&self) -> Guard {
self.guard.get()
}
pub fn empty_header_list(&self) {
*self.header_list.borrow_mut() = HyperHeaders::new();
}
pub fn set_headers(&self, hyper_headers: HyperHeaders) {
*self.header_list.borrow_mut() = hyper_headers;
}
// https://fetch.spec.whatwg.org/#concept-header-extract-mime-type
pub fn extract_mime_type(&self) -> Vec<u8> {
self.header_list.borrow().get_raw("content-type").map_or(vec![], |v| v[0].clone())
}
pub fn sort_header_list(&self) -> Vec<(String, String)> {
let borrowed_header_list = self.header_list.borrow();
let headers_iter = borrowed_header_list.iter();
let mut header_vec = vec![];
for header in headers_iter {
let name = header.name().to_string();
let value = header.value_string();
let name_value = (name, value);
header_vec.push(name_value);
}
header_vec.sort();
header_vec
}
}
impl Iterable for Headers {
type Key = ByteString;
type Value = ByteString;
fn get_iterable_length(&self) -> u32 {
self.header_list.borrow().iter().count() as u32
}
fn get_value_at_index(&self, n: u32) -> ByteString {
let sorted_header_vec = self.sort_header_list();
let value = sorted_header_vec[n as usize].1.clone();
ByteString::new(value.into_bytes().to_vec())
}
fn get_key_at_index(&self, n: u32) -> ByteString {
let sorted_header_vec = self.sort_header_list();
let key = sorted_header_vec[n as usize].0.clone();
ByteString::new(key.into_bytes().to_vec())
}
}
fn is_cors_safelisted_request_content_type(value: &[u8]) -> bool {
let value_string = if let Ok(s) = str::from_utf8(value) {
s
} else {
return false;
};
let value_mime_result: Result<Mime, _> = value_string.parse();
match value_mime_result {
Err(_) => false,
Ok(value_mime) => {
match value_mime {
Mime(TopLevel::Application, SubLevel::WwwFormUrlEncoded, _) |
Mime(TopLevel::Multipart, SubLevel::FormData, _) |
Mime(TopLevel::Text, SubLevel::Plain, _) => true,
_ => false,
}
}
}
}
// TODO: "DPR", "Downlink", "Save-Data", "Viewport-Width", "Width":
// ... once parsed, the value should not be failure.
// https://fetch.spec.whatwg.org/#cors-safelisted-request-header
fn is_cors_safelisted_request_header(name: &str, value: &[u8]) -> bool {
match name {
"accept" |
"accept-language" |
"content-language" => true,
"content-type" => is_cors_safelisted_request_content_type(value),
_ => false,
}
}
// https://fetch.spec.whatwg.org/#forbidden-response-header-name
fn is_forbidden_response_header(name: &str) -> bool {
match name {
"set-cookie" |
"set-cookie2" => true,
_ => false,
}
}
// https://fetch.spec.whatwg.org/#forbidden-header-name
pub fn is_forbidden_header_name(name: &str) -> bool {
let disallowed_headers =
["accept-charset", "accept-encoding",
"access-control-request-headers",
"access-control-request-method",
"connection", "content-length",
"cookie", "cookie2", "date", "dnt",
"expect", "host", "keep-alive", "origin",
"referer", "te", "trailer", "transfer-encoding",
"upgrade", "via"];
let disallowed_header_prefixes = ["sec-", "proxy-"];
disallowed_headers.iter().any(|header| *header == name) ||
disallowed_header_prefixes.iter().any(|prefix| name.starts_with(prefix))
}
// There is some unresolved confusion over the definition of a name and a value.
// The fetch spec [1] defines a name as "a case-insensitive byte
// sequence that matches the field-name token production. The token
// productions are viewable in [2]." A field-name is defined as a
// token, which is defined in [3].
// ISSUE 1:
// It defines a value as "a byte sequence that matches the field-content token production."
// To note, there is a difference between field-content and
// field-value (which is made up of field-content and obs-fold). The
// current definition does not allow for obs-fold (which are white
// space and newlines) in values. So perhaps a value should be defined
// as "a byte sequence that matches the field-value token production."
// However, this would then allow values made up entirely of white space and newlines.
// RELATED ISSUE 2:
// According to a previously filed Errata ID: 4189 in [4], "the
// specified field-value rule does not allow single field-vchar
// surrounded by whitespace anywhere". They provided a fix for the
// field-content production, but ISSUE 1 has still not been resolved.
// The production definitions likely need to be re-written.
// [1] https://fetch.spec.whatwg.org/#concept-header-value
// [2] https://tools.ietf.org/html/rfc7230#section-3.2
// [3] https://tools.ietf.org/html/rfc7230#section-3.2.6
// [4] https://www.rfc-editor.org/errata_search.php?rfc=7230
fn validate_name_and_value(name: ByteString, value: ByteString)
-> Fallible<(String, Vec<u8>)> {
let valid_name = validate_name(name)?;
if !is_field_content(&value) {
return Err(Error::Type("Value is not valid".to_string()));
}
Ok((valid_name, value.into()))
}
fn validate_name(name: ByteString) -> Fallible<String> {
if !is_field_name(&name) {
return Err(Error::Type("Name is not valid".to_string()));
}
match String::from_utf8(name.into()) {
Ok(ns) => Ok(ns),
_ => Err(Error::Type("Non-UTF8 header name found".to_string())),
}
}
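// Illustrative sketch, not part of the original source: a few concrete cases
// showing how the validators above treat whitespace. It reuses the ByteString
// byte-vector constructor seen elsewhere in this file; the module and test
// names are invented for illustration only.
#[cfg(test)]
mod header_validation_examples {
    use super::*;

    #[test]
    fn whitespace_handling_in_names_and_values() {
        // A plain token is a valid header name.
        assert!(validate_name(ByteString::new(b"content-type".to_vec())).is_ok());
        // SP is not a token character, so this name is rejected.
        assert!(validate_name(ByteString::new(b"content type".to_vec())).is_err());
        // field-content must start and end with a field-vchar: inner SP is
        // accepted, but leading whitespace is rejected (see ISSUE 1 above).
        assert!(is_field_content(&ByteString::new(b"no cache".to_vec())));
        assert!(!is_field_content(&ByteString::new(b" no-cache".to_vec())));
    }
}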
// Removes trailing and leading HTTP whitespace bytes.
// https://fetch.spec.whatwg.org/#concept-header-value-normalize
pub fn normalize_value(value: ByteString) -> ByteString {
match (index_of_first_non_whitespace(&value), index_of_last_non_whitespace(&value)) {
(Some(begin), Some(end)) => ByteString::new(value[begin..end + 1].to_owned()),
_ => ByteString::new(vec![]),
}
}
fn is_HTTP_whitespace(byte: u8) -> bool {
byte == b'\t' || byte == b'\n' || byte == b'\r' || byte == b' '
}
fn index_of_first_non_whitespace(value: &ByteString) -> Option<usize> {
for (index, &byte) in value.iter().enumerate() {
if !is_HTTP_whitespace(byte) {
return Some(index);
}
}
None
}
fn index_of_last_non_whitespace(value: &ByteString) -> Option<usize> {
for (index, &byte) in value.iter().enumerate().rev() {
if !is_HTTP_whitespace(byte) {
return Some(index);
}
}
None
}
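// Illustrative sketch, not part of the original source: normalize_value strips
// HTTP whitespace (HTAB, LF, CR, SP) from the ends of a value only, and an
// all-whitespace value collapses to the empty byte string. It relies on the
// `&*` byte-slice deref already used by is_field_name below; names are invented.
#[cfg(test)]
mod normalize_value_examples {
    use super::*;

    #[test]
    fn trims_only_leading_and_trailing_whitespace() {
        let trimmed = normalize_value(ByteString::new(b" \t no cache \r\n".to_vec()));
        assert_eq!(&*trimmed, &b"no cache"[..]);
        let empty = normalize_value(ByteString::new(b" \t\r\n ".to_vec()));
        assert_eq!(&*empty, &b""[..]);
    }
}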
// http://tools.ietf.org/html/rfc7230#section-3.2
fn is_field_name(name: &ByteString) -> bool {
is_token(&*name)
}
// https://tools.ietf.org/html/rfc7230#section-3.2
// http://www.rfc-editor.org/errata_search.php?rfc=7230
// Errata ID: 4189
// field-content = field-vchar [ 1*( SP / HTAB / field-vchar )
// field-vchar ]
fn is_field_content(value: &ByteString) -> bool {
let value_len = value.len();
if value_len == 0 {
return false;
}
if !is_field_vchar(value[0]) {
return false;
}
if value_len > 2 {
for &ch in &value[1..value_len - 1] {
if !is_field_vchar(ch) && !is_space(ch) && !is_htab(ch) {
return false;
}
}
}
if !is_field_vchar(value[value_len - 1]) {
return false;
}
return true;
}
fn is_space(x: u8) -> bool {
x == b' '
}
fn is_htab(x: u8) -> bool {
x == b'\t'
}
// https://tools.ietf.org/html/rfc7230#section-3.2
fn is_field_vchar(x: u8) -> bool {
is_vchar(x) || is_obs_text(x)
}
// https://tools.ietf.org/html/rfc5234#appendix-B.1
pub fn is_vchar(x: u8) -> bool {
match x {
0x21...0x7E => true,
_ => false,
}
}
// http://tools.ietf.org/html/rfc7230#section-3.2.6
pub fn is_obs_text(x: u8) -> bool {
match x {
0x80...0xFF => true,
_ => false,
}
}<|fim▁end|> | }
// https://fetch.spec.whatwg.org/#dom-headers |
<|file_name|>scrollview-list.js<|end_file_name|><|fim▁begin|><|fim▁hole|>size 3077<|fim▁end|> | version https://git-lfs.github.com/spec/v1
oid sha256:94e212e6fc0c837cd9fff7fca8feff0187a0a22a97c7bd4c6d8f05c5cc358519 |
<|file_name|>template.py<|end_file_name|><|fim▁begin|>#
# -*- coding: utf-8 -*-
# Authors: Daniel P. Berrange <[email protected]>
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import urlparse
import importlib
import re
class Template(object):
def __init__(self,
source, protocol,
hostname, port,
username, password,
path, params):
"""
:param source: template source name
:param protocol: network transport protocol or None
:param hostname: registry hostname or None
:param port: registry port or None
:param username: username or None
:param password: password or None
:param path: template path identifier
:param params: template parameters
docker:///ubuntu
<|fim▁hole|>
virt-builder:///fedora-20
"""
self.source = source
self.protocol = protocol
self.hostname = hostname
self.port = port
self.username = username
self.password = password
self.path = path
self.params = params
if self.params is None:
self.params = {}
@classmethod
def _get_source_impl(klass, source):
try:
p = re.compile("\W")
sourcemod = "".join(p.split(source))
sourcename = "".join([i.capitalize() for i in p.split(source)])
mod = importlib.import_module(
"libvirt_sandbox.image.sources." + sourcemod)
classname = sourcename + "Source"
classimpl = getattr(mod, classname)
return classimpl()
except Exception as e:
print e
raise Exception("Invalid source: '%s'" % source)
def get_source_impl(self):
if self.source == "":
raise Exception("Missing scheme in image URI")
return self._get_source_impl(self.source)
def __repr__(self):
if self.protocol is not None:
scheme = self.source + "+" + self.protocol
else:
scheme = self.source
if self.hostname:
if self.port:
netloc = "%s:%d" % (self.hostname, self.port)
else:
netloc = self.hostname
if self.username:
if self.password:
auth = self.username + ":" + self.password
else:
auth = self.username
netloc = auth + "@" + netloc
else:
netloc = None
query = "&".join([key + "=" + self.params[key] for key in self.params.keys()])
ret = urlparse.urlunparse((scheme, netloc, self.path, None, query, None))
return ret
@classmethod
def from_uri(klass, uri):
o = urlparse.urlparse(uri)
idx = o.scheme.find("+")
if idx == -1:
source = o.scheme
protocol = None
else:
source = o.scheme[0:idx]
protocol = o.scheme[idx + 1:]
query = {}
if o.query is not None and o.query != "":
for param in o.query.split("&"):
(key, val) = param.split("=")
query[key] = val
return klass(source, protocol,
o.hostname, o.port,
o.username, o.password,
o.path, query)
@classmethod
def get_all(klass, source, templatedir):
impl = klass._get_source_impl(source)
return impl.list_templates(templatedir)<|fim▁end|> | docker+https://index.docker.io/ubuntu?tag=latest |
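# Illustrative usage sketch, not part of the original module: parsing one of the
# URI forms from the Template docstring and printing the pieces. The registry
# URI is just the docstring example; get_source_impl() additionally requires a
# matching libvirt_sandbox.image.sources.<source> module to be importable.
if __name__ == "__main__":
    tmpl = Template.from_uri("docker+https://index.docker.io/ubuntu?tag=latest")
    print tmpl.source    # docker
    print tmpl.protocol  # https
    print tmpl.hostname  # index.docker.io
    print tmpl.path      # /ubuntu
    print tmpl.params    # {'tag': 'latest'}
    print repr(tmpl)     # rebuilds an equivalent URI string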
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
class ProductionDatasetsExec(models.Model):<|fim▁hole|> taskid = models.DecimalField(decimal_places=0, max_digits=10, db_column='TASK_ID', null=False, default=0)
status = models.CharField(max_length=12, db_column='STATUS', null=True)
phys_group = models.CharField(max_length=20, db_column='PHYS_GROUP', null=True)
events = models.DecimalField(decimal_places=0, max_digits=7, db_column='EVENTS', null=False, default=0)
class Meta:
app_label = "grisli"
managed = False
db_table = 'T_PRODUCTIONDATASETS_EXEC'
class TaskProdSys1(models.Model):
taskid = models.DecimalField(decimal_places=0, max_digits=10, db_column='REQID', primary_key=True)
total_events = models.DecimalField(decimal_places=0, max_digits=10, db_column='TOTAL_EVENTS')
task_name = models.CharField(max_length=130, db_column='TASKNAME')
status = models.CharField(max_length=12, db_column='STATUS')
class Meta:
app_label = "grisli"
managed = False
db_table = 'T_TASK_REQUEST'
class TRequest(models.Model):
request = models.CharField(max_length=200, db_column='REQUEST', null=True)<|fim▁end|> | name = models.CharField(max_length=200, db_column='NAME', primary_key=True) |
<|file_name|>test_drive_edit.py<|end_file_name|><|fim▁begin|>import os
import time
from nose.plugins.skip import SkipTest
from nxdrive.client import LocalClient
from nxdrive.tests.common import OS_STAT_MTIME_RESOLUTION
from nxdrive.client.common import LOCALLY_EDITED_FOLDER_NAME
from nxdrive.tests.common_unit_test import UnitTestCase
DRIVE_EDIT_XATTR_NAMES = ['ndrive', 'nxdriveedit', 'nxdriveeditdigest', 'nxdriveeditname']
class TestDriveEdit(UnitTestCase):
locally_edited_path = ('/default-domain/UserWorkspaces/'
+ 'nuxeoDriveTestUser-user-1/Collections/'
+ LOCALLY_EDITED_FOLDER_NAME)
def setUpApp(self):
super(TestDriveEdit, self).setUpApp()
self.drive_edit = self.manager_1.get_drive_edit()
self.drive_edit.driveEditUploadCompleted.connect(self.app.sync_completed)
self.drive_edit.start()
self.remote = self.remote_document_client_1
self.local = LocalClient(os.path.join(self.nxdrive_conf_folder_1, 'edit'))
def tearDownApp(self):
self.drive_edit.stop()
super(TestDriveEdit, self).tearDownApp()
def test_filename_encoding(self):
filename = u'Mode op\xe9ratoire.txt'
doc_id = self.remote.make_file('/', filename, 'Some content.')
# Linux / Win + Chrome: quoted utf-8 encoded
browser_filename = 'Mode%20op%C3%A9ratoire.txt'
self._drive_edit_update(doc_id, filename, browser_filename, 'Win + Chrome')
# Win + IE: unquoted utf-8 encoded
browser_filename = 'Mode op\xc3\xa9ratoire.txt'
self._drive_edit_update(doc_id, filename, browser_filename, 'Win + IE')
# Win + FF: quoted string containing unicode
browser_filename = 'Mode%20op\xe9ratoire.txt'
self._drive_edit_update(doc_id, filename, browser_filename, 'Win + FF')
# OSX + Chrome / OSX + FF: quoted utf-8 encoded, except for white spaces!
browser_filename = 'Mode op%C3%A9ratoire.txt'
self._drive_edit_update(doc_id, filename, browser_filename, 'OS X + Chrome or FF')
def _drive_edit_update(self, doc_id, filename, browser_filename, content):
# Download file
local_path = '/%s/%s' % (doc_id, filename)
self.drive_edit._prepare_edit(self.nuxeo_url, doc_id, browser_filename)
self.assertTrue(self.local.exists(local_path))
self.wait_sync(timeout=2, fail_if_timeout=False)
# Update file content
self.local.update_content(local_path, content)
self.wait_sync()
self.assertEquals(self.remote.get_content('/' + filename), content)
def test_drive_edit_non_synced_doc(self):
raise SkipTest("WIP in https://jira.nuxeo.com/browse/NXDRIVE-170")
ctl = self.controller_1
ctl.bind_server(self.local_nxdrive_folder_1, self.nuxeo_url,
self.user_1, self.password_1)
local = LocalClient(self.local_nxdrive_folder_1)
remote = self.remote_document_client_1
syn = ctl.synchronizer
# Create file in test workspace (non sync root)
doc_id = remote.make_file('/', 'test.odt', 'Some content.')
# Drive edit file
ctl.download_edit(self.nuxeo_url, 'default', doc_id, 'test.odt',
open_file=False)
# Check file is downloaded to the Locally Edited folder<|fim▁hole|> self.assertEquals(local.get_content('/%s/test.odt'
% LOCALLY_EDITED_FOLDER_NAME),
'Some content.')
# Check Locally Edited collection exists, is registered as a sync root
# for test user and file is member of it
self.assertTrue(self.root_remote_client.exists(
self.locally_edited_path))
sync_roots = remote.get_roots()
self.assertEquals(len(sync_roots), 1)
self.assertEquals(sync_roots[0].path, self.locally_edited_path)
self.assertTrue(doc_id in
self.root_remote_client.get_collection_members(
self.locally_edited_path))
# Update locally edited file
# Let's first sync because of https://jira.nuxeo.com/browse/NXDRIVE-144
self._sync(syn)
time.sleep(OS_STAT_MTIME_RESOLUTION)
local.update_content('/%s/test.odt' % LOCALLY_EDITED_FOLDER_NAME,
'Updated content.')
self._sync(syn, wait_for_async=False)
self.assertEquals(remote.get_content('/test.odt'), 'Updated content.')
# Drive edit file a second time (should not download a new file but
# detect the existing one)
ctl.download_edit(self.nuxeo_url, 'default', doc_id, 'test.odt',
open_file=False)
self.assertEquals(len(local.get_children_info('/%s'
% LOCALLY_EDITED_FOLDER_NAME)), 1)
# Update locally edited file
time.sleep(OS_STAT_MTIME_RESOLUTION)
local.update_content('/%s/test.odt' % LOCALLY_EDITED_FOLDER_NAME,
'Twice updated content.')
self._sync(syn, wait_for_async=False)
self.assertEquals(remote.get_content('/test.odt'),
'Twice updated content.')
def test_drive_edit_synced_doc(self):
raise SkipTest("WIP in https://jira.nuxeo.com/browse/NXDRIVE-170")
ctl = self.controller_1
ctl.bind_server(self.local_nxdrive_folder_1, self.nuxeo_url,
self.user_1, self.password_1)
ctl.bind_root(self.local_nxdrive_folder_1, self.workspace)
local = LocalClient(self.local_nxdrive_folder_1)
remote = self.remote_document_client_1
syn = ctl.synchronizer
# Create file in test workspace (sync root)
doc_id = remote.make_file('/', 'test.odt', 'Some content.')
# Launch first synchronization
self._sync(syn)
self.assertTrue(local.exists('/%s/test.odt' % self.workspace_title))
# Drive edit file
ctl.download_edit(self.nuxeo_url, 'default', doc_id, 'test.odt',
open_file=False)
# Check file is downloaded to the Locally Edited folder
self.assertTrue(local.exists('/%s/test.odt'
% LOCALLY_EDITED_FOLDER_NAME))
self.assertEquals(local.get_content('/%s/test.odt'
% LOCALLY_EDITED_FOLDER_NAME),
'Some content.')
# Update locally edited file
# Let's first sync because of https://jira.nuxeo.com/browse/NXDRIVE-144
self._sync(syn)
time.sleep(OS_STAT_MTIME_RESOLUTION)
local.update_content('/%s/test.odt' % LOCALLY_EDITED_FOLDER_NAME,
'Content updated from Locally Edited.')
self._sync(syn, wait_for_async=False)
self.assertEquals(remote.get_content('/test.odt'),
'Content updated from Locally Edited.')
self._sync(syn)
self.assertEquals(local.get_content('/%s/test.odt'
% self.workspace_title),
'Content updated from Locally Edited.')
# Update file in local sync root
time.sleep(OS_STAT_MTIME_RESOLUTION)
local.update_content('/%s/test.odt' % self.workspace_title,
'Content updated from local sync root.')
self._sync(syn, wait_for_async=False)
self.assertEquals(remote.get_content('/test.odt'),
'Content updated from local sync root.')
self._sync(syn)
self.assertEquals(local.get_content('/%s/test.odt'
% LOCALLY_EDITED_FOLDER_NAME),
'Content updated from local sync root.')
# Update file in remote sync root
remote.update_content('/test.odt',
'Content updated from remote sync root.')
self._sync(syn)
self.assertEquals(local.get_content('/%s/test.odt'
% self.workspace_title),
'Content updated from remote sync root.')
self.assertEquals(local.get_content('/%s/test.odt'
% LOCALLY_EDITED_FOLDER_NAME),
'Content updated from remote sync root.')
def test_drive_edit_doc_becoming_synced(self):
raise SkipTest("WIP in https://jira.nuxeo.com/browse/NXDRIVE-170")
ctl = self.controller_1
ctl.bind_server(self.local_nxdrive_folder_1, self.nuxeo_url,
self.user_1, self.password_1)
local = LocalClient(self.local_nxdrive_folder_1)
remote = self.remote_document_client_1
syn = ctl.synchronizer
# Create file in test workspace (non sync root)
doc_id = remote.make_file('/', 'test.odt', 'Some content.')
# Drive edit file
ctl.download_edit(self.nuxeo_url, 'default', doc_id, 'test.odt',
open_file=False)
# Check file is downloaded to the Locally Edited folder
self.assertTrue(local.exists('/%s/test.odt'
% LOCALLY_EDITED_FOLDER_NAME))
# Register test workspace as a sync root
ctl.bind_root(self.local_nxdrive_folder_1, self.workspace)
self._sync(syn)
self.assertTrue(local.exists('/%s/test.odt' % self.workspace_title))
# Update file in local sync root
time.sleep(OS_STAT_MTIME_RESOLUTION)
local.update_content('/%s/test.odt' % self.workspace_title,
'Content updated from local sync root.')
self._sync(syn, wait_for_async=False)
self.assertEquals(remote.get_content('/test.odt'),
'Content updated from local sync root.')
self._sync(syn)
self.assertEquals(local.get_content('/%s/test.odt'
% LOCALLY_EDITED_FOLDER_NAME),
'Content updated from local sync root.')
# Update locally edited file
time.sleep(OS_STAT_MTIME_RESOLUTION)
local.update_content('/%s/test.odt' % LOCALLY_EDITED_FOLDER_NAME,
'Content updated from Locally Edited.')
self._sync(syn, wait_for_async=False)
self.assertEquals(remote.get_content('/test.odt'),
'Content updated from Locally Edited.')
self._sync(syn)
self.assertEquals(local.get_content('/%s/test.odt'
% self.workspace_title),
'Content updated from Locally Edited.')
# Update file in remote sync root
remote.update_content('/test.odt',
'Content updated from remote sync root.')
self._sync(syn)
self.assertEquals(local.get_content('/%s/test.odt'
% self.workspace_title),
'Content updated from remote sync root.')
self.assertEquals(local.get_content('/%s/test.odt'
% LOCALLY_EDITED_FOLDER_NAME),
'Content updated from remote sync root.')
def test_drive_edit_remote_move_non_sync_root_to_sync_root(self):
raise SkipTest("WIP in https://jira.nuxeo.com/browse/NXDRIVE-184")
ctl = self.controller_1
ctl.bind_server(self.local_nxdrive_folder_1, self.nuxeo_url,
self.user_1, self.password_1)
local = LocalClient(self.local_nxdrive_folder_1)
remote = self.remote_document_client_1
syn = ctl.synchronizer
# Create file in test workspace (non sync root)
doc_id = remote.make_file('/', 'test.odt', 'Some content.')
# Drive edit file
ctl.download_edit(self.nuxeo_url, 'default', doc_id, 'test.odt',
open_file=False)
# Check file is downloaded to the Locally Edited folder
self.assertTrue(local.exists('/%s/test.odt'
% LOCALLY_EDITED_FOLDER_NAME))
# Update locally edited file
# Let's first sync because of https://jira.nuxeo.com/browse/NXDRIVE-144
self._sync(syn)
time.sleep(OS_STAT_MTIME_RESOLUTION)
local.update_content('/%s/test.odt' % LOCALLY_EDITED_FOLDER_NAME,
'Updated content.')
self._sync(syn, wait_for_async=False)
self.assertEquals(remote.get_content('/test.odt'), 'Updated content.')
# Register a folder as sync root and remotely move file to it
sync_root_id = remote.make_folder('/', 'syncRoot')
ctl.bind_root(self.local_nxdrive_folder_1, sync_root_id)
self._sync(syn)
self.assertTrue(local.exists('/syncRoot'))
remote.move('/test.odt', '/syncRoot')
self._sync(syn)
self.assertTrue(local.exists('/%s/test.odt'
% LOCALLY_EDITED_FOLDER_NAME))
self.assertTrue(local.exists('/syncRoot/test.odt'))
def test_drive_edit_remote_move_sync_root_to_non_sync_root(self):
raise SkipTest("WIP in https://jira.nuxeo.com/browse/NXDRIVE-170")
ctl = self.controller_1
ctl.bind_server(self.local_nxdrive_folder_1, self.nuxeo_url,
self.user_1, self.password_1)
local = LocalClient(self.local_nxdrive_folder_1)
remote = self.remote_document_client_1
syn = ctl.synchronizer
# Create folder, register it as a sync root and create file inside it
sync_root_id = remote.make_folder('/', 'syncRoot')
ctl.bind_root(self.local_nxdrive_folder_1, sync_root_id)
doc_id = remote.make_file(sync_root_id, 'test.odt', 'Some content.')
# Launch first synchronization
self._sync(syn)
self.assertTrue(local.exists('/syncRoot/test.odt'))
# Drive edit file
ctl.download_edit(self.nuxeo_url, 'default', doc_id, 'test.odt',
open_file=False)
# Check file is downloaded to the Locally Edited folder
self.assertTrue(local.exists('/%s/test.odt'
% LOCALLY_EDITED_FOLDER_NAME))
# Update locally edited file
# Let's first sync because of https://jira.nuxeo.com/browse/NXDRIVE-144
self._sync(syn)
time.sleep(OS_STAT_MTIME_RESOLUTION)
local.update_content('/%s/test.odt' % LOCALLY_EDITED_FOLDER_NAME,
'Content updated from Locally Edited.')
self._sync(syn, wait_for_async=False)
self.assertEquals(remote.get_content('/syncRoot/test.odt'),
'Content updated from Locally Edited.')
self._sync(syn)
self.assertEquals(local.get_content('/syncRoot/test.odt'),
'Content updated from Locally Edited.')
# Move file to non sync root workspace
remote.move('/syncRoot/test.odt', self.workspace)
self._sync(syn)
self.assertFalse(local.exists('/syncRoot/test.odt'))
self.assertTrue(local.exists('/%s/test.odt'
% LOCALLY_EDITED_FOLDER_NAME))
self.assertEquals(len(local.get_children_info('/%s'
% LOCALLY_EDITED_FOLDER_NAME)), 1)
def test_drive_edit_move_sync_root_to_sync_root(self):
raise SkipTest("WIP in https://jira.nuxeo.com/browse/NXDRIVE-170")
ctl = self.controller_1
ctl.bind_server(self.local_nxdrive_folder_1, self.nuxeo_url,
self.user_1, self.password_1)
local = LocalClient(self.local_nxdrive_folder_1)
remote = self.remote_document_client_1
syn = ctl.synchronizer
# Create 2 folders, register them as sync roots and create file inside first folder
sync_root_id1 = remote.make_folder('/', 'syncRoot1')
sync_root_id2 = remote.make_folder('/', 'syncRoot2')
ctl.bind_root(self.local_nxdrive_folder_1, sync_root_id1)
ctl.bind_root(self.local_nxdrive_folder_1, sync_root_id2)
doc_id = remote.make_file(sync_root_id1, 'test.odt', 'Some content.')
# Launch first synchronization
self._sync(syn)
self.assertTrue(local.exists('/syncRoot1/test.odt'))
self.assertTrue(local.exists('/syncRoot2'))
# Drive edit file
ctl.download_edit(self.nuxeo_url, 'default', doc_id, 'test.odt',
open_file=False)
# Check file is downloaded to the Locally Edited folder
self.assertTrue(local.exists('/%s/test.odt'
% LOCALLY_EDITED_FOLDER_NAME))
# Update locally edited file
# Let's first sync because of https://jira.nuxeo.com/browse/NXDRIVE-144
self._sync(syn)
time.sleep(OS_STAT_MTIME_RESOLUTION)
local.update_content('/%s/test.odt' % LOCALLY_EDITED_FOLDER_NAME,
'Content updated from Locally Edited.')
self._sync(syn, wait_for_async=False)
self.assertEquals(remote.get_content('/syncRoot1/test.odt'),
'Content updated from Locally Edited.')
self._sync(syn)
self.assertEquals(local.get_content('/syncRoot1/test.odt'),
'Content updated from Locally Edited.')
# Remotely move file to other sync root
remote.move('/syncRoot1/test.odt', '/syncRoot2')
self._sync(syn)
self.assertFalse(local.exists('/syncRoot1/test.odt'))
self.assertTrue(local.exists('/syncRoot2/test.odt'))
self.assertTrue(local.exists('/%s/test.odt'
% LOCALLY_EDITED_FOLDER_NAME))
self.assertEquals(len(local.get_children_info('/%s'
% LOCALLY_EDITED_FOLDER_NAME)), 1)
# Locally move back file to other sync root
local.move('/syncRoot2/test.odt', '/syncRoot1')
self._sync(syn, wait_for_async=False)
self.assertFalse(local.exists('/syncRoot2/test.odt'))
self.assertTrue(local.exists('/syncRoot1/test.odt'))
self.assertTrue(local.exists('/%s/test.odt'
% LOCALLY_EDITED_FOLDER_NAME))
self.assertEquals(len(local.get_children_info('/%s'
% LOCALLY_EDITED_FOLDER_NAME)), 1)
def _sync(self, syn, wait_for_async=True):
if wait_for_async:
self.wait()
syn.loop(delay=0, max_loops=1)<|fim▁end|> | self.assertTrue(local.exists('/%s/test.odt'
% LOCALLY_EDITED_FOLDER_NAME)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.