prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k) |
---|---|
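Each row below pairs a fill-in-the-middle prompt (a source file wrapped in `<|file_name|>…<|end_file_name|>` and `<|fim▁begin|>…<|fim▁hole|>…<|fim▁end|>` sentinels) with the completion text that fills the hole. As a rough illustration only, a row could be reassembled into the original file with a sketch like the one below; the helper name and the single-hole assumption are mine, not part of the dataset.

```python
# Sketch only: splice a row's completion back into its FIM prompt.
# Assumes the sentinel layout used in the rows below and exactly one hole.
FIM_BEGIN = "<|fim▁begin|>"
FIM_HOLE = "<|fim▁hole|>"
FIM_END = "<|fim▁end|>"

def reassemble(prompt: str, completion: str) -> str:
    """Return the file text with `completion` inserted at the hole."""
    body = prompt.split(FIM_BEGIN, 1)[1]       # drop the <|file_name|> header
    body = body.split(FIM_END, 1)[0]           # drop the trailing end sentinel
    prefix, suffix = body.split(FIM_HOLE, 1)   # one hole per prompt assumed
    return prefix + completion + suffix
```

For the `Binary2.java` row, for example, the completion (`int totalBlocks; String fileName; …`) slots back into the class body at the hole; rows whose completion cell is empty simply reassemble to the prompt text with the sentinels dropped.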
<|file_name|>red.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# Example using an RGB character LCD wired directly to Raspberry Pi or BeagleBone Black.
import time
import Adafruit_CharLCD as LCD
# Raspberry Pi configuration:
<|fim▁hole|>lcd_rs = 27 # Change this to pin 21 on older revision Raspberry Pi's
lcd_en = 22
lcd_d4 = 25
lcd_d5 = 24
lcd_d6 = 23
lcd_d7 = 18
lcd_red = 4
lcd_green = 17
lcd_blue = 7 # Pin 7 is CE1
# Define LCD column and row size for 16x2 LCD.
lcd_columns = 16
lcd_rows = 2
# Initialize the LCD using the pins above.
lcd = LCD.Adafruit_RGBCharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
lcd_columns, lcd_rows, lcd_red, lcd_green, lcd_blue)
# Show some basic colors.
lcd.set_color(1.0, 0.0, 0.0)
lcd.clear()
lcd.message('Joyeux')
time.sleep(3.0)
lcd.set_color(0.0, 1.0, 0.0)
lcd.clear()
lcd.message('Noel')
time.sleep(3.0)
lcd.set_color(0.0, 0.0, 1.0)
lcd.clear()
lcd.message('Je vais')
time.sleep(3.0)
lcd.set_color(1.0, 1.0, 0.0)
lcd.clear()
lcd.message('te faire')
time.sleep(3.0)
lcd.set_color(0.0, 1.0, 1.0)
lcd.clear()
lcd.message('des trucs')
time.sleep(3.0)
lcd.set_color(1.0, 0.0, 1.0)
lcd.clear()
lcd.message('de fou')
time.sleep(3.0)
lcd.set_color(1.0, 1.0, 1.0)
lcd.clear()
lcd.message('MOUAHHH')
time.sleep(3.0)<|fim▁end|>
| |
<|file_name|>read_depth_stencil.py<|end_file_name|><|fim▁begin|>'''OpenGL extension NV.read_depth_stencil
This module customises the behaviour of the
OpenGL.raw.GLES2.NV.read_depth_stencil to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/read_depth_stencil.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes<|fim▁hole|>from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.read_depth_stencil import *
from OpenGL.raw.GLES2.NV.read_depth_stencil import _EXTENSION_NAME
def glInitReadDepthStencilNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION<|fim▁end|>
| |
<|file_name|>Binary2.java<|end_file_name|><|fim▁begin|>package com.bytezone.diskbrowser.nufx;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import com.bytezone.diskbrowser.prodos.write.DiskFullException;
import com.bytezone.diskbrowser.prodos.write.FileAlreadyExistsException;
import com.bytezone.diskbrowser.prodos.write.ProdosDisk;
import com.bytezone.diskbrowser.prodos.write.VolumeCatalogFullException;
// -----------------------------------------------------------------------------------//
public class Binary2
// -----------------------------------------------------------------------------------//
{
private static final String UNDERLINE =
"------------------------------------------------------"
+ "-----------------------";
Binary2Header binary2Header;
byte[] buffer;
List<Binary2Header> headers = new ArrayList<> ();<|fim▁hole|> public Binary2 (Path path) throws IOException
// ---------------------------------------------------------------------------------//
{
fileName = path.toFile ().getName ();
buffer = Files.readAllBytes (path);
read (buffer);
}
// ---------------------------------------------------------------------------------//
private void read (byte[] buffer)
// ---------------------------------------------------------------------------------//
{
int ptr = 0;
do
{
binary2Header = new Binary2Header (buffer, ptr);
System.out.println (binary2Header);
headers.add (binary2Header);
totalBlocks += binary2Header.totalBlocks;
ptr += ((binary2Header.eof - 1) / 128 + 1) * 128 + 128;
} while (binary2Header.filesToFollow > 0);
}
// ---------------------------------------------------------------------------------//
public byte[] getDiskBuffer () throws DiskFullException, VolumeCatalogFullException,
FileAlreadyExistsException, IOException
// ---------------------------------------------------------------------------------//
{
ProdosDisk disk = new ProdosDisk (800, "DiskBrowser");
for (Binary2Header header : headers)
{
byte[] dataBuffer = new byte[header.eof]; // this sux
System.arraycopy (buffer, header.ptr + 128, dataBuffer, 0, dataBuffer.length);
disk.addFile (header.fileName, header.fileType, header.auxType, header.created,
header.modified, dataBuffer, header.eof);
}
disk.close ();
return disk.getBuffer ();
}
// ---------------------------------------------------------------------------------//
@Override
public String toString ()
// ---------------------------------------------------------------------------------//
{
StringBuilder text = new StringBuilder ();
text.append (String.format (
" %-15.15s Files:%5d%n%n",
fileName, headers.size ()));
text.append (" Name Type Auxtyp Modified"
+ " Fmat Length\n");
text.append (String.format ("%s%n", UNDERLINE));
for (Binary2Header header : headers)
text.append (String.format ("%s%n", header.getLine ()));
text.append (String.format ("%s%n", UNDERLINE));
return text.toString ();
}
}<|fim▁end|>
|
int totalBlocks;
String fileName;
// ---------------------------------------------------------------------------------//
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|>import React, { Component } from 'react';
import {
AppRegistry,
StyleSheet,
Text,
View,
TouchableOpacity,
} from 'react-native';
import ScrollableTabView, { ScrollableTabBar, } from 'react-native-scrollable-tab-view';
class wsapp extends Component {
render() {
return <ScrollableTabView
style={{marginTop: 20, }}<|fim▁hole|> initialPage={2}
scrollWithoutAnimation={false}
renderTabBar={() => <ScrollableTabBar />}
ref={(tabView) => { this.tabView = tabView; }}>
<Text tabLabel='Tab #1'>My</Text>
<Text tabLabel='Tab #2'>favorite</Text>
<Text tabLabel='Tab #3'>project</Text>
<TouchableOpacity tabLabel='Back' onPress={() => this.tabView.goToPage(0)}>
<Text>Lets go back!</Text>
</TouchableOpacity>
</ScrollableTabView>
}
}
export default wsapp<|fim▁end|>
| |
<|file_name|>Packet_LOCAL_INFILE_Request_test.go<|end_file_name|><|fim▁begin|><|fim▁hole|>import "testing"
import "github.com/stretchr/testify/assert"
var LOCAL_INFILE_Request_test_packets = []struct {
packet Proto
context Context
}{
{packet: Proto{data: StringToPacket(`
0c 00 00 01 fb 2f 65 74 63 2f 70 61 73 73 77 64 ...../etc/passwd
`)}, context: Context{}},
}
func Test_Packet_LOCAL_INFILE_Request(t *testing.T) {
var pkt Packet_LOCAL_INFILE_Request
for _, value := range LOCAL_INFILE_Request_test_packets {
pkt = Packet_LOCAL_INFILE_Request{}
pkt.FromPacket(value.context, value.packet)
assert.Equal(t, pkt.ToPacket(value.context), value.packet.data, "")
}
}
func Benchmark_Packet_LOCAL_INFILE_Request_FromPacket(b *testing.B) {
context := Context{capability: CLIENT_PROTOCOL_41}
var pkt Packet_LOCAL_INFILE_Request
for i := 0; i < b.N; i++ {
pkt = Packet_LOCAL_INFILE_Request{}
LOCAL_INFILE_Request_test_packets[0].packet.offset = 0
pkt.FromPacket(context, LOCAL_INFILE_Request_test_packets[0].packet)
}
}
func Benchmark_Packet_LOCAL_INFILE_Request_GetPacketSize(b *testing.B) {
context := Context{capability: CLIENT_PROTOCOL_41}
pkt := Packet_LOCAL_INFILE_Request{}
pkt.FromPacket(context, LOCAL_INFILE_Request_test_packets[0].packet)
for i := 0; i < b.N; i++ {
pkt.GetPacketSize(context)
}
}
func Benchmark_Packet_LOCAL_INFILE_Request_ToPacket(b *testing.B) {
context := Context{capability: CLIENT_PROTOCOL_41}
pkt := Packet_LOCAL_INFILE_Request{}
pkt.FromPacket(context, LOCAL_INFILE_Request_test_packets[0].packet)
for i := 0; i < b.N; i++ {
pkt.ToPacket(context)
}
}<|fim▁end|>
|
package MySQLProtocol
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|>var Filter = require('broccoli-filter')
module.exports = WrapFilter;
WrapFilter.prototype = Object.create(Filter.prototype);
WrapFilter.prototype.constructor = WrapFilter;
function WrapFilter (inputTree, options) {
if (!(this instanceof WrapFilter)) return new WrapFilter(inputTree, options)
Filter.call(this, inputTree, options)
this.options = options || {};
this.options.extensions = this.options.extensions || ['js'];
this.extensions = this.options.extensions;
}
WrapFilter.prototype.processString = function (string) {
var wrapper = this.options.wrapper;
if ( !(wrapper instanceof Array) ) {
return string;
}
var startWith = wrapper[0] || '';
var endWith = wrapper[1] || '';<|fim▁hole|> return [startWith, string, endWith].join('')
}<|fim▁end|>
| |
<|file_name|>index_ttl.go<|end_file_name|><|fim▁begin|>package storage
import (
"fmt"
"strings"
"time"
mgo "github.com/ilius/mgo"
"github.com/ilius/mgo/bson"
)
func ModifyIndexTTL(db mgo.Database, collection string, index mgo.Index) error {
keyInfo, err := mgo.ParseIndexKey(index.Key)
if err != nil {
return err
}
expireAfterSeconds := int(index.ExpireAfter / time.Second)
fmt.Printf(
"Updating TTL on collection %s to expireAfterSeconds=%d\n",
collection,
expireAfterSeconds,
)
err = db.Run(bson.D{
{"collMod", collection},
{"index", bson.M{
"keyPattern": keyInfo.Key,<|fim▁hole|> "expireAfterSeconds": expireAfterSeconds,
}},
}, nil)
if err != nil {
return err
}
return nil
}
func EnsureIndexWithTTL(db mgo.Database, collection string, index mgo.Index) error {
err := db.C(collection).EnsureIndex(index)
if err != nil {
// if expireAfterSeconds is changed, we need to drop and re-create the index
// unless we use `collMod` added in 2.3.2
// https://jira.mongodb.org/browse/SERVER-6700
if strings.Contains(err.Error(), "already exists with different options") {
return ModifyIndexTTL(db, collection, index)
}
return err
}
return nil
}<|fim▁end|>
| |
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>import random
import time
import datetime
from consts import *
__all__ = ['gen_valid_id', 'gen_list_page', 'log']
def gen_valid_id(collection):
def gen_id():
_id = ''
for i in range(4):
_id += random.choice('0123456789')
return _id
id = gen_id()
while collection.find_one({'id': id}):
id = gen_id()
return id
def gen_list_page(collection, status, page=1):
page = int(page)
left = (page - 1) * 15
right = left + 15
all = collection.find({'status': status}).sort([('id', 1)])
max_page = int((all.count()-1) / 15) + 1 if all.count() else 0
if page > max_page:
return PAGE_NOT_EXIST
elif page < 1:
return ARGS_INCORRECT
header = '===== {0}/{1} =====\n'.format(page, max_page)
selected = all[left:right]
return header + '\n'.join([
'{id} {title} ({comment})'.format(**i) for i in selected])
def log(m):
with open('log', 'a') as f:
if m.type == 'text': exp=m.content
elif m.type == 'image': exp=m.img
elif m.type == 'link': exp=';'.join([m.title, m.description, m.url])
else: exp=str(dict(m))
f.write(LOG.format(datetime.datetime.fromtimestamp(
time.time()).strftime('%Y-%m-%d %H:%M:%S'), m.source, m.type, exp))<|fim▁hole|> from pymongo import MongoClient
collection = MongoClient()['SongsDistributor']['collection']
for i in ('checked', 'pending'):
collection.update_many({'status': i}, {'$set': {key: value}})
print('ok')<|fim▁end|>
|
def add_key(key, value):
|
<|file_name|>sp_head.cc<|end_file_name|><|fim▁begin|>/*
Copyright (c) 2002, 2016, Oracle and/or its affiliates.
Copyright (c) 2011, 2020, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
#include "mariadb.h" /* NO_EMBEDDED_ACCESS_CHECKS */
#include "sql_priv.h"
#include "unireg.h"
#include "sql_prepare.h"
#include "sql_cache.h" // query_cache_*
#include "probes_mysql.h"
#include "sql_show.h" // append_identifier
#include "sql_db.h" // mysql_opt_change_db, mysql_change_db
#include "sql_array.h" // Dynamic_array
#include "log_event.h" // Query_log_event
#include "sql_derived.h" // mysql_handle_derived
#include "sql_cte.h"
#include "sql_select.h" // Virtual_tmp_table
#include "opt_trace.h"
#include "my_json_writer.h"
#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation
#endif
#include "sp_head.h"
#include "sp.h"
#include "sp_pcontext.h"
#include "sp_rcontext.h"
#include "sp_cache.h"
#include "set_var.h"
#include "sql_parse.h" // cleanup_items
#include "sql_base.h" // close_thread_tables
#include "transaction.h" // trans_commit_stmt
#include "sql_audit.h"
#include "debug_sync.h"
#ifdef WITH_WSREP
#include "wsrep_trans_observer.h"
#endif /* WITH_WSREP */
/*
Sufficient max length of printed destinations and frame offsets (all uints).
*/
#define SP_INSTR_UINT_MAXLEN 8
#define SP_STMT_PRINT_MAXLEN 40
#include <my_user.h>
#include "mysql/psi/mysql_statement.h"
#include "mysql/psi/mysql_sp.h"
#ifdef HAVE_PSI_INTERFACE
void init_sp_psi_keys()
{
const char *category= "sp";
const int num __attribute__((unused)) = __LINE__ + 3;
PSI_server->register_statement(category, & sp_instr_stmt::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_set::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_set_trigger_field::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_jump::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_jump_if_not::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_freturn::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_preturn::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_hpush_jump::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_hpop::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_hreturn::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_cpush::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_cpop::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_copen::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_cclose::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_cfetch::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_agg_cfetch::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_cursor_copy_struct::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_error::psi_info, 1);
PSI_server->register_statement(category, & sp_instr_set_case_expr::psi_info, 1);
DBUG_ASSERT(SP_PSI_STATEMENT_INFO_COUNT == __LINE__ - num);
}
#endif
#ifdef HAVE_PSI_SP_INTERFACE
#define MYSQL_RUN_SP(SP,CODE) \
do { \
PSI_sp_locker_state psi_state; \
PSI_sp_locker *locker= MYSQL_START_SP(&psi_state, (SP)->m_sp_share); \
CODE; \
MYSQL_END_SP(locker); \
} while(0)
#else
#define MYSQL_RUN_SP(SP, CODE) do { CODE; } while(0)
#endif
extern "C" uchar *sp_table_key(const uchar *ptr, size_t *plen, my_bool first);
/**
Helper function which operates on a THD object to set the query start_time to
the current time.
@param[in, out] thd The session object
*/
static void reset_start_time_for_sp(THD *thd)
{
if (!thd->in_sub_stmt)
thd->set_start_time();
}
bool Item_splocal::append_for_log(THD *thd, String *str)
{
if (fix_fields_if_needed(thd, NULL))
return true;
if (limit_clause_param)
return str->append_ulonglong(val_uint());
/*
ROW variables are currently not allowed in select_list, e.g.:
SELECT row_variable;
ROW variables can appear in query parts where name is not important, e.g.:
SELECT ROW(1,2)=row_variable FROM t1;
So we can skip using NAME_CONST() and use ROW() constants directly.
*/
if (type_handler() == &type_handler_row)
return append_value_for_log(thd, str);
if (str->append(STRING_WITH_LEN(" NAME_CONST('")) ||
str->append(&m_name) ||
str->append(STRING_WITH_LEN("',")))
return true;
return append_value_for_log(thd, str) || str->append(')');
}
bool Item_splocal::append_value_for_log(THD *thd, String *str)
{
StringBuffer<STRING_BUFFER_USUAL_SIZE> str_value_holder(&my_charset_latin1);
Item *item= this_item();
String *str_value= item->type_handler()->print_item_value(thd, item,
&str_value_holder);
return (str_value ?
str->append(*str_value) :
str->append(NULL_clex_str));
}
bool Item_splocal_row_field::append_for_log(THD *thd, String *str)
{
if (fix_fields_if_needed(thd, NULL))
return true;
if (limit_clause_param)
return str->append_ulonglong(val_uint());
if (str->append(STRING_WITH_LEN(" NAME_CONST('")) ||
str->append(&m_name) ||
str->append('.') ||
str->append(&m_field_name) ||
str->append(STRING_WITH_LEN("',")))
return true;
return append_value_for_log(thd, str) || str->append(')');
}
/**
Returns a combination of:
- sp_head::MULTI_RESULTS: added if the 'cmd' is a command that might
result in multiple result sets being sent back.
- sp_head::CONTAINS_DYNAMIC_SQL: added if 'cmd' is one of PREPARE,
EXECUTE, DEALLOCATE.
*/
uint
sp_get_flags_for_command(LEX *lex)
{
uint flags;
switch (lex->sql_command) {
case SQLCOM_SELECT:
if (lex->result && !lex->analyze_stmt)
{
flags= 0; /* This is a SELECT with INTO clause */
break;
}
/* fallthrough */
case SQLCOM_ANALYZE:
case SQLCOM_OPTIMIZE:
case SQLCOM_PRELOAD_KEYS:
case SQLCOM_ASSIGN_TO_KEYCACHE:
case SQLCOM_CHECKSUM:
case SQLCOM_CHECK:
case SQLCOM_HA_READ:
case SQLCOM_SHOW_AUTHORS:
case SQLCOM_SHOW_BINLOGS:
case SQLCOM_SHOW_BINLOG_EVENTS:
case SQLCOM_SHOW_RELAYLOG_EVENTS:
case SQLCOM_SHOW_CHARSETS:
case SQLCOM_SHOW_COLLATIONS:
case SQLCOM_SHOW_CONTRIBUTORS:
case SQLCOM_SHOW_CREATE:
case SQLCOM_SHOW_CREATE_DB:
case SQLCOM_SHOW_CREATE_FUNC:
case SQLCOM_SHOW_CREATE_PROC:
case SQLCOM_SHOW_CREATE_PACKAGE:
case SQLCOM_SHOW_CREATE_PACKAGE_BODY:
case SQLCOM_SHOW_CREATE_EVENT:
case SQLCOM_SHOW_CREATE_TRIGGER:
case SQLCOM_SHOW_CREATE_USER:
case SQLCOM_SHOW_DATABASES:
case SQLCOM_SHOW_ERRORS:
case SQLCOM_SHOW_EXPLAIN:
case SQLCOM_SHOW_FIELDS:
case SQLCOM_SHOW_FUNC_CODE:
case SQLCOM_SHOW_GENERIC:
case SQLCOM_SHOW_GRANTS:
case SQLCOM_SHOW_ENGINE_STATUS:
case SQLCOM_SHOW_ENGINE_LOGS:
case SQLCOM_SHOW_ENGINE_MUTEX:
case SQLCOM_SHOW_EVENTS:
case SQLCOM_SHOW_KEYS:
case SQLCOM_SHOW_BINLOG_STAT:
case SQLCOM_SHOW_OPEN_TABLES:
case SQLCOM_SHOW_PRIVILEGES:
case SQLCOM_SHOW_PROCESSLIST:
case SQLCOM_SHOW_PROC_CODE:
case SQLCOM_SHOW_PACKAGE_BODY_CODE:
case SQLCOM_SHOW_SLAVE_HOSTS:
case SQLCOM_SHOW_SLAVE_STAT:
case SQLCOM_SHOW_STATUS:
case SQLCOM_SHOW_STATUS_FUNC:
case SQLCOM_SHOW_STATUS_PROC:
case SQLCOM_SHOW_STATUS_PACKAGE:
case SQLCOM_SHOW_STATUS_PACKAGE_BODY:
case SQLCOM_SHOW_STORAGE_ENGINES:
case SQLCOM_SHOW_TABLES:
case SQLCOM_SHOW_TABLE_STATUS:
case SQLCOM_SHOW_VARIABLES:
case SQLCOM_SHOW_WARNS:
case SQLCOM_REPAIR:
flags= sp_head::MULTI_RESULTS;
break;
/*
EXECUTE statement may return a result set, but doesn't have to.
We can't, however, know it in advance, and therefore must add
this statement here. This is ok, as it is equivalent to a result-set
statement within an IF condition.
*/
case SQLCOM_EXECUTE:
case SQLCOM_EXECUTE_IMMEDIATE:
flags= sp_head::MULTI_RESULTS | sp_head::CONTAINS_DYNAMIC_SQL;
break;
case SQLCOM_PREPARE:
case SQLCOM_DEALLOCATE_PREPARE:
flags= sp_head::CONTAINS_DYNAMIC_SQL;
break;
case SQLCOM_CREATE_TABLE:
case SQLCOM_CREATE_SEQUENCE:
if (lex->tmp_table())
flags= 0;
else
flags= sp_head::HAS_COMMIT_OR_ROLLBACK;
break;
case SQLCOM_DROP_TABLE:
case SQLCOM_DROP_SEQUENCE:
if (lex->tmp_table())
flags= 0;
else
flags= sp_head::HAS_COMMIT_OR_ROLLBACK;
break;
case SQLCOM_FLUSH:
flags= sp_head::HAS_SQLCOM_FLUSH;
break;
case SQLCOM_RESET:
flags= sp_head::HAS_SQLCOM_RESET;
break;
case SQLCOM_CREATE_INDEX:
case SQLCOM_CREATE_DB:
case SQLCOM_CREATE_PACKAGE:
case SQLCOM_CREATE_PACKAGE_BODY:
case SQLCOM_CREATE_VIEW:
case SQLCOM_CREATE_TRIGGER:
case SQLCOM_CREATE_USER:
case SQLCOM_CREATE_ROLE:
case SQLCOM_ALTER_TABLE:
case SQLCOM_ALTER_SEQUENCE:
case SQLCOM_ALTER_USER:
case SQLCOM_GRANT:
case SQLCOM_GRANT_ROLE:
case SQLCOM_REVOKE:
case SQLCOM_REVOKE_ROLE:
case SQLCOM_BEGIN:
case SQLCOM_RENAME_TABLE:
case SQLCOM_RENAME_USER:
case SQLCOM_DROP_INDEX:
case SQLCOM_DROP_DB:
case SQLCOM_DROP_PACKAGE:
case SQLCOM_DROP_PACKAGE_BODY:
case SQLCOM_REVOKE_ALL:
case SQLCOM_DROP_USER:
case SQLCOM_DROP_ROLE:
case SQLCOM_DROP_VIEW:
case SQLCOM_DROP_TRIGGER:
case SQLCOM_TRUNCATE:
case SQLCOM_COMMIT:
case SQLCOM_ROLLBACK:
case SQLCOM_LOAD:
case SQLCOM_LOCK_TABLES:
case SQLCOM_CREATE_PROCEDURE:
case SQLCOM_CREATE_SPFUNCTION:
case SQLCOM_ALTER_PROCEDURE:
case SQLCOM_ALTER_FUNCTION:
case SQLCOM_DROP_PROCEDURE:
case SQLCOM_DROP_FUNCTION:
case SQLCOM_CREATE_EVENT:
case SQLCOM_ALTER_EVENT:
case SQLCOM_DROP_EVENT:
case SQLCOM_INSTALL_PLUGIN:
case SQLCOM_UNINSTALL_PLUGIN:
flags= sp_head::HAS_COMMIT_OR_ROLLBACK;
break;
case SQLCOM_DELETE:
case SQLCOM_DELETE_MULTI:
case SQLCOM_INSERT:
case SQLCOM_REPLACE:
case SQLCOM_REPLACE_SELECT:
case SQLCOM_INSERT_SELECT:
{
/*
DELETE normally doesn't return resultset, but there are 3 exceptions:
- DELETE ... RETURNING
- EXPLAIN DELETE ...
- ANALYZE DELETE ...
*/
if (!lex->has_returning() && !lex->describe && !lex->analyze_stmt)
flags= 0;
else
flags= sp_head::MULTI_RESULTS;
break;
}
case SQLCOM_UPDATE:
case SQLCOM_UPDATE_MULTI:
{
if (!lex->describe && !lex->analyze_stmt)
flags= 0;
else
flags= sp_head::MULTI_RESULTS;
break;
}
default:
flags= 0;
break;
}
return flags;
}
/**
Prepare an Item for evaluation (call of fix_fields).
@param it_addr pointer to item reference
@param cols expected number of elements (1 for scalar, >=1 for ROWs)
@retval
NULL error
@retval
non-NULL prepared item
*/
Item *THD::sp_prepare_func_item(Item **it_addr, uint cols)
{
DBUG_ENTER("THD::sp_prepare_func_item");
Item *res= sp_fix_func_item(it_addr);
if (res && res->check_cols(cols))
DBUG_RETURN(NULL);
DBUG_RETURN(res);
}
/**
Fix an Item for evaluation for SP.
*/
Item *THD::sp_fix_func_item(Item **it_addr)
{
DBUG_ENTER("THD::sp_fix_func_item");
if ((*it_addr)->fix_fields_if_needed(this, it_addr))
{
DBUG_PRINT("info", ("fix_fields() failed"));
DBUG_RETURN(NULL);
}
it_addr= (*it_addr)->this_item_addr(this, it_addr);
if ((*it_addr)->fix_fields_if_needed(this, it_addr))
{
DBUG_PRINT("info", ("fix_fields() failed"));
DBUG_RETURN(NULL);
}
DBUG_RETURN(*it_addr);
}
/**
Evaluate an expression and store the result in the field.
@param result_field the field to store the result
@param expr_item_ptr the root item of the expression
@retval
FALSE on success
@retval
TRUE on error
*/
bool THD::sp_eval_expr(Field *result_field, Item **expr_item_ptr)
{
DBUG_ENTER("THD::sp_eval_expr");
DBUG_ASSERT(*expr_item_ptr);
Sp_eval_expr_state state(this);
/* Save the value in the field. Convert the value if needed. */
DBUG_RETURN(result_field->sp_prepare_and_store_item(this, expr_item_ptr));
}
/**
Create temporary sp_name object from MDL key.
@note The lifetime of this object is bound to the lifetime of the MDL_key.
This should be fine as sp_name objects created by this constructor
are mainly used for SP-cache lookups.
@param key MDL key containing database and routine name.
@param qname_buff Buffer to be used for storing quoted routine name
(should be at least 2*NAME_LEN+1+1 bytes).
*/
sp_name::sp_name(const MDL_key *key, char *qname_buff)
:Database_qualified_name(key->db_name(), key->db_name_length(),
key->name(), key->name_length()),
m_explicit_name(false)
{
if (m_db.length)
strxmov(qname_buff, m_db.str, ".", m_name.str, NullS);
else
strmov(qname_buff, m_name.str);
}
/**
Check that the name 'ident' is ok. It's assumed to be an 'ident'
from the parser, so we only have to check length and trailing spaces.
The former is a standard requirement (and 'show status' assumes a
non-empty name), the latter is a mysql:ism as trailing spaces are
removed by get_field().
@retval
TRUE bad name
@retval
FALSE name is ok
*/
bool
check_routine_name(const LEX_CSTRING *ident)
{
DBUG_ASSERT(ident);
DBUG_ASSERT(ident->str);
if (!ident->str[0] || ident->str[ident->length-1] == ' ')
{
my_error(ER_SP_WRONG_NAME, MYF(0), ident->str);
return TRUE;
}
if (check_ident_length(ident))
return TRUE;
return FALSE;
}
/*
*
* sp_head
*
*/
sp_head *sp_head::create(sp_package *parent, const Sp_handler *handler,
enum_sp_aggregate_type agg_type)
{
MEM_ROOT own_root;
init_sql_alloc(key_memory_sp_head_main_root, &own_root, MEM_ROOT_BLOCK_SIZE,
MEM_ROOT_PREALLOC, MYF(0));
sp_head *sp;
if (!(sp= new (&own_root) sp_head(&own_root, parent, handler, agg_type)))
free_root(&own_root, MYF(0));
return sp;
}
void sp_head::destroy(sp_head *sp)
{
if (sp)
{
/* Make a copy of main_mem_root as free_root will free the sp */
MEM_ROOT own_root= sp->main_mem_root;
DBUG_PRINT("info", ("mem_root %p moved to %p",
&sp->mem_root, &own_root));
delete sp;
free_root(&own_root, MYF(0));
}
}
/*
*
* sp_head
*
*/
sp_head::sp_head(MEM_ROOT *mem_root_arg, sp_package *parent,
const Sp_handler *sph, enum_sp_aggregate_type agg_type)
:Query_arena(NULL, STMT_INITIALIZED_FOR_SP),
Database_qualified_name(&null_clex_str, &null_clex_str),
main_mem_root(*mem_root_arg),
m_parent(parent),
m_handler(sph),
m_flags(0),
m_tmp_query(NULL),
m_explicit_name(false),
/*
FIXME: the only use case when name is NULL is events, and it should
be rewritten soon. Remove the else part and replace 'if' with
an assert when this is done.
*/
m_qname(null_clex_str),
m_params(null_clex_str),
m_body(null_clex_str),
m_body_utf8(null_clex_str),
m_defstr(null_clex_str),
m_sp_cache_version(0),
m_creation_ctx(0),
unsafe_flags(0),
m_created(0),
m_modified(0),
m_recursion_level(0),
m_next_cached_sp(0),
m_param_begin(NULL),
m_param_end(NULL),
m_body_begin(NULL),
m_thd_root(NULL),
m_thd(NULL),
m_pcont(new (&main_mem_root) sp_pcontext()),
m_cont_level(0)
{
mem_root= &main_mem_root;
set_chistics_agg_type(agg_type);
m_first_instance= this;
m_first_free_instance= this;
m_last_cached_sp= this;
m_return_field_def.charset = NULL;
DBUG_ENTER("sp_head::sp_head");
m_security_ctx.init();
m_backpatch.empty();
m_backpatch_goto.empty();
m_cont_backpatch.empty();
m_lex.empty();
my_init_dynamic_array(key_memory_sp_head_main_root, &m_instr,
sizeof(sp_instr *), 16, 8, MYF(0));
my_hash_init(key_memory_sp_head_main_root, &m_sptabs, system_charset_info, 0,
0, 0, sp_table_key, 0, 0);
my_hash_init(key_memory_sp_head_main_root, &m_sroutines, system_charset_info,
0, 0, 0, sp_sroutine_key, 0, 0);
DBUG_VOID_RETURN;
}
sp_package *sp_package::create(LEX *top_level_lex, const sp_name *name,
const Sp_handler *sph)
{
MEM_ROOT own_root;
init_sql_alloc(key_memory_sp_head_main_root, &own_root, MEM_ROOT_BLOCK_SIZE,
MEM_ROOT_PREALLOC, MYF(0));
sp_package *sp;
if (!(sp= new (&own_root) sp_package(&own_root, top_level_lex, name, sph)))
free_root(&own_root, MYF(0));
return sp;
}
sp_package::sp_package(MEM_ROOT *mem_root_arg,
LEX *top_level_lex,
const sp_name *name,
const Sp_handler *sph)
:sp_head(mem_root_arg, NULL, sph, DEFAULT_AGGREGATE),
m_current_routine(NULL),
m_top_level_lex(top_level_lex),
m_rcontext(NULL),
m_invoked_subroutine_count(0),
m_is_instantiated(false),
m_is_cloning_routine(false)
{
init_sp_name(name);
}
sp_package::~sp_package()
{
m_routine_implementations.cleanup();
m_routine_declarations.cleanup();
m_body= null_clex_str;
if (m_current_routine)
sp_head::destroy(m_current_routine->sphead);
delete m_rcontext;
}
/*
Test if two routines have equal specifications
*/
bool sp_head::eq_routine_spec(const sp_head *sp) const
{
// TODO: Add tests for equal return data types (in case of FUNCTION)
// TODO: Add tests for equal argument data types
return
m_handler->type() == sp->m_handler->type() &&
m_pcont->context_var_count() == sp->m_pcont->context_var_count();
}
bool sp_package::validate_after_parser(THD *thd)
{
if (m_handler->type() != SP_TYPE_PACKAGE_BODY)
return false;
sp_head *sp= sp_cache_lookup(&thd->sp_package_spec_cache, this);
sp_package *spec= sp ? sp->get_package() : NULL;
DBUG_ASSERT(spec); // CREATE PACKAGE must already be cached
return validate_public_routines(thd, spec) ||
validate_private_routines(thd);
}
bool sp_package::validate_public_routines(THD *thd, sp_package *spec)
{
/*
Check that all routines declared in CREATE PACKAGE
have implementations in CREATE PACKAGE BODY.
*/
List_iterator<LEX> it(spec->m_routine_declarations);
for (LEX *lex; (lex= it++); )
{
bool found= false;
DBUG_ASSERT(lex->sphead);
List_iterator<LEX> it2(m_routine_implementations);
for (LEX *lex2; (lex2= it2++); )
{
DBUG_ASSERT(lex2->sphead);
if (Sp_handler::eq_routine_name(lex2->sphead->m_name,
lex->sphead->m_name) &&
lex2->sphead->eq_routine_spec(lex->sphead))
{
found= true;
break;
}
}
if (!found)
{
my_error(ER_PACKAGE_ROUTINE_IN_SPEC_NOT_DEFINED_IN_BODY, MYF(0),
ErrConvDQName(lex->sphead).ptr());
return true;
}
}
return false;
}
bool sp_package::validate_private_routines(THD *thd)
{
/*
Check that all forward declarations in
CREATE PACKAGE BODY have implementations.
*/
List_iterator<LEX> it(m_routine_declarations);
for (LEX *lex; (lex= it++); )
{
bool found= false;
DBUG_ASSERT(lex->sphead);
List_iterator<LEX> it2(m_routine_implementations);
for (LEX *lex2; (lex2= it2++); )
{
DBUG_ASSERT(lex2->sphead);
if (Sp_handler::eq_routine_name(lex2->sphead->m_name,
lex->sphead->m_name) &&
lex2->sphead->eq_routine_spec(lex->sphead))
{
found= true;
break;
}
}
if (!found)
{
my_error(ER_PACKAGE_ROUTINE_FORWARD_DECLARATION_NOT_DEFINED, MYF(0),
ErrConvDQName(lex->sphead).ptr());
return true;
}
}
return false;
}
LEX *sp_package::LexList::find(const LEX_CSTRING &name,
enum_sp_type type)
{
List_iterator<LEX> it(*this);
for (LEX *lex; (lex= it++); )
{
DBUG_ASSERT(lex->sphead);
const char *dot;
if (lex->sphead->m_handler->type() == type &&
(dot= strrchr(lex->sphead->m_name.str, '.')))
{
size_t ofs= dot + 1 - lex->sphead->m_name.str;
LEX_CSTRING non_qualified_sphead_name= lex->sphead->m_name;
non_qualified_sphead_name.str+= ofs;
non_qualified_sphead_name.length-= ofs;
if (Sp_handler::eq_routine_name(non_qualified_sphead_name, name))
return lex;
}
}
return NULL;
}
LEX *sp_package::LexList::find_qualified(const LEX_CSTRING &name,
enum_sp_type type)
{
List_iterator<LEX> it(*this);
for (LEX *lex; (lex= it++); )
{
DBUG_ASSERT(lex->sphead);
if (lex->sphead->m_handler->type() == type &&
Sp_handler::eq_routine_name(lex->sphead->m_name, name))
return lex;
}
return NULL;
}
void sp_package::init_psi_share()
{
List_iterator<LEX> it(m_routine_implementations);
for (LEX *lex; (lex= it++); )
{
DBUG_ASSERT(lex->sphead);
lex->sphead->init_psi_share();
}
sp_head::init_psi_share();
}
void
sp_head::init(LEX *lex)
{
DBUG_ENTER("sp_head::init");
lex->spcont= m_pcont;
if (!lex->spcont)
DBUG_VOID_RETURN;
/*
Although trg_table_fields list is used only in triggers, we init it for all
types of stored procedures to simplify reset_lex()/restore_lex() code.
*/
lex->trg_table_fields.empty();
DBUG_VOID_RETURN;
}
void
sp_head::init_sp_name(const sp_name *spname)
{
DBUG_ENTER("sp_head::init_sp_name");
/* Must be initialized in the parser. */
DBUG_ASSERT(spname && spname->m_db.str && spname->m_db.length);
/* We have to copy strings to get them into the right memroot. */
Database_qualified_name::copy(&main_mem_root, spname->m_db, spname->m_name);
m_explicit_name= spname->m_explicit_name;
DBUG_VOID_RETURN;
}
void
sp_head::init_psi_share()
{
m_sp_share= MYSQL_GET_SP_SHARE(m_handler->type(), m_db.str, static_cast<uint>(m_db.length),
m_name.str, static_cast<uint>(m_name.length));
}
void
sp_head::set_body_start(THD *thd, const char *begin_ptr)
{
m_body_begin= begin_ptr;
thd->m_parser_state->m_lip.body_utf8_start(thd, begin_ptr);
}
void
sp_head::set_stmt_end(THD *thd)
{
Lex_input_stream *lip= & thd->m_parser_state->m_lip; /* shortcut */
const char *end_ptr= lip->get_cpp_ptr(); /* shortcut */
/* Make the string of parameters. */
if (m_param_begin && m_param_end)
{
m_params.length= m_param_end - m_param_begin;
m_params.str= thd->strmake(m_param_begin, m_params.length);
}
/* Remember end pointer for further dumping of whole statement. */
thd->lex->stmt_definition_end= end_ptr;
/* Make the string of body (in the original character set). */
m_body.length= end_ptr - m_body_begin;
m_body.str= thd->strmake(m_body_begin, m_body.length);
trim_whitespace(thd->charset(), &m_body);
/* Make the string of UTF-body. */
lip->body_utf8_append(end_ptr);
m_body_utf8.length= lip->get_body_utf8_length();
m_body_utf8.str= thd->strmake(lip->get_body_utf8_str(), m_body_utf8.length);
trim_whitespace(thd->charset(), &m_body_utf8);
/*
Make the string of whole stored-program-definition query (in the
original character set).
*/
m_defstr.length= end_ptr - lip->get_cpp_buf();
m_defstr.str= thd->strmake(lip->get_cpp_buf(), m_defstr.length);
trim_whitespace(thd->charset(), &m_defstr);
}
sp_head::~sp_head()
{
LEX *lex;
sp_instr *i;
DBUG_ENTER("sp_head::~sp_head");
/* sp_head::restore_thd_mem_root() must already have been called. */
DBUG_ASSERT(m_thd == NULL);
for (uint ip = 0 ; (i = get_instr(ip)) ; ip++)
delete i;
delete_dynamic(&m_instr);
delete m_pcont;
free_items();
/*
If we have non-empty LEX stack then we just came out of parser with
error. Now we should delete all auxiliary LEXes and restore the original
THD::lex. It is safe to not update LEX::ptr because further query
string parsing and execution will be stopped anyway.
*/
while ((lex= (LEX *)m_lex.pop()))
{
THD *thd= lex->thd;
thd->lex->sphead= NULL;
lex_end(thd->lex);
delete thd->lex;
thd->lex= lex;
}
my_hash_free(&m_sptabs);
my_hash_free(&m_sroutines);
sp_head::destroy(m_next_cached_sp);
DBUG_VOID_RETURN;
}
void sp_package::LexList::cleanup()
{
List_iterator<LEX> it(*this);
for (LEX *lex; (lex= it++); )
{
lex_end(lex);
delete lex;
}
}
/**
This is only used for result fields from functions (both during
fix_length_and_dec() and evaluation).
*/
Field *
sp_head::create_result_field(uint field_max_length, const LEX_CSTRING *field_name,
TABLE *table) const
{
Field *field;
LEX_CSTRING name;
DBUG_ENTER("sp_head::create_result_field");
/*
m_return_field_def.length is always set to the field length calculated
by the parser, according to the RETURNS clause. See prepare_create_field()
in sql_table.cc. Value examples, depending on data type:
- 11 for INT (character representation length)
- 20 for BIGINT (character representation length)
- 22 for DOUBLE (character representation length)
- N for CHAR(N) CHARACTER SET latin1 (octet length)
- 3*N for CHAR(N) CHARACTER SET utf8 (octet length)
- 8 for blob-alike data types (packed length !!!)
field_max_length is also set according to the data type in the RETURNS
clause but can have different values depending on the execution stage:
1. During direct execution:
field_max_length is 0, because Item_func_sp::fix_length_and_dec() has
not been called yet, so Item_func_sp::max_length is 0 by default.
2a. During PREPARE:
field_max_length is 0, because Item_func_sp::fix_length_and_dec()
has not been called yet. It's called after create_result_field().
2b. During EXEC:
field_max_length is set to the maximum possible octet length of the
RETURNS data type.
- N for CHAR(N) CHARACTER SET latin1 (octet length)
- 3*N for CHAR(N) CHARACTER SET utf8 (octet length)
- 255 for TINYBLOB (octet length, not packed length !!!)
Perhaps we should refactor prepare_create_field() to set
Create_field::length to maximum octet length for BLOBs,
instead of packed length).
Note, for integer data types, field_max_length can be bigger
than the user specified length, e.g. a field of the INT(1) data type
is translated to the item with max_length=11.
*/
DBUG_ASSERT(field_max_length <= m_return_field_def.length ||
m_return_field_def.type_handler()->cmp_type() == INT_RESULT ||
(current_thd->stmt_arena->is_stmt_execute() &&
m_return_field_def.length == 8 &&
(m_return_field_def.pack_flag &
(FIELDFLAG_BLOB|FIELDFLAG_GEOM))));
if (field_name)
name= *field_name;
else
name= m_name;
field= m_return_field_def.make_field(table->s, /* TABLE_SHARE ptr */
table->in_use->mem_root,
&name);
field->vcol_info= m_return_field_def.vcol_info;
if (field)
field->init(table);
DBUG_RETURN(field);
}
int cmp_rqp_locations(Rewritable_query_parameter * const *a,
Rewritable_query_parameter * const *b)
{
return (int)((*a)->pos_in_query - (*b)->pos_in_query);
}
/*
StoredRoutinesBinlogging
This paragraph applies only to statement-based binlogging. Row-based
binlogging does not need anything special like this.
Top-down overview:
1. Statements
Statements that have is_update_query(stmt) == TRUE are written into the
binary log verbatim.
Examples:
UPDATE tbl SET tbl.x = spfunc_w_side_effects()
UPDATE tbl SET tbl.x=1 WHERE spfunc_w_side_effect_that_returns_false(tbl.y)
Statements that have is_update_query(stmt) == FALSE (e.g. SELECTs) are not
written into binary log. Instead we catch function calls the statement
makes and write them into the binary log separately (see #3).
2. PROCEDURE calls
CALL statements are not written into binary log. Instead
* Any FUNCTION invocation (in SET, IF, WHILE, OPEN CURSOR and other SP
instructions) is written into binlog separately.
* Each statement executed in SP is binlogged separately, according to rules
in #1, with the exception that we modify query string: we replace uses
of SP local variables with NAME_CONST('spvar_name', <spvar-value>) calls.
This substitution is done in subst_spvars().
3. FUNCTION calls
In sp_head::execute_function(), we check
* If this function invocation is done from a statement that is written
into the binary log.
* If there were any attempts to write events to the binary log during
function execution (grep for start_union_events and stop_union_events)
If the answers are No and Yes, we write the function call into the binary
log as "SELECT spfunc(<param1value>, <param2value>, ...)"
4. Miscellaneous issues.
4.1 User variables.
When we call mysql_bin_log.write() for an SP statement, thd->user_var_events
must hold set<{var_name, value}> pairs for all user variables used during
the statement execution.
This set is produced by tracking user variable reads during statement
execution.
For SPs, this has the following implications:
1) thd->user_var_events may contain events from several SP statements and
needs to be valid after execution of these statements has finished. In
order to achieve that, we
* Allocate user_var_events array elements on appropriate mem_root (grep
for user_var_events_alloc).
* Use is_query_in_union() to determine if user_var_event is created.
2) We need to empty thd->user_var_events after we have wrote a function
call. This is currently done by making
reset_dynamic(&thd->user_var_events);
calls in several different places. (TODO: consider moving this into
mysql_bin_log.write() function)
4.2 Auto_increment storage in binlog
As we may write two statements to binlog from one single logical statement
(case of "SELECT func1(),func2()": it is binlogged as "SELECT func1()" and
then "SELECT func2()"), we need to reset auto_increment binlog variables
after each binlogged SELECT. Otherwise, the auto_increment value of the
first SELECT would be used for the second too.
*/
/**
Replace thd->query{_length} with a string that one can write to
the binlog.
The binlog-suitable string is produced by replacing references to SP local
variables with NAME_CONST('sp_var_name', value) calls.
@param thd Current thread.
@param instr Instruction (we look for Item_splocal instances in
instr->free_list)
@param query_str Original query string
@return
- FALSE on success.
thd->query{_length} either has been appropriately replaced or there
is no need for replacements.
- TRUE out of memory error.
*/
static bool
subst_spvars(THD *thd, sp_instr *instr, LEX_STRING *query_str)
{
DBUG_ENTER("subst_spvars");
Dynamic_array<Rewritable_query_parameter*> rewritables(PSI_INSTRUMENT_MEM);
char *pbuf;
StringBuffer<512> qbuf;
Copy_query_with_rewrite acc(thd, query_str->str, query_str->length, &qbuf);
/* Find rewritable Items used in this statement */
for (Item *item= instr->free_list; item; item= item->next)
{
Rewritable_query_parameter *rqp= item->get_rewritable_query_parameter();
if (rqp && rqp->pos_in_query)
rewritables.append(rqp);
}
if (!rewritables.elements())
DBUG_RETURN(FALSE);
rewritables.sort(cmp_rqp_locations);
thd->query_name_consts= (uint)rewritables.elements();
for (Rewritable_query_parameter **rqp= rewritables.front();
rqp <= rewritables.back(); rqp++)
{
if (acc.append(*rqp))
DBUG_RETURN(TRUE);
}
if (acc.finalize())
DBUG_RETURN(TRUE);
/*
Allocate additional space at the end of the new query string for the
query_cache_send_result_to_client function.
The query buffer layout is:
buffer :==
<statement> The input statement(s)
'\0' Terminating null char
<length> Length of following current database name 2
<db_name> Name of current database
<flags> Flags struct
*/
size_t buf_len= (qbuf.length() + 1 + QUERY_CACHE_DB_LENGTH_SIZE +
thd->db.length + QUERY_CACHE_FLAGS_SIZE + 1);
if ((pbuf= (char *) alloc_root(thd->mem_root, buf_len)))
{
char *ptr= pbuf + qbuf.length();
memcpy(pbuf, qbuf.ptr(), qbuf.length());
*ptr= 0;
int2store(ptr+1, thd->db.length);
}
else
DBUG_RETURN(TRUE);
thd->set_query(pbuf, qbuf.length());
DBUG_RETURN(FALSE);
}
void Sp_handler_procedure::recursion_level_error(THD *thd,
const sp_head *sp) const
{
my_error(ER_SP_RECURSION_LIMIT, MYF(0),
static_cast<int>(thd->variables.max_sp_recursion_depth),
sp->m_name.str);
}
/**
Execute the routine. The main instruction jump loop is there.
Assume the parameters already set.
@param thd Thread context.
@param merge_da_on_success Flag specifying if Warning Info should be
propagated to the caller on Completion
Condition or not.
@todo
- Will write this SP statement into binlog separately
(TODO: consider changing the condition to "not inside event union")
@return Error status.
@retval
FALSE on success
@retval
TRUE on error
*/
bool
sp_head::execute(THD *thd, bool merge_da_on_success)
{
DBUG_ENTER("sp_head::execute");
char saved_cur_db_name_buf[SAFE_NAME_LEN+1];
LEX_STRING saved_cur_db_name=
{ saved_cur_db_name_buf, sizeof(saved_cur_db_name_buf) };
bool cur_db_changed= FALSE;
sp_rcontext *ctx= thd->spcont;
bool err_status= FALSE;
uint ip= 0;
sql_mode_t save_sql_mode;
// TODO(cvicentiu) See if you can drop this bit. This is used to resume
// execution from where we left off.
if (m_chistics.agg_type == GROUP_AGGREGATE)
ip= thd->spcont->instr_ptr;
bool save_abort_on_warning;
Query_arena *old_arena;
/* per-instruction arena */
MEM_ROOT execute_mem_root;
Query_arena execute_arena(&execute_mem_root, STMT_INITIALIZED_FOR_SP),
backup_arena;
query_id_t old_query_id;
CSET_STRING old_query;
TABLE *old_derived_tables;
TABLE *old_rec_tables;
LEX *old_lex;
Item_change_list old_change_list;
String old_packet;
uint old_server_status;
const uint status_backup_mask= SERVER_STATUS_CURSOR_EXISTS |
SERVER_STATUS_LAST_ROW_SENT;
MEM_ROOT *user_var_events_alloc_saved= 0;
Reprepare_observer *save_reprepare_observer= thd->m_reprepare_observer;
Object_creation_ctx *UNINIT_VAR(saved_creation_ctx);
Diagnostics_area *da= thd->get_stmt_da();
Warning_info sp_wi(da->warning_info_id(), false, true);
/* this 7*STACK_MIN_SIZE is a complex matter with a long history (see it!) */
if (check_stack_overrun(thd, 7 * STACK_MIN_SIZE, (uchar*)&old_packet))
DBUG_RETURN(TRUE);
opt_trace_disable_if_no_security_context_access(thd);
/* init per-instruction memroot */
init_sql_alloc(key_memory_sp_head_execute_root, &execute_mem_root,
MEM_ROOT_BLOCK_SIZE, 0, MYF(0));
DBUG_ASSERT(!(m_flags & IS_INVOKED));
m_flags|= IS_INVOKED;
if (m_parent)
m_parent->m_invoked_subroutine_count++;
m_first_instance->m_first_free_instance= m_next_cached_sp;
if (m_next_cached_sp)
{
DBUG_PRINT("info",
("first free for %p ++: %p->%p level: %lu flags %x",
m_first_instance, this,
m_next_cached_sp,
m_next_cached_sp->m_recursion_level,
m_next_cached_sp->m_flags));
}
/*
Check that if there are no instances after this one, then the
pointer to the last instance points to this instance, or, if there are
some instances after this one, that the recursion level of the next
instance is greater than the recursion level of the current instance by 1.
*/
DBUG_ASSERT((m_next_cached_sp == 0 &&
m_first_instance->m_last_cached_sp == this) ||
(m_recursion_level + 1 == m_next_cached_sp->m_recursion_level));
/*
NOTE: The SQL Standard does not specify the context that should be
preserved for stored routines. However, at SAP/Walldorf meeting it was
decided that current database should be preserved.
*/
if (m_db.length &&
(err_status= mysql_opt_change_db(thd, &m_db, &saved_cur_db_name, FALSE,
&cur_db_changed)))
{
goto done;
}
thd->is_slave_error= 0;
old_arena= thd->stmt_arena;
/* Push a new warning information area. */
da->copy_sql_conditions_to_wi(thd, &sp_wi);
da->push_warning_info(&sp_wi);
/*
Switch query context. This has to be done early as this is sometimes
allocated on THD::mem_root
*/
if (m_creation_ctx)
saved_creation_ctx= m_creation_ctx->set_n_backup(thd);
/*
We have to save/restore this info when we are changing call level to
be able to properly do close_thread_tables() in instructions.
*/
old_query_id= thd->query_id;
old_query= thd->query_string;
old_derived_tables= thd->derived_tables;
thd->derived_tables= 0;
old_rec_tables= thd->rec_tables;
thd->rec_tables= 0;
save_sql_mode= thd->variables.sql_mode;
thd->variables.sql_mode= m_sql_mode;
save_abort_on_warning= thd->abort_on_warning;
thd->abort_on_warning= 0;
/**
When inside a substatement (a stored function or trigger
statement), clear the metadata observer in THD, if any.
Remember the value of the observer here, to be able
to restore it when leaving the substatement.
We reset the observer to suppress errors when a substatement
uses temporary tables. If a temporary table does not exist
at start of the main statement, it's not prelocked
and thus is not validated with other prelocked tables.
Later on, when the temporary table is opened, metadata
versions mismatch, expectedly.
The proper solution for the problem is to re-validate tables
of substatements (Bug#12257, Bug#27011, Bug#32868, Bug#33000),
but it's not implemented yet.
*/
thd->m_reprepare_observer= 0;
/*
It is also more efficient to save/restore the current thd->lex once than
to do it in each instruction
*/
old_lex= thd->lex;
/*
We should also save Item tree change list to avoid rollback something
too early in the calling query.
*/
thd->Item_change_list::move_elements_to(&old_change_list);
/*
Cursors will use thd->packet, so they may corrupt data which was prepared
for sending by upper level. OTOH cursors in the same routine can share this
buffer safely, so let us use a routine-local packet instead of having our own
packet buffer for each cursor.
It is probably safe to use same thd->convert_buff everywhere.
*/
old_packet.swap(thd->packet);
old_server_status= thd->server_status & status_backup_mask;
/*
Switch to per-instruction arena here. We can do it since we cleanup
arena after every instruction.
*/
thd->set_n_backup_active_arena(&execute_arena, &backup_arena);
/*
Save callers arena in order to store instruction results and out
parameters in it later during sp_eval_func_item()
*/
thd->spcont->callers_arena= &backup_arena;
#if defined(ENABLED_PROFILING)
/* Discard the initial part of executing routines. */
thd->profiling.discard_current_query();
#endif
sp_instr *i;
DEBUG_SYNC(thd, "sp_head_execute_before_loop");
do
{
#if defined(ENABLED_PROFILING)
/*
Treat each "instr" of a routine as discrete unit that could be profiled.
Profiling only records information for segments of code that set the
source of the query, and almost all kinds of instructions in s-p do not.
*/
thd->profiling.finish_current_query();
thd->profiling.start_new_query("continuing inside routine");
#endif
/* get_instr returns NULL when we're done. */
i = get_instr(ip);
if (i == NULL)
{
#if defined(ENABLED_PROFILING)
thd->profiling.discard_current_query();
#endif
thd->spcont->quit_func= TRUE;
break;
}
/* Reset number of warnings for this query. */
thd->get_stmt_da()->reset_for_next_command();
DBUG_PRINT("execute", ("Instruction %u", ip));
/*
We need to reset start_time to allow for time to flow inside a stored
procedure. This is only done for SP since time is supposed to be constant
during execution of triggers and functions.
*/
reset_start_time_for_sp(thd);
/*
We have to set thd->stmt_arena before executing the instruction
to store in the instruction free_list all new items, created
during the first execution (for example expanding of '*' or the
items made during other permanent subquery transformations).
*/
thd->stmt_arena= i;
/*
Will write this SP statement into binlog separately.
TODO: consider changing the condition to "not inside event union".
*/
if (thd->locked_tables_mode <= LTM_LOCK_TABLES)
{
user_var_events_alloc_saved= thd->user_var_events_alloc;
thd->user_var_events_alloc= thd->mem_root;
}
sql_digest_state *parent_digest= thd->m_digest;
thd->m_digest= NULL;
#ifdef WITH_WSREP
if (WSREP(thd) && thd->wsrep_next_trx_id() == WSREP_UNDEFINED_TRX_ID)
{
thd->set_wsrep_next_trx_id(thd->query_id);
WSREP_DEBUG("assigned new next trx ID for SP, trx id: %" PRIu64, thd->wsrep_next_trx_id());
}
#endif /* WITH_WSREP */
#ifdef HAVE_PSI_STATEMENT_INTERFACE
PSI_statement_locker_state state;
PSI_statement_locker *parent_locker;
PSI_statement_info *psi_info = i->get_psi_info();
parent_locker= thd->m_statement_psi;
thd->m_statement_psi= MYSQL_START_STATEMENT(& state, psi_info->m_key,
thd->db.str, thd->db.length, thd->charset(), m_sp_share);
#endif
err_status= i->execute(thd, &ip);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
MYSQL_END_STATEMENT(thd->m_statement_psi, thd->get_stmt_da());
thd->m_statement_psi= parent_locker;
#endif
#ifdef WITH_WSREP
if (WSREP(thd))
{
if (((thd->wsrep_trx().state() == wsrep::transaction::s_executing || thd->in_sub_stmt) &&
(thd->is_fatal_error || thd->killed)))
{
WSREP_DEBUG("SP abort err status %d in sub %d trx state %d",
err_status, thd->in_sub_stmt, thd->wsrep_trx().state());
err_status= 1;
thd->is_fatal_error= 1;
/*
SP was killed, and it is not due to a wsrep conflict.
We skip after_command hook at this point because
otherwise it clears the error, and cleans up the
whole transaction. For now we just return and finish
our handling once we are back to mysql_parse.
Same applies to a SP execution, which was aborted due
to wsrep related conflict, but which is executing as sub statement.
An SP at sub-statement level should neither commit nor roll back;
the rollback has to be invoked at the top-most SP level.
*/
WSREP_DEBUG("Skipping after_command hook for killed SP");
}
else
{
const bool must_replay= wsrep_must_replay(thd);
if (must_replay)
{
WSREP_DEBUG("MUST_REPLAY set after SP, err_status %d trx state: %d",
err_status, thd->wsrep_trx().state());
}
if (wsrep_thd_is_local(thd))
(void) wsrep_after_statement(thd);
/*
Reset the return code to zero if the transaction was
replayed successfully.
*/
if (must_replay && !wsrep_current_error(thd))
{
err_status= 0;
thd->get_stmt_da()->reset_diagnostics_area();
}
/*
Final wsrep error status for statement is known only after
wsrep_after_statement() call. If the error is set, override
error in thd diagnostics area and reset wsrep client_state error
so that the error does not get propagated via client-server protocol.
*/
if (wsrep_current_error(thd))
{
wsrep_override_error(thd, wsrep_current_error(thd),
wsrep_current_error_status(thd));
thd->wsrep_cs().reset_error();
/* Reset also thd->killed if it has been set during BF abort. */
if (thd->killed == KILL_QUERY)
thd->killed= NOT_KILLED;
/* if failed transaction was not replayed, must return with error from here */
if (!must_replay) err_status = 1;
}
}
}
#endif /* WITH_WSREP */
thd->m_digest= parent_digest;
if (i->free_list)
cleanup_items(i->free_list);
/*
If we've set thd->user_var_events_alloc to mem_root of this SP
statement, clean all the events allocated in it.
*/
if (thd->locked_tables_mode <= LTM_LOCK_TABLES)
{
reset_dynamic(&thd->user_var_events);
thd->user_var_events_alloc= user_var_events_alloc_saved;
}
/* we should cleanup free_list and memroot, used by instruction */
thd->cleanup_after_query();
free_root(&execute_mem_root, MYF(0));
/*
Find and process SQL handlers unless it is a fatal error (fatal
errors are not catchable by SQL handlers) or the connection has been
killed during execution.
*/
if (likely(!thd->is_fatal_error) && likely(!thd->killed_errno()) &&
ctx->handle_sql_condition(thd, &ip, i))
{
err_status= FALSE;
}
/* Reset sp_rcontext::end_partial_result_set flag. */
ctx->end_partial_result_set= FALSE;
} while (!err_status && likely(!thd->killed) &&
likely(!thd->is_fatal_error) &&
!thd->spcont->pause_state);
#if defined(ENABLED_PROFILING)
thd->profiling.finish_current_query();
thd->profiling.start_new_query("tail end of routine");
#endif
/* Restore query context. */
if (m_creation_ctx)
m_creation_ctx->restore_env(thd, saved_creation_ctx);
/* Restore arena. */
thd->restore_active_arena(&execute_arena, &backup_arena);
/* Only pop cursors when we're done with group aggregate running. */
if (m_chistics.agg_type != GROUP_AGGREGATE ||
(m_chistics.agg_type == GROUP_AGGREGATE && thd->spcont->quit_func))
thd->spcont->pop_all_cursors(thd); // To avoid memory leaks after an error
/* Restore all saved */
if (m_chistics.agg_type == GROUP_AGGREGATE)
thd->spcont->instr_ptr= ip;
thd->server_status= (thd->server_status & ~status_backup_mask) | old_server_status;
old_packet.swap(thd->packet);
DBUG_ASSERT(thd->Item_change_list::is_empty());
old_change_list.move_elements_to(thd);
thd->lex= old_lex;
thd->set_query_id(old_query_id);
thd->set_query_inner(old_query);
DBUG_ASSERT(!thd->derived_tables);
thd->derived_tables= old_derived_tables;
thd->rec_tables= old_rec_tables;
thd->variables.sql_mode= save_sql_mode;
thd->abort_on_warning= save_abort_on_warning;
thd->m_reprepare_observer= save_reprepare_observer;
thd->stmt_arena= old_arena;
state= STMT_EXECUTED;
/*
Restore the caller's original warning information area:
- warnings generated during trigger execution should not be
propagated to the caller on success;
- if there was an exception during execution, warning info should be
propagated to the caller in any case.
*/
da->pop_warning_info();
if (err_status || merge_da_on_success)
{
/*
If a routine body is empty or if a routine did not generate any warnings,
do not duplicate our own contents by appending the contents of the called
routine. We know that the called routine did not change its warning info.
On the other hand, if the routine body is not empty and some statement in
the routine generates a warning or uses tables, warning info is guaranteed
to have changed. In this case we know that the routine warning info
contains only new warnings, and thus we perform a copy.
*/
if (da->warning_info_changed(&sp_wi))
{
/*
If the invocation of the routine was a standalone statement,
rather than a sub-statement, in other words, if it's a CALL
of a procedure, rather than invocation of a function or a
trigger, we need to clear the current contents of the caller's
warning info.
This is per MySQL rules: if a statement generates a warning,
warnings from the previous statement are flushed. Normally
it's done in push_warning(). However, here we don't use
push_warning() to avoid invocation of condition handlers or
escalation of warnings to errors.
*/
da->opt_clear_warning_info(thd->query_id);
da->copy_sql_conditions_from_wi(thd, &sp_wi);
da->remove_marked_sql_conditions();
if (i != NULL)
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_SP_STACK_TRACE,
ER_THD(thd, ER_SP_STACK_TRACE),
i->m_lineno,
m_qname.str != NULL ? m_qname.str :
"anonymous block");
}
}
done:
DBUG_PRINT("info", ("err_status: %d killed: %d is_slave_error: %d report_error: %d",
err_status, thd->killed, thd->is_slave_error,
thd->is_error()));
if (thd->killed)
err_status= TRUE;
/*
If the DB has changed, the pointer has changed too, but the
original thd->db will then have been freed
*/
if (cur_db_changed && thd->killed != KILL_CONNECTION)
{
/*
Force switching back to the saved current database, because it may be
NULL. In this case, mysql_change_db() would generate an error.
*/
err_status|= mysql_change_db(thd, (LEX_CSTRING*)&saved_cur_db_name, TRUE) != 0;
}
m_flags&= ~IS_INVOKED;
if (m_parent)
m_parent->m_invoked_subroutine_count--;
DBUG_PRINT("info",
("first free for %p --: %p->%p, level: %lu, flags %x",
m_first_instance,
m_first_instance->m_first_free_instance,
this, m_recursion_level, m_flags));
/*
Check that we have one of the following:
1) there are no free instances, which means that this instance is the last
in the list of instances (the pointer to the last instance points to it and
there are no other instances after this one in the list)
2) there are some free instances, which means that the first free instance
should go just after this one and the recursion level of that free instance
should be 1 more than the recursion level of this instance.
*/
DBUG_ASSERT((m_first_instance->m_first_free_instance == 0 &&
this == m_first_instance->m_last_cached_sp &&
m_next_cached_sp == 0) ||
(m_first_instance->m_first_free_instance != 0 &&
m_first_instance->m_first_free_instance == m_next_cached_sp &&
m_first_instance->m_first_free_instance->m_recursion_level ==
m_recursion_level + 1));
m_first_instance->m_first_free_instance= this;
DBUG_RETURN(err_status);
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS
/**
set_routine_security_ctx() changes the routine security context, and
checks if there is an EXECUTE privilege in the new context. If there is
no EXECUTE privilege, it changes the context back and returns an
error.
@param thd thread handle
@param sp stored routine to change the context for
@param save_ctx pointer to an old security context
@todo
- Cache if the definer has the right to use the object on the
first usage and only reset the cache if someone does a GRANT
statement that 'may' affect this.
@retval
TRUE if there was a error, and the context wasn't changed.
@retval
FALSE if the context was changed.
*/
bool
set_routine_security_ctx(THD *thd, sp_head *sp, Security_context **save_ctx)
{
*save_ctx= 0;
if (sp->suid() != SP_IS_NOT_SUID &&
sp->m_security_ctx.change_security_context(thd, &sp->m_definer.user,
&sp->m_definer.host,
&sp->m_db,
save_ctx))
return TRUE;
/*
If we changed context to run as another user, we need to check the
access right for the new context again as someone may have revoked
the right to use the procedure from this user.
TODO:
Cache if the definer has the right to use the object on the
first usage and only reset the cache if someone does a GRANT
statement that 'may' affect this.
*/
if (*save_ctx &&
sp->check_execute_access(thd))
{
sp->m_security_ctx.restore_security_context(thd, *save_ctx);
*save_ctx= 0;
return TRUE;
}
return FALSE;
}
#endif // ! NO_EMBEDDED_ACCESS_CHECKS
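/**
  Check if the invoker has EXECUTE privilege for this routine.
  For routines that belong to a package, the check is delegated to the
  parent package.
*/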
bool sp_head::check_execute_access(THD *thd) const
{
return m_parent ? m_parent->check_execute_access(thd) :
check_routine_access(thd, EXECUTE_ACL,
&m_db, &m_name,
m_handler, false);
}
/**
Create rcontext optionally using the routine security.
This is important for sql_mode=ORACLE to make sure that the invoker has
access to the tables mentioned in the %TYPE references.
In non-Oracle sql_modes we do not need access to any tables,
so we can omit the security context switch for performance purposes.
@param thd
@param ret_value
@retval NULL - error (access denied or EOM)
@retval !NULL - success (the invoker has rights to all %TYPE tables)
*/
sp_rcontext *sp_head::rcontext_create(THD *thd, Field *ret_value,
Row_definition_list *defs,
bool switch_security_ctx)
{
if (!(m_flags & HAS_COLUMN_TYPE_REFS))
return sp_rcontext::create(thd, this, m_pcont, ret_value, *defs);
sp_rcontext *res= NULL;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
Security_context *save_security_ctx;
if (switch_security_ctx &&
set_routine_security_ctx(thd, this, &save_security_ctx))
return NULL;
#endif
if (!defs->resolve_type_refs(thd))
res= sp_rcontext::create(thd, this, m_pcont, ret_value, *defs);
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (switch_security_ctx)
m_security_ctx.restore_security_context(thd, save_security_ctx);
#endif
return res;
}
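/**
  Create a runtime context for a routine call with a list of actual
  arguments: retrieve the formal parameter definitions, adjust them to
  the actual arguments and delegate to the main rcontext_create().
*/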
sp_rcontext *sp_head::rcontext_create(THD *thd, Field *ret_value,
List<Item> *args)
{
DBUG_ASSERT(args);
Row_definition_list defs;
m_pcont->retrieve_field_definitions(&defs);
if (defs.adjust_formal_params_to_actual_params(thd, args))
return NULL;
return rcontext_create(thd, ret_value, &defs, true);
}
sp_rcontext *sp_head::rcontext_create(THD *thd, Field *ret_value,
Item **args, uint arg_count)
{
Row_definition_list defs;
m_pcont->retrieve_field_definitions(&defs);
if (defs.adjust_formal_params_to_actual_params(thd, args, arg_count))
return NULL;
return rcontext_create(thd, ret_value, &defs, true);
}
/**
Execute trigger stored program.
- changes security context for triggers
- switch to new memroot
- call sp_head::execute
- restore old memroot
- restores security context
@param thd Thread handle
@param db database name
@param table table name
@param grant_info GRANT_INFO structure to be filled with
information about definer's privileges
on subject table
@todo
- TODO: we should create sp_rcontext once per command and reuse it
on subsequent executions of a trigger.
@retval
FALSE on success
@retval
TRUE on error
*/
bool
sp_head::execute_trigger(THD *thd,
const LEX_CSTRING *db_name,
const LEX_CSTRING *table_name,
GRANT_INFO *grant_info)
{
sp_rcontext *octx = thd->spcont;
sp_rcontext *nctx = NULL;
bool err_status= FALSE;
MEM_ROOT call_mem_root;
Query_arena call_arena(&call_mem_root, Query_arena::STMT_INITIALIZED_FOR_SP);
Query_arena backup_arena;
DBUG_ENTER("sp_head::execute_trigger");
DBUG_PRINT("info", ("trigger %s", m_name.str));
#ifndef NO_EMBEDDED_ACCESS_CHECKS
Security_context *save_ctx= NULL;
if (suid() != SP_IS_NOT_SUID &&
m_security_ctx.change_security_context(thd,
&m_definer.user,
&m_definer.host,
&m_db,
&save_ctx))
DBUG_RETURN(TRUE);
/*
Fetch information about table-level privileges for subject table into
GRANT_INFO instance. The access check itself will happen in
Item_trigger_field, where this information will be used along with
information about column-level privileges.
*/
fill_effective_table_privileges(thd,
grant_info,
db_name->str,
table_name->str);
/* Check that the definer has TRIGGER privilege on the subject table. */
if (!(grant_info->privilege & TRIGGER_ACL))
{
char priv_desc[128];
get_privilege_desc(priv_desc, sizeof(priv_desc), TRIGGER_ACL);
my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0), priv_desc,
thd->security_ctx->priv_user, thd->security_ctx->host_or_ip,
table_name->str);
m_security_ctx.restore_security_context(thd, save_ctx);
DBUG_RETURN(TRUE);
}
#endif // NO_EMBEDDED_ACCESS_CHECKS
/*
Prepare an arena and memroot for objects whose lifetime is the whole
duration of the trigger call (sp_rcontext, its tables and items,
sp_cursor and Item_cache holders for case expressions). We can't
use the caller's arena/memroot for those objects because then a
fixed amount of memory would be consumed for each trigger
invocation, and statements which involve lots of them would hog
memory.
TODO: we should create sp_rcontext once per command and reuse it
on subsequent executions of a trigger.
*/
init_sql_alloc(key_memory_sp_head_call_root,
&call_mem_root, MEM_ROOT_BLOCK_SIZE, 0, MYF(0));
thd->set_n_backup_active_arena(&call_arena, &backup_arena);
Row_definition_list defs;
m_pcont->retrieve_field_definitions(&defs);
if (!(nctx= rcontext_create(thd, NULL, &defs, false)))
{
err_status= TRUE;
goto err_with_cleanup;
}
thd->spcont= nctx;
MYSQL_RUN_SP(this, err_status= execute(thd, FALSE));
err_with_cleanup:
thd->restore_active_arena(&call_arena, &backup_arena);
#ifndef NO_EMBEDDED_ACCESS_CHECKS
m_security_ctx.restore_security_context(thd, save_ctx);
#endif // NO_EMBEDDED_ACCESS_CHECKS
delete nctx;
call_arena.free_items();
free_root(&call_mem_root, MYF(0));
thd->spcont= octx;
if (thd->killed)
thd->send_kill_message();
DBUG_RETURN(err_status);
}
/*
Execute the package initialization section.
*/
bool sp_package::instantiate_if_needed(THD *thd)
{
List<Item> args;
if (m_is_instantiated)
return false;
/*
Set m_is_instantiated to true early, to avoid recursion in case
the package initialization section calls routines from the same package.
*/
m_is_instantiated= true;
/*
Check that the initialization section doesn't contain Dynamic SQL
and doesn't return result sets: such stored procedures can't
be called from a function or trigger.
*/
if (thd->in_sub_stmt)
{
const char *where= (thd->in_sub_stmt & SUB_STMT_TRIGGER ?
"trigger" : "function");
if (is_not_allowed_in_function(where))
goto err;
}
args.elements= 0;
if (execute_procedure(thd, &args))
goto err;
return false;
err:
m_is_instantiated= false;
return true;
}
/**
Execute a function.
- evaluate parameters
- changes security context for SUID routines
- switch to new memroot
- call sp_head::execute
- restore old memroot
- evaluate the return value
- restores security context
@param thd Thread handle
@param argp Passed arguments (these are items from containing
statement?)
@param argcount Number of passed arguments. We need to check if
this is correct.
@param return_value_fld Save result here.
@todo
We should create sp_rcontext once per command and reuse
it on subsequent executions of a function/trigger.
@todo
In future we should associate call arena/mem_root with
sp_rcontext and allocate all these objects (and sp_rcontext
itself) on it directly rather than juggle with arenas.
@retval
FALSE on success
@retval
TRUE on error
*/
bool
sp_head::execute_function(THD *thd, Item **argp, uint argcount,
Field *return_value_fld, sp_rcontext **func_ctx,
Query_arena *call_arena)
{
ulonglong UNINIT_VAR(binlog_save_options);
bool need_binlog_call= FALSE;
uint arg_no;
sp_rcontext *octx = thd->spcont;
char buf[STRING_BUFFER_USUAL_SIZE];
String binlog_buf(buf, sizeof(buf), &my_charset_bin);
bool err_status= FALSE;
Query_arena backup_arena;
DBUG_ENTER("sp_head::execute_function");
DBUG_PRINT("info", ("function %s", m_name.str));
if (m_parent && m_parent->instantiate_if_needed(thd))
DBUG_RETURN(true);
/*
Check that the function is called with all specified arguments.
If it is not, use my_error() to report an error, or it will not terminate
the invoking query properly.
*/
if (argcount != m_pcont->context_var_count())
{
/*
Need to use my_error here, or it will not terminate the
invoking query properly.
*/
my_error(ER_SP_WRONG_NO_OF_ARGS, MYF(0),
"FUNCTION", ErrConvDQName(this).ptr(),
m_pcont->context_var_count(), argcount);
DBUG_RETURN(TRUE);
}
/*
Prepare an arena and memroot for objects whose lifetime is the whole
duration of the function call (sp_rcontext, its tables and items,
sp_cursor and Item_cache holders for case expressions).
We can't use the caller's arena/memroot for those objects because
then a fixed amount of memory would be consumed for each
function/trigger invocation, and statements which involve lots of
them would hog memory.
TODO: we should create sp_rcontext once per command and reuse
it on subsequent executions of a function/trigger.
*/
if (!(*func_ctx))
{
thd->set_n_backup_active_arena(call_arena, &backup_arena);
if (!(*func_ctx= rcontext_create(thd, return_value_fld, argp, argcount)))
{
thd->restore_active_arena(call_arena, &backup_arena);
err_status= TRUE;
goto err_with_cleanup;
}
/*
We have to switch temporarily back to the caller's arena/memroot.
Function arguments belong to the caller, so they may reference
memory which they will allocate during calculation long after
this function call has finished (e.g. in Item::cleanup()).
*/
thd->restore_active_arena(call_arena, &backup_arena);
}
/* Pass arguments. */
for (arg_no= 0; arg_no < argcount; arg_no++)
{
/* Arguments must be fixed in Item_func_sp::fix_fields */
DBUG_ASSERT(argp[arg_no]->fixed());
err_status= bind_input_param(thd, argp[arg_no], arg_no, *func_ctx, TRUE);
if (err_status)
goto err_with_cleanup;
}
/*
With row-based binlogging we don't need to binlog the function call;
let each substatement be binlogged in its own way.
*/
need_binlog_call= mysql_bin_log.is_open() &&
(thd->variables.option_bits & OPTION_BIN_LOG) &&
!thd->is_current_stmt_binlog_format_row();
/*
Remember the original arguments for unrolled replication of functions
before they are changed by execution.
*/
if (need_binlog_call)
{
binlog_buf.length(0);
binlog_buf.append(STRING_WITH_LEN("SELECT "));
append_identifier(thd, &binlog_buf, &m_db);
binlog_buf.append('.');
append_identifier(thd, &binlog_buf, &m_name);
binlog_buf.append('(');
for (arg_no= 0; arg_no < argcount; arg_no++)
{
String str_value_holder;
String *str_value;
if (arg_no)
binlog_buf.append(',');
Item_field *item= (*func_ctx)->get_parameter(arg_no);
str_value= item->type_handler()->print_item_value(thd, item,
&str_value_holder);
if (str_value)
binlog_buf.append(*str_value);
else
binlog_buf.append(NULL_clex_str);
}
binlog_buf.append(')');
}
thd->spcont= *func_ctx;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
Security_context *save_security_ctx;
if (set_routine_security_ctx(thd, this, &save_security_ctx))
{
err_status= TRUE;
goto err_with_cleanup;
}
#endif
if (need_binlog_call)
{
query_id_t q;
reset_dynamic(&thd->user_var_events);
/*
In case of artificially constructed events for function calls
we have a separate union for each such event and hence can't use
the query_id of the real calling statement as the start of all these
unions (this would break the logic of replication of user-defined
variables). So we use an artificial value which is guaranteed to
be greater than all query_ids of all statements belonging
to previous events/unions.
Possible alternative to this is logging of all function invocations
as one select and not resetting THD::user_var_events before
each invocation.
*/
q= get_query_id();
mysql_bin_log.start_union_events(thd, q + 1);
binlog_save_options= thd->variables.option_bits;
thd->variables.option_bits&= ~OPTION_BIN_LOG;
}
opt_trace_disable_if_no_stored_proc_func_access(thd, this);
/*
Switch to call arena/mem_root so objects like sp_cursor or
Item_cache holders for case expressions can be allocated on it.
TODO: In future we should associate call arena/mem_root with
sp_rcontext and allocate all these objects (and sp_rcontext
itself) on it directly rather than juggle with arenas.
*/
thd->set_n_backup_active_arena(call_arena, &backup_arena);
MYSQL_RUN_SP(this, err_status= execute(thd, TRUE));
thd->restore_active_arena(call_arena, &backup_arena);
if (need_binlog_call)
{
mysql_bin_log.stop_union_events(thd);
thd->variables.option_bits= binlog_save_options;
if (thd->binlog_evt_union.unioned_events)
{
int errcode = query_error_code(thd, thd->killed == NOT_KILLED);
Query_log_event qinfo(thd, binlog_buf.ptr(), binlog_buf.length(),
thd->binlog_evt_union.unioned_events_trans, FALSE, FALSE, errcode);
if (mysql_bin_log.write(&qinfo) &&
thd->binlog_evt_union.unioned_events_trans)
{
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
"Invoked ROUTINE modified a transactional table but MySQL "
"failed to reflect this change in the binary log");
err_status= TRUE;
}
reset_dynamic(&thd->user_var_events);
/* Forget those values, in case more function calls are binlogged: */
thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0;
thd->auto_inc_intervals_in_cur_stmt_for_binlog.empty();
}
}
if (!err_status && thd->spcont->quit_func)
{
/* We need result only in function but not in trigger */
if (!(*func_ctx)->is_return_value_set())
{
my_error(ER_SP_NORETURNEND, MYF(0), m_name.str);
err_status= TRUE;
}
else
{
/*
Copy back all OUT or INOUT values to the previous frame, or
set global user variables
*/
for (arg_no= 0; arg_no < argcount; arg_no++)
{
err_status= bind_output_param(thd, argp[arg_no], arg_no, octx, *func_ctx);
if (err_status)
break;
}
}
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS
m_security_ctx.restore_security_context(thd, save_security_ctx);
#endif
err_with_cleanup:
thd->spcont= octx;
/*
If we are not inside a stored procedure or function, issue any
pending unsafe-statement warnings.
*/
if (need_binlog_call &&
thd->spcont == NULL && !thd->binlog_evt_union.do_union)
thd->issue_unsafe_warnings();
DBUG_RETURN(err_status);
}
/**
Execute a procedure.
The function does the following steps:
- Set all parameters
- changes security context for SUID routines
- call sp_head::execute
- copy back values of INOUT and OUT parameters
- restores security context
@param thd Thread handle
@param args List of values passed as arguments.
@retval
FALSE on success
@retval
TRUE on error
*/
bool
sp_head::execute_procedure(THD *thd, List<Item> *args)
{
bool err_status= FALSE;
uint params = m_pcont->context_var_count();
/* Query start time may be reset in a multi-stmt SP; keep this for later. */
ulonglong utime_before_sp_exec= thd->utime_after_lock;
sp_rcontext *save_spcont, *octx;
sp_rcontext *nctx = NULL;
bool save_enable_slow_log;
bool save_log_general= false;
sp_package *pkg= get_package();
DBUG_ENTER("sp_head::execute_procedure");
DBUG_PRINT("info", ("procedure %s", m_name.str));
if (m_parent && m_parent->instantiate_if_needed(thd))
DBUG_RETURN(true);
if (args->elements != params)
{
my_error(ER_SP_WRONG_NO_OF_ARGS, MYF(0), "PROCEDURE",
ErrConvDQName(this).ptr(), params, args->elements);
DBUG_RETURN(TRUE);
}
save_spcont= octx= thd->spcont;
if (! octx)
{
/* Create a temporary old context. */
if (!(octx= rcontext_create(thd, NULL, args)))
{
DBUG_PRINT("error", ("Could not create octx"));
DBUG_RETURN(TRUE);
}
thd->spcont= octx;
/* set callers_arena to thd, for upper-level function to work */
thd->spcont->callers_arena= thd;
}
if (!pkg)
{
if (!(nctx= rcontext_create(thd, NULL, args)))
{
delete nctx; /* Delete nctx if it was init() that failed. */
thd->spcont= save_spcont;
DBUG_RETURN(TRUE);
}
}
else
{
if (!pkg->m_rcontext)
{
Query_arena backup_arena;
thd->set_n_backup_active_arena(this, &backup_arena);
nctx= pkg->rcontext_create(thd, NULL, args);
thd->restore_active_arena(this, &backup_arena);
if (!nctx)
{
thd->spcont= save_spcont;
DBUG_RETURN(TRUE);
}
pkg->m_rcontext= nctx;
}
else
nctx= pkg->m_rcontext;
}
if (params > 0)
{
List_iterator<Item> it_args(*args);
DBUG_PRINT("info",(" %.*s: eval args", (int) m_name.length, m_name.str));
for (uint i= 0 ; i < params ; i++)
{
Item *arg_item= it_args++;
if (!arg_item)
break;
err_status= bind_input_param(thd, arg_item, i, nctx, FALSE);
if (err_status)
break;
}
/*
Okay, got values for all arguments. Close tables that might be used by
arguments evaluation. If arguments evaluation required prelocking mode,
we'll leave it here.
*/
thd->lex->unit.cleanup();
if (!thd->in_sub_stmt)
{
thd->get_stmt_da()->set_overwrite_status(true);
thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd);
thd->get_stmt_da()->set_overwrite_status(false);
}
close_thread_tables(thd);
thd_proc_info(thd, 0);
if (! thd->in_sub_stmt)
{
if (thd->transaction_rollback_request)
{
trans_rollback_implicit(thd);
thd->release_transactional_locks();
}
else if (! thd->in_multi_stmt_transaction_mode())
thd->release_transactional_locks();
else
thd->mdl_context.release_statement_locks();
}
thd->rollback_item_tree_changes();
DBUG_PRINT("info",(" %.*s: eval args done", (int) m_name.length,
m_name.str));
}
save_enable_slow_log= thd->enable_slow_log;
/*
Disable the slow log for this execution if:
- slow logging is currently enabled
- this is a normal SP (not an event)
- slow logging of stored procedures is disabled via
  log_slow_disabled_statements
*/
if (save_enable_slow_log &&
((!(m_flags & LOG_SLOW_STATEMENTS) &&
(thd->variables.log_slow_disabled_statements & LOG_SLOW_DISABLE_SP))))
{
DBUG_PRINT("info", ("Disabling slow log for the execution"));
thd->enable_slow_log= FALSE;
}
/*
Disable the general log for this execution if:
- the general log is currently enabled
- this is a normal SP (not an event)
- general logging of stored procedures is disabled via
  log_disabled_statements
*/
if (!(thd->variables.option_bits & OPTION_LOG_OFF) &&
(!(m_flags & LOG_GENERAL_LOG) &&
(thd->variables.log_disabled_statements & LOG_DISABLE_SP)))
{
DBUG_PRINT("info", ("Disabling general log for the execution"));
save_log_general= true;
/* Set OPTION_LOG_OFF to suppress general logging */
thd->variables.option_bits |= OPTION_LOG_OFF;
}
thd->spcont= nctx;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
Security_context *save_security_ctx= 0;
if (!err_status)
err_status= set_routine_security_ctx(thd, this, &save_security_ctx);
#endif
opt_trace_disable_if_no_stored_proc_func_access(thd, this);
if (!err_status)
MYSQL_RUN_SP(this, err_status= execute(thd, TRUE));
if (save_log_general)
thd->variables.option_bits &= ~OPTION_LOG_OFF;
thd->enable_slow_log= save_enable_slow_log;
/*
In the case when we weren't able to employ the reuse mechanism for
OUT/INOUT parameters, we should reallocate memory. This
allocation should be done on the arena which will live through
the whole execution of the calling routine.
*/
thd->spcont->callers_arena= octx->callers_arena;
if (!err_status && params > 0)
{
List_iterator<Item> it_args(*args);
/*
Copy back all OUT or INOUT values to the previous frame, or
set global user variables
*/
for (uint i= 0 ; i < params ; i++)
{
Item *arg_item= it_args++;
if (!arg_item)
break;
err_status= bind_output_param(thd, arg_item, i, octx, nctx);
if (err_status)
break;
}
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (save_security_ctx)
m_security_ctx.restore_security_context(thd, save_security_ctx);
#endif
if (!save_spcont)
delete octx;
if (!pkg)
delete nctx;
thd->spcont= save_spcont;
thd->utime_after_lock= utime_before_sp_exec;
/*
If we are not inside a stored procedure or function, issue any
pending unsafe-statement warnings.
*/
bool need_binlog_call= mysql_bin_log.is_open() &&
(thd->variables.option_bits & OPTION_BIN_LOG) &&
!thd->is_current_stmt_binlog_format_row();
if (need_binlog_call && thd->spcont == NULL &&
!thd->binlog_evt_union.do_union)
thd->issue_unsafe_warnings();
DBUG_RETURN(err_status);
}
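/**
  Bind one actual argument to the corresponding routine parameter
  before execution.
  OUT/INOUT arguments must be settable items (e.g. user or SP
  variables); for stored functions OUT/INOUT arguments are rejected
  in SELECT/INSERT/UPDATE/DELETE statements. OUT parameters are
  initialized to NULL, IN and INOUT parameters are evaluated and
  stored in the new runtime context.
  @retval FALSE on success
  @retval TRUE  on error
*/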
bool
sp_head::bind_input_param(THD *thd,
Item *arg_item,
uint arg_no,
sp_rcontext *nctx,
bool is_function)
{
DBUG_ENTER("sp_head::bind_input_param");
sp_variable *spvar= m_pcont->find_variable(arg_no);
if (!spvar)
DBUG_RETURN(FALSE);
if (spvar->mode != sp_variable::MODE_IN)
{
Settable_routine_parameter *srp=
arg_item->get_settable_routine_parameter();
if (!srp)
{
my_error(ER_SP_NOT_VAR_ARG, MYF(0), arg_no+1, ErrConvDQName(this).ptr());
DBUG_RETURN(TRUE);
}
if (is_function)
{
/*
Check if the function is called from a SELECT/INSERT/UPDATE/DELETE query
and the parameter is OUT or INOUT.
If so, the call is invalid - report an error.
*/
if (thd->lex->sql_command == SQLCOM_SELECT ||
thd->lex->sql_command == SQLCOM_INSERT ||
thd->lex->sql_command == SQLCOM_INSERT_SELECT ||
thd->lex->sql_command == SQLCOM_UPDATE ||
thd->lex->sql_command == SQLCOM_DELETE)
{
my_error(ER_SF_OUT_INOUT_ARG_NOT_ALLOWED, MYF(0), arg_no+1, m_name.str);
DBUG_RETURN(TRUE);
}
}
srp->set_required_privilege(spvar->mode == sp_variable::MODE_INOUT);
}
if (spvar->mode == sp_variable::MODE_OUT)
{
Item_null *null_item= new (thd->mem_root) Item_null(thd);
Item *tmp_item= null_item;
if (!null_item ||
nctx->set_parameter(thd, arg_no, &tmp_item))
{
DBUG_PRINT("error", ("set variable failed"));
DBUG_RETURN(TRUE);
}
}
else
{
if (nctx->set_parameter(thd, arg_no, &arg_item))
{
DBUG_PRINT("error", ("set variable 2 failed"));
DBUG_RETURN(TRUE);
}
}
TRANSACT_TRACKER(add_trx_state_from_thd(thd));
DBUG_RETURN(FALSE);
}
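/**
  Copy the value of an OUT or INOUT parameter back to the caller's
  argument after execution, and attach Send_field metadata describing
  the parameter so that it can be reported to the client.
  @retval FALSE on success
  @retval TRUE  on error
*/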
bool
sp_head::bind_output_param(THD *thd,
Item *arg_item,
uint arg_no,
sp_rcontext *octx,
sp_rcontext *nctx)
{
DBUG_ENTER("sp_head::bind_output_param");
sp_variable *spvar= m_pcont->find_variable(arg_no);
if (spvar->mode == sp_variable::MODE_IN)
DBUG_RETURN(FALSE);
Settable_routine_parameter *srp=
arg_item->get_settable_routine_parameter();
DBUG_ASSERT(srp);
if (srp->set_value(thd, octx, nctx->get_variable_addr(arg_no)))
{
DBUG_PRINT("error", ("set value failed"));
DBUG_RETURN(TRUE);
}
Send_field *out_param_info= new (thd->mem_root) Send_field(thd, nctx->get_parameter(arg_no));
out_param_info->db_name= m_db;
out_param_info->table_name= m_name;
out_param_info->org_table_name= m_name;
out_param_info->col_name= spvar->name;
out_param_info->org_col_name= spvar->name;
srp->set_out_param_info(out_param_info);
DBUG_RETURN(FALSE);
}
/**
Reset lex during parsing, before we parse a sub statement.
@param thd Thread handler.
@return Error state
@retval true An error occurred.
@retval false Success.
*/
bool
sp_head::reset_lex(THD *thd, sp_lex_local *sublex)
{
DBUG_ENTER("sp_head::reset_lex");
LEX *oldlex= thd->lex;
thd->set_local_lex(sublex);
DBUG_RETURN(m_lex.push_front(oldlex));
}
bool
sp_head::reset_lex(THD *thd)
{
DBUG_ENTER("sp_head::reset_lex");
sp_lex_local *sublex= new (thd->mem_root) sp_lex_local(thd, thd->lex);
DBUG_RETURN(sublex ? reset_lex(thd, sublex) : true);
}
/**
Restore lex during parsing, after we have parsed a sub statement.
@param thd Thread handle
@param oldlex The upper level lex we're near to restore to
@param sublex The local lex we're near to restore from
@return
@retval TRUE failure
@retval FALSE success
*/
bool
sp_head::merge_lex(THD *thd, LEX *oldlex, LEX *sublex)
{
DBUG_ENTER("sp_head::merge_lex");
sublex->set_trg_event_type_for_tables();
oldlex->trg_table_fields.push_back(&sublex->trg_table_fields);
/* If this substatement is unsafe, the entire routine is too. */
DBUG_PRINT("info", ("sublex->get_stmt_unsafe_flags: 0x%x",
sublex->get_stmt_unsafe_flags()));
unsafe_flags|= sublex->get_stmt_unsafe_flags();
/*
Add routines which are used by statement to respective set for
this routine.
*/
if (sp_update_sp_used_routines(&m_sroutines, &sublex->sroutines))
DBUG_RETURN(TRUE);
/* If this substatement is an update query, then mark MODIFIES_DATA */
if (is_update_query(sublex->sql_command))
m_flags|= MODIFIES_DATA;
/*
Merge tables used by this statement (but not by its functions or
procedures) to multiset of tables used by this routine.
*/
merge_table_list(thd, sublex->query_tables, sublex);
/* Merge lists of PS parameters. */
oldlex->param_list.append(&sublex->param_list);
DBUG_RETURN(FALSE);
}
/**
Put the instruction on the backpatch list, associated with the label.
*/
int
sp_head::push_backpatch(THD *thd, sp_instr *i, sp_label *lab,
List<bp_t> *list, backpatch_instr_type itype)
{
bp_t *bp= (bp_t *) thd->alloc(sizeof(bp_t));
if (!bp)
return 1;
bp->lab= lab;
bp->instr= i;
bp->instr_type= itype;
return list->push_front(bp);
}
int
sp_head::push_backpatch(THD *thd, sp_instr *i, sp_label *lab)
{
return push_backpatch(thd, i, lab, &m_backpatch, GOTO);
}
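/**
  Register a forward GOTO: add placeholder hpop and cpop instructions
  and a jump with an unresolved destination, and put all three on the
  GOTO backpatch list. They are fixed up or removed in backpatch_goto()
  once the target label is known.
*/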
int
sp_head::push_backpatch_goto(THD *thd, sp_pcontext *ctx, sp_label *lab)
{
uint ip= instructions();
/*
Add cpop/hpop instructions: they will be removed or updated later,
depending on whether the target is in the same block or not.
*/
sp_instr_hpop *hpop= new (thd->mem_root) sp_instr_hpop(ip++, ctx, 0);
if (hpop == NULL || add_instr(hpop))
return true;
if (push_backpatch(thd, hpop, lab, &m_backpatch_goto, HPOP))
return true;
sp_instr_cpop *cpop= new (thd->mem_root) sp_instr_cpop(ip++, ctx, 0);
if (cpop == NULL || add_instr(cpop))
return true;
if (push_backpatch(thd, cpop, lab, &m_backpatch_goto, CPOP))
return true;
// Add jump with ip=0. IP will be updated when label is found.
sp_instr_jump *i= new (thd->mem_root) sp_instr_jump(ip, ctx);
if (i == NULL || add_instr(i))
return true;
if (push_backpatch(thd, i, lab, &m_backpatch_goto, GOTO))
return true;
return false;
}
/**
Update all instruction with this label in the backpatch list to
the current position.
*/
void
sp_head::backpatch(sp_label *lab)
{
bp_t *bp;
uint dest= instructions();
List_iterator_fast<bp_t> li(m_backpatch);
DBUG_ENTER("sp_head::backpatch");
while ((bp= li++))
{
if (bp->lab == lab)
{
DBUG_PRINT("info", ("backpatch: (m_ip %d, label %p <%s>) to dest %d",
bp->instr->m_ip, lab, lab->name.str, dest));
bp->instr->backpatch(dest, lab->ctx);
}
}
DBUG_VOID_RETURN;
}
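/**
  Resolve pending GOTO jumps to the given label: update jump
  destinations and either remove or adjust the placeholder hpop/cpop
  instructions, depending on how many handlers and cursors have to be
  popped when jumping out of the block where the label is defined.
*/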
void
sp_head::backpatch_goto(THD *thd, sp_label *lab,sp_label *lab_begin_block)
{
bp_t *bp;
uint dest= instructions();
List_iterator<bp_t> li(m_backpatch_goto);
DBUG_ENTER("sp_head::backpatch_goto");
while ((bp= li++))
{
if (bp->instr->m_ip < lab_begin_block->ip || bp->instr->m_ip > lab->ip)
{
/*
Only update jump targets located between the beginning of the block
where the label is defined and the label itself.
*/
continue;
}
if (lex_string_cmp(system_charset_info, &bp->lab->name, &lab->name) == 0)
{
if (bp->instr_type == GOTO)
{
DBUG_PRINT("info",
("backpatch_goto: (m_ip %d, label %p <%s>) to dest %d",
bp->instr->m_ip, lab, lab->name.str, dest));
bp->instr->backpatch(dest, lab->ctx);
// Jump resolved, remove from the list
li.remove();
continue;
}
if (bp->instr_type == CPOP)
{
uint n= bp->instr->m_ctx->diff_cursors(lab_begin_block->ctx, true);
if (n == 0)
{
// Remove cpop instr
replace_instr_to_nop(thd,bp->instr->m_ip);
}
else
{
// update count of cpop
static_cast<sp_instr_cpop*>(bp->instr)->update_count(n);
n= 1;
}
li.remove();
continue;
}
if (bp->instr_type == HPOP)
{
uint n= bp->instr->m_ctx->diff_handlers(lab_begin_block->ctx, true);
if (n == 0)
{
// Remove hpop instr
replace_instr_to_nop(thd,bp->instr->m_ip);
}
else
{
// update count of hpop
static_cast<sp_instr_hpop*>(bp->instr)->update_count(n);
n= 1;
}
li.remove();
continue;
}
}
}
DBUG_VOID_RETURN;
}
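/**
  Report an error for every GOTO whose label was never resolved.
  @return true if at least one unresolved GOTO remains
*/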
bool
sp_head::check_unresolved_goto()
{
DBUG_ENTER("sp_head::check_unresolved_goto");
bool has_unresolved_label=false;
if (m_backpatch_goto.elements > 0)
{
List_iterator_fast<bp_t> li(m_backpatch_goto);
while (bp_t* bp= li++)
{
if (bp->instr_type == GOTO)
{
my_error(ER_SP_LILABEL_MISMATCH, MYF(0), "GOTO", bp->lab->name.str);
has_unresolved_label=true;
}
}
}
DBUG_RETURN(has_unresolved_label);
}
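/**
  Start a new continuation backpatch level and optionally register the
  given instruction on it. Continuation destinations are resolved in
  do_cont_backpatch().
*/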
int
sp_head::new_cont_backpatch(sp_instr_opt_meta *i)
{
m_cont_level+= 1;
if (i)
{
/* Use the cont. destination slot to store the level */
i->m_cont_dest= m_cont_level;
if (m_cont_backpatch.push_front(i))
return 1;
}
return 0;
}
int
sp_head::add_cont_backpatch(sp_instr_opt_meta *i)
{
i->m_cont_dest= m_cont_level;
return m_cont_backpatch.push_front(i);
}
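/**
  Resolve the continuation destinations registered on the current level
  to the current instruction position and leave that level.
*/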
void
sp_head::do_cont_backpatch()
{
uint dest= instructions();
uint lev= m_cont_level--;
sp_instr_opt_meta *i;
while ((i= m_cont_backpatch.head()) && i->m_cont_dest == lev)
{
i->m_cont_dest= dest;
(void)m_cont_backpatch.pop();
}
}
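/**
  Add a cpush instruction for every cursor declared in the given
  parsing context frame.
*/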
bool
sp_head::sp_add_instr_cpush_for_cursors(THD *thd, sp_pcontext *pcontext)
{
for (uint i= 0; i < pcontext->frame_cursor_count(); i++)
{
const sp_pcursor *c= pcontext->get_cursor_by_local_frame_offset(i);
sp_instr_cpush *instr= new (thd->mem_root)
sp_instr_cpush(instructions(), pcontext, c->lex(),
pcontext->cursor_offset() + i);
if (instr == NULL || add_instr(instr))
return true;
}
return false;
}
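/**
  Store the routine characteristics, copying the COMMENT string onto
  the routine's own memory root.
*/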
void
sp_head::set_chistics(const st_sp_chistics &chistics)
{
m_chistics.set(chistics);
if (m_chistics.comment.length == 0)
m_chistics.comment.str= 0;
else
m_chistics.comment.str= strmake_root(mem_root,
m_chistics.comment.str,
m_chistics.comment.length);
}
void
sp_head::set_c_chistics(const st_sp_chistics &chistics)
{
// Set all chistics but preserve agg_type.
enum_sp_aggregate_type save_agg_type= agg_type();
set_chistics(chistics);
set_chistics_agg_type(save_agg_type);
}
void
sp_head::set_info(longlong created, longlong modified,
const st_sp_chistics &chistics, sql_mode_t sql_mode)
{
m_created= created;
m_modified= modified;
set_chistics(chistics);
m_sql_mode= sql_mode;
}
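/**
  Switch the thread to the routine's own memory root and free_list
  during parsing; the caller's state is saved and later restored by
  restore_thd_mem_root().
*/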
void
sp_head::reset_thd_mem_root(THD *thd)
{
DBUG_ENTER("sp_head::reset_thd_mem_root");
m_thd_root= thd->mem_root;
thd->mem_root= &main_mem_root;
DBUG_PRINT("info", ("mem_root %p moved to thd mem root %p",
&mem_root, &thd->mem_root));
free_list= thd->free_list; // Keep the old list
thd->free_list= NULL; // Start a new one
m_thd= thd;
DBUG_VOID_RETURN;
}
void
sp_head::restore_thd_mem_root(THD *thd)
{
DBUG_ENTER("sp_head::restore_thd_mem_root");
/*
In some cases our parser detects a syntax error and calls
LEX::cleanup_lex_after_parse_error() method only after
finishing parsing the whole routine. In such a situation
sp_head::restore_thd_mem_root() will be called twice - the
first time as part of normal parsing process and the second
time by cleanup_lex_after_parse_error().
To avoid ruining active arena/mem_root state in this case we
skip restoration of old arena/mem_root if this method has been
already called for this routine.
*/
if (!m_thd)
DBUG_VOID_RETURN;
Item *flist= free_list; // The old list
set_query_arena(thd); // Get new free_list and mem_root
state= STMT_INITIALIZED_FOR_SP;
DBUG_PRINT("info", ("mem_root %p returned from thd mem root %p",
&mem_root, &thd->mem_root));
thd->free_list= flist; // Restore the old one
thd->mem_root= m_thd_root;
m_thd= NULL;
DBUG_VOID_RETURN;
}
/**
Check if a user has access right to a routine.
@param thd Thread handler
@param sp SP
@param full_access Set to 1 if the user has SELECT right to the
'mysql.proc' table or is the owner of the routine
@retval
false ok
@retval
true error
*/
bool check_show_routine_access(THD *thd, sp_head *sp, bool *full_access)
{
TABLE_LIST tables;
bzero((char*) &tables,sizeof(tables));
tables.db= MYSQL_SCHEMA_NAME;
tables.table_name= MYSQL_PROC_NAME;
tables.alias= MYSQL_PROC_NAME;
*full_access= ((!check_table_access(thd, SELECT_ACL, &tables, FALSE,
1, TRUE) &&
(tables.grant.privilege & SELECT_ACL) != NO_ACL) ||
/* Check if user owns the routine. */
(!strcmp(sp->m_definer.user.str,
thd->security_ctx->priv_user) &&
!strcmp(sp->m_definer.host.str,
thd->security_ctx->priv_host)) ||
/* Check if current role or any of the sub-granted roles
own the routine. */
(sp->m_definer.host.length == 0 &&
(!strcmp(sp->m_definer.user.str,
thd->security_ctx->priv_role) ||
check_role_is_granted(thd->security_ctx->priv_role, NULL,
sp->m_definer.user.str))));
if (!*full_access)
return check_some_routine_access(thd, sp->m_db.str, sp->m_name.str,
sp->m_handler);
return 0;
}
/**
Collect metadata for SHOW CREATE statement for stored routines.
@param thd Thread context.
@param sph Stored routine handler
@param fields Item list to populate
*/
void
sp_head::show_create_routine_get_fields(THD *thd, const Sp_handler *sph,
List<Item> *fields)
{
const char *col1_caption= sph->show_create_routine_col1_caption();
const char *col3_caption= sph->show_create_routine_col3_caption();
MEM_ROOT *mem_root= thd->mem_root;
/* Send header. */
fields->push_back(new (mem_root)
Item_empty_string(thd, col1_caption, NAME_CHAR_LEN),
mem_root);
fields->push_back(new (mem_root)
Item_empty_string(thd, "sql_mode", 256),
mem_root);
{
/*
NOTE: the SQL statement field must be at least 1024 characters wide
in order not to confuse old clients.
*/
Item_empty_string *stmt_fld=
new (mem_root) Item_empty_string(thd, col3_caption, 1024);
stmt_fld->set_maybe_null();
fields->push_back(stmt_fld, mem_root);
}
fields->push_back(new (mem_root)
Item_empty_string(thd, "character_set_client",
MY_CS_NAME_SIZE),
mem_root);
fields->push_back(new (mem_root)
Item_empty_string(thd, "collation_connection",
MY_CS_NAME_SIZE),
mem_root);
fields->push_back(new (mem_root)
Item_empty_string(thd, "Database Collation",
MY_CS_NAME_SIZE),
mem_root);
}
/**
Implement SHOW CREATE statement for stored routines.
@param thd Thread context.
@param sph Stored routine handler
@return Error status.
@retval FALSE on success
@retval TRUE on error
*/
bool
sp_head::show_create_routine(THD *thd, const Sp_handler *sph)
{
const char *col1_caption= sph->show_create_routine_col1_caption();
const char *col3_caption= sph->show_create_routine_col3_caption();
bool err_status;
Protocol *protocol= thd->protocol;
List<Item> fields;
LEX_CSTRING sql_mode;
bool full_access;
MEM_ROOT *mem_root= thd->mem_root;
DBUG_ENTER("sp_head::show_create_routine");
DBUG_PRINT("info", ("routine %s", m_name.str));
if (check_show_routine_access(thd, this, &full_access))
DBUG_RETURN(TRUE);
sql_mode_string_representation(thd, m_sql_mode, &sql_mode);
/* Send header. */
fields.push_back(new (mem_root)
Item_empty_string(thd, col1_caption, NAME_CHAR_LEN),
thd->mem_root);
fields.push_back(new (mem_root)
Item_empty_string(thd, "sql_mode", (uint)sql_mode.length),
thd->mem_root);
{
/*
NOTE: the SQL statement field must be at least 1024 characters wide
in order not to confuse old clients.
*/
Item_empty_string *stmt_fld=
new (mem_root) Item_empty_string(thd, col3_caption,
(uint)MY_MAX(m_defstr.length, 1024));
stmt_fld->set_maybe_null();
fields.push_back(stmt_fld, thd->mem_root);
}
fields.push_back(new (mem_root)
Item_empty_string(thd, "character_set_client",
MY_CS_NAME_SIZE),
thd->mem_root);
fields.push_back(new (mem_root)
Item_empty_string(thd, "collation_connection",
MY_CS_NAME_SIZE),
thd->mem_root);
fields.push_back(new (mem_root)
Item_empty_string(thd, "Database Collation",
MY_CS_NAME_SIZE),
thd->mem_root);
if (protocol->send_result_set_metadata(&fields,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
{
DBUG_RETURN(TRUE);
}
/* Send data. */
protocol->prepare_for_resend();
protocol->store(m_name.str, m_name.length, system_charset_info);
protocol->store(sql_mode.str, sql_mode.length, system_charset_info);
if (full_access)
protocol->store(m_defstr.str, m_defstr.length,
m_creation_ctx->get_client_cs());
else
protocol->store_null();
protocol->store(&m_creation_ctx->get_client_cs()->cs_name,
system_charset_info);
protocol->store(&m_creation_ctx->get_connection_cl()->coll_name,
system_charset_info);
protocol->store(&m_creation_ctx->get_db_cl()->coll_name,
system_charset_info);
err_status= protocol->write();
if (!err_status)
my_eof(thd);
DBUG_RETURN(err_status);
}
/**
Add instruction to SP.
@param instr Instruction
*/
int sp_head::add_instr(sp_instr *instr)
{
instr->free_list= m_thd->free_list;
m_thd->free_list= 0;
/*
Memory root of every instruction is designated for permanent
transformations (optimizations) made on the parsed tree during
the first execution. It points to the memory root of the
entire stored procedure, as their life span is equal.
*/
instr->mem_root= &main_mem_root;
instr->m_lineno= m_thd->m_parser_state->m_lip.yylineno;
return insert_dynamic(&m_instr, (uchar*)&instr);
}
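/**
  Add an unconditional jump instruction. The variant without an explicit
  destination leaves it to be resolved later via backpatching.
*/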
bool sp_head::add_instr_jump(THD *thd, sp_pcontext *spcont)
{
sp_instr_jump *i= new (thd->mem_root) sp_instr_jump(instructions(), spcont);
return i == NULL || add_instr(i);
}
bool sp_head::add_instr_jump(THD *thd, sp_pcontext *spcont, uint dest)
{
sp_instr_jump *i= new (thd->mem_root) sp_instr_jump(instructions(),
spcont, dest);
return i == NULL || add_instr(i);
}
bool sp_head::add_instr_jump_forward_with_backpatch(THD *thd,
sp_pcontext *spcont,
sp_label *lab)
{
sp_instr_jump *i= new (thd->mem_root) sp_instr_jump(instructions(), spcont);
if (i == NULL || add_instr(i))
return true;
push_backpatch(thd, i, lab);
return false;
}
bool sp_head::add_instr_freturn(THD *thd, sp_pcontext *spcont,
Item *item, LEX *lex)
{
sp_instr_freturn *i= new (thd->mem_root)
sp_instr_freturn(instructions(), spcont, item,
m_return_field_def.type_handler(), lex);
if (i == NULL || add_instr(i))
return true;
m_flags|= sp_head::HAS_RETURN;
return false;
}
bool sp_head::add_instr_preturn(THD *thd, sp_pcontext *spcont)
{
sp_instr_preturn *i= new (thd->mem_root)
sp_instr_preturn(instructions(), spcont);
if (i == NULL || add_instr(i))
return true;
return false;
}
/*
Replace an instruction at position to "no operation".
@param thd - use mem_root of this THD for "new".
@param ip - position of the operation
@returns - true on error, false on success
When we need to remove an instruction that during compilation
turned out to be useless (typically a useless jump), we replace
it with a jump to exactly the next instruction.
Such jumps are later removed during sp_head::optimize().
QQ: Perhaps we need a dedicated sp_instr_nop for this purpose.
*/
bool sp_head::replace_instr_to_nop(THD *thd, uint ip)
{
sp_instr *instr= get_instr(ip);
sp_instr_jump *nop= new (thd->mem_root) sp_instr_jump(instr->m_ip,
instr->m_ctx,
instr->m_ip + 1);
if (!nop)
return true;
delete instr;
set_dynamic(&m_instr, (uchar *) &nop, ip);
return false;
}
/**
Do some minimal optimization of the code:
-# Mark used instructions
-# While doing this, shortcut jumps to jump instructions
-# Compact the code, removing unused instructions.
This is the main mark and move loop; it relies on the following methods
in sp_instr and its subclasses:
- opt_mark() : Mark instruction as reachable
- opt_shortcut_jump(): Shortcut jumps to the final destination;
used by opt_mark().
- opt_move() : Update moved instruction
- set_destination() : Set the new destination (jump instructions only)
*/
void sp_head::optimize()
{
List<sp_instr> bp;
sp_instr *i;
uint src, dst;
DBUG_EXECUTE_IF("sp_head_optimize_disable", return; );
opt_mark();
bp.empty();
src= dst= 0;
while ((i= get_instr(src)))
{
if (! i->marked)
{
delete i;
src+= 1;
}
else
{
if (src != dst)
{
/* Move the instruction and update prev. jumps */
sp_instr *ibp;
List_iterator_fast<sp_instr> li(bp);
set_dynamic(&m_instr, (uchar*)&i, dst);
while ((ibp= li++))
{
sp_instr_opt_meta *im= static_cast<sp_instr_opt_meta *>(ibp);
im->set_destination(src, dst);
}
}
i->opt_move(dst, &bp);
src+= 1;
dst+= 1;
}
}
m_instr.elements= dst;
bp.empty();
}
void sp_head::add_mark_lead(uint ip, List<sp_instr> *leads)
{
sp_instr *i= get_instr(ip);
if (i && ! i->marked)
leads->push_front(i);
}
void
sp_head::opt_mark()
{
uint ip;
sp_instr *i;
List<sp_instr> leads;
/*
Forward flow analysis algorithm in the instruction graph:
- first, add the entry point in the graph (the first instruction) to the
'leads' list of paths to explore.
- while there are still leads to explore:
- pick one lead, and follow the path forward. Mark instruction reached.
Stop only if the end of the routine is reached, or the path converge
to code already explored (marked).
- while following a path, collect in the 'leads' list any fork to
another path (caused by conditional jumps instructions), so that these
paths can be explored as well.
*/
/* Add the entry point */
i= get_instr(0);
leads.push_front(i);
/* For each path of code ... */
while (leads.elements != 0)
{
i= leads.pop();
/* Mark the entire path, collecting new leads. */
while (i && ! i->marked)
{
ip= i->opt_mark(this, & leads);
i= get_instr(ip);
}
}
}
#ifndef DBUG_OFF
/**
Return the routine instructions as a result set.
@return
0 if ok, !=0 on error.
*/
int
sp_head::show_routine_code(THD *thd)
{
Protocol *protocol= thd->protocol;
char buff[2048];
String buffer(buff, sizeof(buff), system_charset_info);
List<Item> field_list;
sp_instr *i;
bool full_access;
int res= 0;
uint ip;
DBUG_ENTER("sp_head::show_routine_code");
DBUG_PRINT("info", ("procedure: %s", m_name.str));
if (check_show_routine_access(thd, this, &full_access) || !full_access)
DBUG_RETURN(1);
field_list.push_back(new (thd->mem_root) Item_uint(thd, "Pos", 9),
thd->mem_root);
// Use at least 1024 so as not to confuse old clients
field_list.push_back(new (thd->mem_root)
Item_empty_string(thd, "Instruction",
MY_MAX(buffer.length(), 1024)),
thd->mem_root);
if (protocol->send_result_set_metadata(&field_list, Protocol::SEND_NUM_ROWS |
Protocol::SEND_EOF))
DBUG_RETURN(1);
for (ip= 0; (i = get_instr(ip)) ; ip++)
{
/*
Consistency check. If these are different something went wrong
during optimization.
*/
if (ip != i->m_ip)
{
const char *format= "Instruction at position %u has m_ip=%u";
char tmp[sizeof(format) + 2*SP_INSTR_UINT_MAXLEN + 1];
my_snprintf(tmp, sizeof(tmp), format, ip, i->m_ip);
/*
Since this is for debugging purposes only, we don't bother to
introduce a special error code for it.
*/
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, tmp);
}
protocol->prepare_for_resend();
protocol->store_long(ip);
buffer.set("", 0, system_charset_info);
i->print(&buffer);
protocol->store(buffer.ptr(), buffer.length(), system_charset_info);
if ((res= protocol->write()))
break;
}
if (!res)
my_eof(thd);
DBUG_RETURN(res);
}
#endif // ifndef DBUG_OFF
/**
Prepare LEX and thread for execution of instruction, if requested open
and lock LEX's tables, execute instruction's core function, perform
cleanup afterwards.
@param thd thread context
@param nextp out - next instruction
@param open_tables if TRUE then check read access to tables in LEX's table
list and open and lock them (used in instructions which
need to calculate some expression and don't execute
complete statement).
@param sp_instr instruction for which we prepare context, and which core
function execute by calling its exec_core() method.
@note
We are not saving/restoring some parts of THD which may need this because
we do this once for whole routine execution in sp_head::execute().
@return
0/non-0 - Success/Failure
*/
int
sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp,
bool open_tables, sp_instr* instr)
{
int res= 0;
DBUG_ENTER("reset_lex_and_exec_core");
/*
The flag is saved at the entry to the following substatement.
It's reset further in the common code part.
It's merged with the saved parent's value at the exit of this func.
*/
bool parent_modified_non_trans_table=
thd->transaction->stmt.modified_non_trans_table;
unsigned int parent_unsafe_rollback_flags=
thd->transaction->stmt.m_unsafe_rollback_flags;
thd->transaction->stmt.modified_non_trans_table= FALSE;
thd->transaction->stmt.m_unsafe_rollback_flags= 0;
DBUG_ASSERT(!thd->derived_tables);
DBUG_ASSERT(thd->Item_change_list::is_empty());
/*
Use our own lex.
We should not save old value since it is saved/restored in
sp_head::execute() when we are entering/leaving routine.
*/
thd->lex= m_lex;
thd->set_query_id(next_query_id());
if (thd->locked_tables_mode <= LTM_LOCK_TABLES)
{
/*
This statement will enter/leave prelocked mode on its own.
Entering prelocked mode changes table list and related members
of LEX, so we'll need to restore them.
*/
if (lex_query_tables_own_last)
{
/*
We've already entered/left prelocked mode with this statement.
Attach the list of tables that need to be prelocked and mark m_lex
as having such list attached.
*/
*lex_query_tables_own_last= prelocking_tables;
m_lex->mark_as_requiring_prelocking(lex_query_tables_own_last);
}
}
reinit_stmt_before_use(thd, m_lex);
#ifndef EMBEDDED_LIBRARY
/*
If there was an instruction which changed the session tracking state,
the changed tracking state is sent to the client in the OK packet.
This changes the result sent to the client, and the result may differ
even for identical query text, so we can't cache such results.
*/
if ((thd->client_capabilities & CLIENT_SESSION_TRACK) &&
(thd->server_status & SERVER_SESSION_STATE_CHANGED))
thd->lex->safe_to_cache_query= 0;
#endif
Opt_trace_start ots(thd);
ots.init(thd, m_lex->query_tables, SQLCOM_SELECT, &m_lex->var_list,
NULL, 0, thd->variables.character_set_client);
Json_writer_object trace_command(thd);
Json_writer_array trace_command_steps(thd, "steps");
if (open_tables)
res= instr->exec_open_and_lock_tables(thd, m_lex->query_tables);
if (likely(!res))
{
res= instr->exec_core(thd, nextp);
DBUG_PRINT("info",("exec_core returned: %d", res));
}
/*
Call after unit->cleanup() to close open table
key read.
*/
if (open_tables)
{
m_lex->unit.cleanup();
/* Here we also commit or rollback the current statement. */
if (! thd->in_sub_stmt)
{
thd->get_stmt_da()->set_overwrite_status(true);
thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd);
thd->get_stmt_da()->set_overwrite_status(false);
}
close_thread_tables(thd);
thd_proc_info(thd, 0);
if (! thd->in_sub_stmt)
{
if (thd->transaction_rollback_request)
{
trans_rollback_implicit(thd);
thd->release_transactional_locks();
}
else if (! thd->in_multi_stmt_transaction_mode())
thd->release_transactional_locks();
else
thd->mdl_context.release_statement_locks();
}
}
//TODO: why is this here if log_slow_query is in sp_instr_stmt::execute?
delete_explain_query(m_lex);
if (m_lex->query_tables_own_last)
{
/*
We've entered and left prelocking mode when executing statement
stored in m_lex.
m_lex->query_tables(->next_global)* list now has a 'tail' - a list
of tables that are added for prelocking. (If this is the first
execution, the 'tail' was added by open_tables(), otherwise we've
attached it above in this function).
Now we'll save the 'tail', and detach it.
*/
lex_query_tables_own_last= m_lex->query_tables_own_last;
prelocking_tables= *lex_query_tables_own_last;
*lex_query_tables_own_last= NULL;
m_lex->mark_as_requiring_prelocking(NULL);
}
thd->rollback_item_tree_changes();
/*
Update the state of the active arena if there were no errors during
the open_tables stage.
*/
if (likely(!res) || likely(!thd->is_error()))
thd->stmt_arena->state= Query_arena::STMT_EXECUTED;
/*
Merge what the substatement gained back into the saved parent's
values, as far as it is needed.
*/
thd->transaction->stmt.modified_non_trans_table |= parent_modified_non_trans_table;
thd->transaction->stmt.m_unsafe_rollback_flags |= parent_unsafe_rollback_flags;
TRANSACT_TRACKER(add_trx_state_from_thd(thd));
/*
Unlike for prepared statements, we should not call Item destructors
for newly created items after execution of each instruction in a
stored routine. This is because SPs often create Items (like
Item_int, Item_string, etc.) when they want to store a value in a
local variable, pass a return value, and so on, so their lifetime
should be longer than one instruction.
cleanup_items() is called in sp_head::execute().
*/
thd->lex->restore_set_statement_var();
DBUG_RETURN(res || thd->is_error());
}
int sp_lex_keeper::cursor_reset_lex_and_exec_core(THD *thd, uint *nextp,
bool open_tables,
sp_instr *instr)
{
Query_arena *old_arena= thd->stmt_arena;
/*
Get the Query_arena from the cursor statement LEX, which contains
the free_list of the query, so new items (if any) are stored in
the right free_list, and we can cleanup after each cursor operation,
e.g. open or cursor_copy_struct (for cursor%ROWTYPE variables).
*/
thd->stmt_arena= m_lex->query_arena();
int res= reset_lex_and_exec_core(thd, nextp, open_tables, instr);
cleanup_items(thd->stmt_arena->free_list);
thd->stmt_arena= old_arena;
return res;
}
/*
sp_instr class functions
*/
int sp_instr::exec_open_and_lock_tables(THD *thd, TABLE_LIST *tables)
{
int result;
/*
Check whether we have access to the tables for this statement
and open and lock them before executing the instruction's core function.
*/
if (thd->open_temporary_tables(tables) ||
check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE)
|| open_and_lock_tables(thd, tables, TRUE, 0))
result= -1;
else
result= 0;
/* Prepare all derived tables/views to catch possible errors. */
if (!result)
result= mysql_handle_derived(thd->lex, DT_PREPARE) ? -1 : 0;
return result;
}
uint sp_instr::get_cont_dest() const
{
return (m_ip+1);
}
int sp_instr::exec_core(THD *thd, uint *nextp)
{
DBUG_ASSERT(0);
return 0;
}
/*
sp_instr_stmt class functions
*/
PSI_statement_info sp_instr_stmt::psi_info=
{ 0, "stmt", 0};
int
sp_instr_stmt::execute(THD *thd, uint *nextp)
{
int res;
bool save_enable_slow_log;
const CSET_STRING query_backup= thd->query_string;
Sub_statement_state backup_state;
DBUG_ENTER("sp_instr_stmt::execute");
DBUG_PRINT("info", ("command: %d", m_lex_keeper.sql_command()));
MYSQL_SET_STATEMENT_TEXT(thd->m_statement_psi, m_query.str, static_cast<uint>(m_query.length));
#if defined(ENABLED_PROFILING)
/* This s-p instr is profilable and will be captured. */
thd->profiling.set_query_source(m_query.str, m_query.length);
#endif
save_enable_slow_log= thd->enable_slow_log;
thd->store_slow_query_state(&backup_state);
if (!(res= alloc_query(thd, m_query.str, m_query.length)) &&
!(res=subst_spvars(thd, this, &m_query)))
{
/*
(the order of query cache and subst_spvars calls is irrelevant because
queries with SP vars can't be cached)
*/
general_log_write(thd, COM_QUERY, thd->query(), thd->query_length());
if (query_cache_send_result_to_client(thd, thd->query(),
thd->query_length()) <= 0)
{
thd->reset_slow_query_state();
res= m_lex_keeper.reset_lex_and_exec_core(thd, nextp, FALSE, this);
bool log_slow= !res && thd->enable_slow_log;
/* Finalize server status flags after executing a statement. */
if (log_slow || thd->get_stmt_da()->is_eof())
thd->update_server_status();
if (thd->get_stmt_da()->is_eof())
thd->protocol->end_statement();
query_cache_end_of_result(thd);
mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_STATUS,
thd->get_stmt_da()->is_error() ?
thd->get_stmt_da()->sql_errno() : 0,
command_name[COM_QUERY].str);
if (log_slow)
log_slow_statement(thd);
/*
Restore enable_slow_log, which can be changed by an admin or call
command
*/
thd->enable_slow_log= save_enable_slow_log;
/* Add the number of rows to thd for the 'call' statistics */
thd->add_slow_query_state(&backup_state);
}
else
{
/* change statistics */
enum_sql_command save_sql_command= thd->lex->sql_command;
thd->lex->sql_command= SQLCOM_SELECT;
status_var_increment(thd->status_var.com_stat[SQLCOM_SELECT]);
thd->update_stats();
thd->lex->sql_command= save_sql_command;
*nextp= m_ip+1;
}
thd->set_query(query_backup);
thd->query_name_consts= 0;
if (likely(!thd->is_error()))
{
res= 0;
thd->get_stmt_da()->reset_diagnostics_area();
}
}
DBUG_RETURN(res || thd->is_error());
}
void
sp_instr_stmt::print(String *str)
{
size_t i, len;
/* stmt CMD "..." */
if (str->reserve(SP_STMT_PRINT_MAXLEN+SP_INSTR_UINT_MAXLEN+8))
return;
str->qs_append(STRING_WITH_LEN("stmt "));
str->qs_append((uint)m_lex_keeper.sql_command());
str->qs_append(STRING_WITH_LEN(" \""));
len= m_query.length;
/*
Print the query string (but not too much of it), just to indicate which
statement it is.
*/
if (len > SP_STMT_PRINT_MAXLEN)
len= SP_STMT_PRINT_MAXLEN-3;
/* Copy the query string and replace '\n' with ' ' in the process */
for (i= 0 ; i < len ; i++)
{
char c= m_query.str[i];
if (c == '\n')
c= ' ';
str->qs_append(c);
}
if (m_query.length > SP_STMT_PRINT_MAXLEN)
str->qs_append(STRING_WITH_LEN("...")); /* Indicate truncated string */
str->qs_append('"');
}
int
sp_instr_stmt::exec_core(THD *thd, uint *nextp)
{
MYSQL_QUERY_EXEC_START(thd->query(),
thd->thread_id,
thd->get_db(),
&thd->security_ctx->priv_user[0],
(char *)thd->security_ctx->host_or_ip,
3);
int res= mysql_execute_command(thd);
MYSQL_QUERY_EXEC_DONE(res);
*nextp= m_ip+1;
return res;
}
/*
sp_instr_set class functions
*/
PSI_statement_info sp_instr_set::psi_info=
{ 0, "set", 0};
int
sp_instr_set::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_set::execute");
DBUG_PRINT("info", ("offset: %u", m_offset));
DBUG_RETURN(m_lex_keeper.reset_lex_and_exec_core(thd, nextp, TRUE, this));
}
sp_rcontext *sp_instr_set::get_rcontext(THD *thd) const
{
return m_rcontext_handler->get_rcontext(thd->spcont);
}
int
sp_instr_set::exec_core(THD *thd, uint *nextp)
{
int res= get_rcontext(thd)->set_variable(thd, m_offset, &m_value);
delete_explain_query(thd->lex);
*nextp = m_ip+1;
return res;
}
void
sp_instr_set::print(String *str)
{
/* set name@offset ... */
size_t rsrv = SP_INSTR_UINT_MAXLEN+6;
sp_variable *var = m_ctx->find_variable(m_offset);
const LEX_CSTRING *prefix= m_rcontext_handler->get_name_prefix();
/* 'var' should always be non-null, but just in case... */
if (var)
rsrv+= var->name.length + prefix->length;
if (str->reserve(rsrv))
return;
str->qs_append(STRING_WITH_LEN("set "));
str->qs_append(prefix->str, prefix->length);
if (var)
{
str->qs_append(&var->name);
str->qs_append('@');
}
str->qs_append(m_offset);
str->qs_append(' ');
m_value->print(str, enum_query_type(QT_ORDINARY |
QT_ITEM_ORIGINAL_FUNC_NULLIF));
}
/*
sp_instr_set_field class functions
*/
int
sp_instr_set_row_field::exec_core(THD *thd, uint *nextp)
{
int res= get_rcontext(thd)->set_variable_row_field(thd, m_offset,
m_field_offset,
&m_value);
delete_explain_query(thd->lex);
*nextp= m_ip + 1;
return res;
}
void
sp_instr_set_row_field::print(String *str)
{
/* set name@offset[field_offset] ... */
size_t rsrv= SP_INSTR_UINT_MAXLEN + 6 + 6 + 3;
sp_variable *var= m_ctx->find_variable(m_offset);
const LEX_CSTRING *prefix= m_rcontext_handler->get_name_prefix();
DBUG_ASSERT(var);
DBUG_ASSERT(var->field_def.is_row());
const Column_definition *def=
var->field_def.row_field_definitions()->elem(m_field_offset);
DBUG_ASSERT(def);
rsrv+= var->name.length + def->field_name.length + prefix->length;
if (str->reserve(rsrv))
return;
str->qs_append(STRING_WITH_LEN("set "));
str->qs_append(prefix);
str->qs_append(&var->name);
str->qs_append('.');
str->qs_append(&def->field_name);
str->qs_append('@');
str->qs_append(m_offset);
str->qs_append('[');
str->qs_append(m_field_offset);
str->qs_append(']');
str->qs_append(' ');
m_value->print(str, enum_query_type(QT_ORDINARY |
QT_ITEM_ORIGINAL_FUNC_NULLIF));
}
/*
sp_instr_set_field_by_name class functions
*/
int
sp_instr_set_row_field_by_name::exec_core(THD *thd, uint *nextp)
{
int res= get_rcontext(thd)->set_variable_row_field_by_name(thd, m_offset,
m_field_name,
&m_value);
delete_explain_query(thd->lex);
*nextp= m_ip + 1;
return res;
}
void
sp_instr_set_row_field_by_name::print(String *str)
{
/* set name.field@offset["field"] ... */
size_t rsrv= SP_INSTR_UINT_MAXLEN + 6 + 6 + 3 + 2;
sp_variable *var= m_ctx->find_variable(m_offset);
const LEX_CSTRING *prefix= m_rcontext_handler->get_name_prefix();
DBUG_ASSERT(var);
DBUG_ASSERT(var->field_def.is_table_rowtype_ref() ||
var->field_def.is_cursor_rowtype_ref());
rsrv+= var->name.length + 2 * m_field_name.length + prefix->length;
if (str->reserve(rsrv))
return;
str->qs_append(STRING_WITH_LEN("set "));
str->qs_append(prefix);
str->qs_append(&var->name);
str->qs_append('.');
str->qs_append(&m_field_name);
str->qs_append('@');
str->qs_append(m_offset);
str->qs_append("[\"",2);
str->qs_append(&m_field_name);
str->qs_append("\"]",2);
str->qs_append(' ');
m_value->print(str, enum_query_type(QT_ORDINARY |
QT_ITEM_ORIGINAL_FUNC_NULLIF));
}
/*
sp_instr_set_trigger_field class functions
*/
PSI_statement_info sp_instr_set_trigger_field::psi_info=
{ 0, "set_trigger_field", 0};
int
sp_instr_set_trigger_field::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_set_trigger_field::execute");
thd->count_cuted_fields= CHECK_FIELD_ERROR_FOR_NULL;
DBUG_RETURN(m_lex_keeper.reset_lex_and_exec_core(thd, nextp, TRUE, this));
}
int
sp_instr_set_trigger_field::exec_core(THD *thd, uint *nextp)
{
Abort_on_warning_instant_set aws(thd, thd->is_strict_mode() && !thd->lex->ignore);
const int res= (trigger_field->set_value(thd, &value) ? -1 : 0);
*nextp = m_ip+1;
return res;
}
void
sp_instr_set_trigger_field::print(String *str)
{
str->append(STRING_WITH_LEN("set_trigger_field "));
trigger_field->print(str, enum_query_type(QT_ORDINARY |
QT_ITEM_ORIGINAL_FUNC_NULLIF));
str->append(STRING_WITH_LEN(":="));
value->print(str, enum_query_type(QT_ORDINARY |
QT_ITEM_ORIGINAL_FUNC_NULLIF));
}
/*
sp_instr_opt_meta
*/
uint sp_instr_opt_meta::get_cont_dest() const
{
return m_cont_dest;
}
/*
sp_instr_jump class functions
*/
PSI_statement_info sp_instr_jump::psi_info=
{ 0, "jump", 0};
int
sp_instr_jump::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_jump::execute");
DBUG_PRINT("info", ("destination: %u", m_dest));
*nextp= m_dest;
DBUG_RETURN(0);
}
void
sp_instr_jump::print(String *str)
{
/* jump dest */
if (str->reserve(SP_INSTR_UINT_MAXLEN+5))
return;
str->qs_append(STRING_WITH_LEN("jump "));
str->qs_append(m_dest);
}
uint
sp_instr_jump::opt_mark(sp_head *sp, List<sp_instr> *leads)
{
m_dest= opt_shortcut_jump(sp, this);
if (m_dest != m_ip+1) /* Jumping to following instruction? */
marked= 1;
m_optdest= sp->get_instr(m_dest);
return m_dest;
}
uint
sp_instr_jump::opt_shortcut_jump(sp_head *sp, sp_instr *start)
{
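  /*
    Follow a chain of jump instructions to its final destination so this
    jump can be retargeted directly. Stop if the chain loops back to the
    starting instruction or to this instruction itself, or when the
    destination no longer changes.
  */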
uint dest= m_dest;
sp_instr *i;
while ((i= sp->get_instr(dest)))
{
uint ndest;
if (start == i || this == i)
break;
ndest= i->opt_shortcut_jump(sp, start);
if (ndest == dest)
break;
dest= ndest;
}
return dest;
}
void
sp_instr_jump::opt_move(uint dst, List<sp_instr> *bp)
{
if (m_dest > m_ip)
bp->push_back(this); // Forward
else if (m_optdest)
m_dest= m_optdest->m_ip; // Backward
m_ip= dst;
}
/*
sp_instr_jump_if_not class functions
*/
PSI_statement_info sp_instr_jump_if_not::psi_info=
{ 0, "jump_if_not", 0};
int
sp_instr_jump_if_not::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_jump_if_not::execute");
DBUG_PRINT("info", ("destination: %u", m_dest));
DBUG_RETURN(m_lex_keeper.reset_lex_and_exec_core(thd, nextp, TRUE, this));
}
int
sp_instr_jump_if_not::exec_core(THD *thd, uint *nextp)
{
Item *it;
int res;
it= thd->sp_prepare_func_item(&m_expr);
if (! it)
{
res= -1;
}
else
{
res= 0;
if (! it->val_bool())
*nextp = m_dest;
else
*nextp = m_ip+1;
}
return res;
}
void
sp_instr_jump_if_not::print(String *str)
{
/* jump_if_not dest(cont) ... */
if (str->reserve(2*SP_INSTR_UINT_MAXLEN+14+32)) // Add some for the expr. too
return;
str->qs_append(STRING_WITH_LEN("jump_if_not "));
str->qs_append(m_dest);
str->qs_append('(');
str->qs_append(m_cont_dest);
str->qs_append(STRING_WITH_LEN(") "));
m_expr->print(str, enum_query_type(QT_ORDINARY |
QT_ITEM_ORIGINAL_FUNC_NULLIF));
}
uint
sp_instr_jump_if_not::opt_mark(sp_head *sp, List<sp_instr> *leads)
{
sp_instr *i;
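  /*
    Shortcut both successors - the jump target (m_dest) and the continuation
    destination (m_cont_dest) - through any chains of jumps, then record
    both of them as leads for the mark phase.
  */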
marked= 1;
if ((i= sp->get_instr(m_dest)))
{
m_dest= i->opt_shortcut_jump(sp, this);
m_optdest= sp->get_instr(m_dest);
}
sp->add_mark_lead(m_dest, leads);
if ((i= sp->get_instr(m_cont_dest)))
{
m_cont_dest= i->opt_shortcut_jump(sp, this);
m_cont_optdest= sp->get_instr(m_cont_dest);
}
sp->add_mark_lead(m_cont_dest, leads);
return m_ip+1;
}
void
sp_instr_jump_if_not::opt_move(uint dst, List<sp_instr> *bp)
{
/*
cont. destinations may point backwards after shortcutting jumps
during the mark phase. If it's still pointing forwards, only
push this for backpatching if sp_instr_jump::opt_move() will not
do it (i.e. if the m_dest points backwards).
*/
if (m_cont_dest > m_ip)
{ // Forward
if (m_dest < m_ip)
bp->push_back(this);
}
else if (m_cont_optdest)
m_cont_dest= m_cont_optdest->m_ip; // Backward
/* This will take care of m_dest and m_ip */
sp_instr_jump::opt_move(dst, bp);
}
/*
sp_instr_freturn class functions
*/
PSI_statement_info sp_instr_freturn::psi_info=
{ 0, "freturn", 0};
int
sp_instr_freturn::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_freturn::execute");
DBUG_RETURN(m_lex_keeper.reset_lex_and_exec_core(thd, nextp, TRUE, this));
}
int
sp_instr_freturn::exec_core(THD *thd, uint *nextp)
{
/*
RETURN is a "procedure statement" (in terms of the SQL standard).
That means, Diagnostics Area should be clean before its execution.
*/
if (!(thd->variables.sql_mode & MODE_ORACLE))
{
/*
Don't clean warnings in ORACLE mode,
as they are needed for SQLCODE and SQLERRM:
BEGIN
SELECT a INTO a FROM t1;
RETURN 'No exception ' || SQLCODE || ' ' || SQLERRM;
EXCEPTION WHEN NO_DATA_FOUND THEN
RETURN 'Exception ' || SQLCODE || ' ' || SQLERRM;
END;
*/
Diagnostics_area *da= thd->get_stmt_da();
da->clear_warning_info(da->warning_info_id());
}
/*
Change <next instruction pointer>, so that this will be the last
instruction in the stored function.
*/
*nextp= UINT_MAX;
/*
Evaluate the value of return expression and store it in current runtime
context.
    NOTE: It's necessary to evaluate the result item right here, because we
    must do it within the scope of execution of the current context/block.
*/
return thd->spcont->set_return_value(thd, &m_value);
}
void
sp_instr_freturn::print(String *str)
{
/* freturn type expr... */
if (str->reserve(1024+8+32)) // Add some for the expr. too
return;
str->qs_append(STRING_WITH_LEN("freturn "));
LEX_CSTRING name= m_type_handler->name().lex_cstring();
str->qs_append(&name);
str->qs_append(' ');
m_value->print(str, enum_query_type(QT_ORDINARY |
QT_ITEM_ORIGINAL_FUNC_NULLIF));
}
/*
sp_instr_preturn class functions
*/
PSI_statement_info sp_instr_preturn::psi_info=
{ 0, "preturn", 0};
int
sp_instr_preturn::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_preturn::execute");
*nextp= UINT_MAX;
DBUG_RETURN(0);
}
void
sp_instr_preturn::print(String *str)
{
str->append(STRING_WITH_LEN("preturn"));
}
/*
sp_instr_hpush_jump class functions
*/
PSI_statement_info sp_instr_hpush_jump::psi_info=
{ 0, "hpush_jump", 0};
int
sp_instr_hpush_jump::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_hpush_jump::execute");
int ret= thd->spcont->push_handler(this);
*nextp= m_dest;
DBUG_RETURN(ret);
}
void
sp_instr_hpush_jump::print(String *str)
{
/* hpush_jump dest fsize type */
if (str->reserve(SP_INSTR_UINT_MAXLEN*2 + 21))
return;
str->qs_append(STRING_WITH_LEN("hpush_jump "));
str->qs_append(m_dest);
str->qs_append(' ');
str->qs_append(m_frame);
switch (m_handler->type) {
case sp_handler::EXIT:
str->qs_append(STRING_WITH_LEN(" EXIT"));
break;
case sp_handler::CONTINUE:
str->qs_append(STRING_WITH_LEN(" CONTINUE"));
break;
default:
// The handler type must be either CONTINUE or EXIT.
DBUG_ASSERT(0);
}
}
uint
sp_instr_hpush_jump::opt_mark(sp_head *sp, List<sp_instr> *leads)
{
sp_instr *i;
marked= 1;
if ((i= sp->get_instr(m_dest)))
{
m_dest= i->opt_shortcut_jump(sp, this);
m_optdest= sp->get_instr(m_dest);
}
sp->add_mark_lead(m_dest, leads);
/*
For continue handlers, all instructions in the scope of the handler
are possible leads. For example, the instruction after freturn might
be executed if the freturn triggers the condition handled by the
continue handler.
m_dest marks the start of the handler scope. It's added as a lead
above, so we start on m_dest+1 here.
m_opt_hpop is the hpop marking the end of the handler scope.
*/
if (m_handler->type == sp_handler::CONTINUE)
{
for (uint scope_ip= m_dest+1; scope_ip <= m_opt_hpop; scope_ip++)
sp->add_mark_lead(scope_ip, leads);
}
return m_ip+1;
}
/*
sp_instr_hpop class functions
*/
PSI_statement_info sp_instr_hpop::psi_info=
{ 0, "hpop", 0};
int
sp_instr_hpop::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_hpop::execute");
thd->spcont->pop_handlers(m_count);
*nextp= m_ip+1;
DBUG_RETURN(0);
}
void
sp_instr_hpop::print(String *str)
{
/* hpop count */
if (str->reserve(SP_INSTR_UINT_MAXLEN+5))
return;
str->qs_append(STRING_WITH_LEN("hpop "));
str->qs_append(m_count);
}
/*
sp_instr_hreturn class functions
*/
PSI_statement_info sp_instr_hreturn::psi_info=
{ 0, "hreturn", 0};
int
sp_instr_hreturn::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_hreturn::execute");
uint continue_ip= thd->spcont->exit_handler(thd->get_stmt_da());
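  /*
    An EXIT handler has an explicit return address in m_dest; a CONTINUE
    handler resumes at the instruction that was saved on the handler stack
    when the handler was activated.
  */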
*nextp= m_dest ? m_dest : continue_ip;
DBUG_RETURN(0);
}
void
sp_instr_hreturn::print(String *str)
{
/* hreturn framesize dest */
if (str->reserve(SP_INSTR_UINT_MAXLEN*2 + 9))
return;
str->qs_append(STRING_WITH_LEN("hreturn "));
if (m_dest)
{
// NOTE: this is legacy: hreturn instruction for EXIT handler
// should print out 0 as frame index.
str->qs_append(STRING_WITH_LEN("0 "));
str->qs_append(m_dest);
}
else
{
str->qs_append(m_frame);
}
}
uint
sp_instr_hreturn::opt_mark(sp_head *sp, List<sp_instr> *leads)
{
marked= 1;
if (m_dest)
{
/*
This is an EXIT handler; next instruction step is in m_dest.
*/
return m_dest;
}
/*
This is a CONTINUE handler; next instruction step will come from
the handler stack and not from opt_mark.
*/
return UINT_MAX;
}
/*
sp_instr_cpush class functions
*/
PSI_statement_info sp_instr_cpush::psi_info=
{ 0, "cpush", 0};
int
sp_instr_cpush::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_cpush::execute");
sp_cursor::reset(thd, &m_lex_keeper);
m_lex_keeper.disable_query_cache();
thd->spcont->push_cursor(this);
*nextp= m_ip+1;
DBUG_RETURN(false);
}
void
sp_instr_cpush::print(String *str)
{
const LEX_CSTRING *cursor_name= m_ctx->find_cursor(m_cursor);
/* cpush name@offset */
size_t rsrv= SP_INSTR_UINT_MAXLEN+7;
if (cursor_name)
rsrv+= cursor_name->length;
if (str->reserve(rsrv))
return;
str->qs_append(STRING_WITH_LEN("cpush "));
if (cursor_name)
{
str->qs_append(cursor_name->str, cursor_name->length);
str->qs_append('@');
}
str->qs_append(m_cursor);
}
/*
sp_instr_cpop class functions
*/
PSI_statement_info sp_instr_cpop::psi_info=
{ 0, "cpop", 0};
int
sp_instr_cpop::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_cpop::execute");
thd->spcont->pop_cursors(thd, m_count);
*nextp= m_ip+1;
DBUG_RETURN(0);
}
void
sp_instr_cpop::print(String *str)
{
/* cpop count */
if (str->reserve(SP_INSTR_UINT_MAXLEN+5))
return;
str->qs_append(STRING_WITH_LEN("cpop "));
str->qs_append(m_count);
}
/*
sp_instr_copen class functions
*/
/**
@todo
Assert that we either have an error or a cursor
*/
PSI_statement_info sp_instr_copen::psi_info=
{ 0, "copen", 0};
int
sp_instr_copen::execute(THD *thd, uint *nextp)
{
/*
We don't store a pointer to the cursor in the instruction to be
able to reuse the same instruction among different threads in future.
*/
sp_cursor *c= thd->spcont->get_cursor(m_cursor);
int res;
DBUG_ENTER("sp_instr_copen::execute");
if (! c)
res= -1;
else
{
sp_lex_keeper *lex_keeper= c->get_lex_keeper();
res= lex_keeper->cursor_reset_lex_and_exec_core(thd, nextp, FALSE, this);
/* TODO: Assert here that we either have an error or a cursor */
}
DBUG_RETURN(res);
}
int
sp_instr_copen::exec_core(THD *thd, uint *nextp)
{
sp_cursor *c= thd->spcont->get_cursor(m_cursor);
int res= c->open(thd);
*nextp= m_ip+1;
return res;
}
void
sp_instr_copen::print(String *str)
{
const LEX_CSTRING *cursor_name= m_ctx->find_cursor(m_cursor);
/* copen name@offset */
size_t rsrv= SP_INSTR_UINT_MAXLEN+7;
if (cursor_name)
rsrv+= cursor_name->length;
if (str->reserve(rsrv))
return;
str->qs_append(STRING_WITH_LEN("copen "));
if (cursor_name)
{
str->qs_append(cursor_name->str, cursor_name->length);
str->qs_append('@');
}
str->qs_append(m_cursor);
}
/*
sp_instr_cclose class functions
*/
PSI_statement_info sp_instr_cclose::psi_info=
{ 0, "cclose", 0};
int
sp_instr_cclose::execute(THD *thd, uint *nextp)
{
sp_cursor *c= thd->spcont->get_cursor(m_cursor);
int res;
DBUG_ENTER("sp_instr_cclose::execute");
if (! c)
res= -1;
else
res= c->close(thd);
*nextp= m_ip+1;
DBUG_RETURN(res);
}
void
sp_instr_cclose::print(String *str)
{
const LEX_CSTRING *cursor_name= m_ctx->find_cursor(m_cursor);
/* cclose name@offset */
size_t rsrv= SP_INSTR_UINT_MAXLEN+8;
if (cursor_name)
rsrv+= cursor_name->length;
if (str->reserve(rsrv))
return;
str->qs_append(STRING_WITH_LEN("cclose "));
if (cursor_name)
{
str->qs_append(cursor_name->str, cursor_name->length);
str->qs_append('@');
}
str->qs_append(m_cursor);
}
/*
sp_instr_cfetch class functions
*/
PSI_statement_info sp_instr_cfetch::psi_info=
{ 0, "cfetch", 0};
int
sp_instr_cfetch::execute(THD *thd, uint *nextp)
{
sp_cursor *c= thd->spcont->get_cursor(m_cursor);
int res;
Query_arena backup_arena;
DBUG_ENTER("sp_instr_cfetch::execute");
res= c ? c->fetch(thd, &m_varlist, m_error_on_no_data) : -1;
*nextp= m_ip+1;
DBUG_RETURN(res);
}
void
sp_instr_cfetch::print(String *str)
{
List_iterator_fast<sp_variable> li(m_varlist);
sp_variable *pv;
const LEX_CSTRING *cursor_name= m_ctx->find_cursor(m_cursor);
/* cfetch name@offset vars... */
size_t rsrv= SP_INSTR_UINT_MAXLEN+8;
if (cursor_name)
rsrv+= cursor_name->length;
if (str->reserve(rsrv))
return;
str->qs_append(STRING_WITH_LEN("cfetch "));
if (cursor_name)
{
str->qs_append(cursor_name->str, cursor_name->length);
str->qs_append('@');
}
str->qs_append(m_cursor);
while ((pv= li++))
{
if (str->reserve(pv->name.length+SP_INSTR_UINT_MAXLEN+2))
return;
str->qs_append(' ');
str->qs_append(&pv->name);
str->qs_append('@');
str->qs_append(pv->offset);
}
}
/*
sp_instr_agg_cfetch class functions
*/
PSI_statement_info sp_instr_agg_cfetch::psi_info=
{ 0, "agg_cfetch", 0};
int
sp_instr_agg_cfetch::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_agg_cfetch::execute");
int res= 0;
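  /*
    This instruction cooperates with the aggregate function caller:
    - on the first call for a group, remember the next instruction in
      instr_ptr and continue;
    - when the function body asks for the next row, set pause_state and
      suspend;
    - when execution resumes, clear pause_state and either continue with the
      next instruction or report ER_SP_FETCH_NO_DATA if the last row was
      already sent.
  */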
if (!thd->spcont->instr_ptr)
{
*nextp= m_ip+1;
thd->spcont->instr_ptr= m_ip + 1;
}
else if (!thd->spcont->pause_state)
thd->spcont->pause_state= TRUE;
else
{
thd->spcont->pause_state= FALSE;
if (thd->server_status & SERVER_STATUS_LAST_ROW_SENT)
{
my_message(ER_SP_FETCH_NO_DATA,
ER_THD(thd, ER_SP_FETCH_NO_DATA), MYF(0));
res= -1;
thd->spcont->quit_func= TRUE;
}
else
*nextp= m_ip + 1;
}
DBUG_RETURN(res);
}
void
sp_instr_agg_cfetch::print(String *str)
{
uint rsrv= SP_INSTR_UINT_MAXLEN+11;
if (str->reserve(rsrv))
return;
str->qs_append(STRING_WITH_LEN("agg_cfetch"));
}
/*
sp_instr_cursor_copy_struct class functions
*/
/**
This methods processes cursor %ROWTYPE declarations, e.g.:
CURSOR cur IS SELECT * FROM t1;
rec cur%ROWTYPE;
and does the following:
- opens the cursor without copying data (materialization).
- copies the cursor structure to the associated %ROWTYPE variable.
*/
PSI_statement_info sp_instr_cursor_copy_struct::psi_info=
{ 0, "cursor_copy_struct", 0};
int
sp_instr_cursor_copy_struct::exec_core(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_cursor_copy_struct::exec_core");
int ret= 0;
Item_field_row *row= (Item_field_row*) thd->spcont->get_variable(m_var);
DBUG_ASSERT(row->type_handler() == &type_handler_row);
/*
Copy structure only once. If the cursor%ROWTYPE variable is declared
    inside a LOOP block, it gets its structure on the first loop iteration
    and remembers the structure for all subsequent loop iterations.
    If we recreated the structure on every iteration, we would get
potential memory leaks, and it would be less efficient.
*/
if (!row->arguments())
{
sp_cursor tmp(thd, &m_lex_keeper, true);
// Open the cursor without copying data
if (!(ret= tmp.open(thd)))
{
Row_definition_list defs;
/*
Create row elements on the caller arena.
It's the same arena that was used during sp_rcontext::create().
This puts cursor%ROWTYPE elements on the same mem_root
where explicit ROW elements and table%ROWTYPE reside:
- tmp.export_structure() allocates new Spvar_definition instances
and their components (such as TYPELIBs).
- row->row_create_items() creates new Item_field instances.
They all are created on the same mem_root.
*/
Query_arena current_arena;
      thd->set_n_backup_active_arena(thd->spcont->callers_arena, &current_arena);
if (!(ret= tmp.export_structure(thd, &defs)))
row->row_create_items(thd, &defs);
      thd->restore_active_arena(thd->spcont->callers_arena, &current_arena);
tmp.close(thd);
}
}
*nextp= m_ip + 1;
DBUG_RETURN(ret);
}
int
sp_instr_cursor_copy_struct::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_cursor_copy_struct::execute");
int ret= m_lex_keeper.cursor_reset_lex_and_exec_core(thd, nextp, FALSE, this);
DBUG_RETURN(ret);
}
void
sp_instr_cursor_copy_struct::print(String *str)
{
sp_variable *var= m_ctx->find_variable(m_var);
const LEX_CSTRING *name= m_ctx->find_cursor(m_cursor);
str->append(STRING_WITH_LEN("cursor_copy_struct "));
str->append(name);
str->append(' ');
str->append(&var->name);
str->append('@');
str->append_ulonglong(m_var);
}
/*
sp_instr_error class functions
*/
PSI_statement_info sp_instr_error::psi_info=
{ 0, "error", 0};
int
sp_instr_error::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_error::execute");
my_message(m_errcode, ER_THD(thd, m_errcode), MYF(0));
WSREP_DEBUG("sp_instr_error: %s %d", ER_THD(thd, m_errcode), thd->is_error());
*nextp= m_ip+1;
DBUG_RETURN(-1);
}
void
sp_instr_error::print(String *str)
{
/* error code */
if (str->reserve(SP_INSTR_UINT_MAXLEN+6))
return;
str->qs_append(STRING_WITH_LEN("error "));
str->qs_append(m_errcode);
}
/**************************************************************************
sp_instr_set_case_expr class implementation
**************************************************************************/
PSI_statement_info sp_instr_set_case_expr::psi_info=
{ 0, "set_case_expr", 0};
int
sp_instr_set_case_expr::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_set_case_expr::execute");
DBUG_RETURN(m_lex_keeper.reset_lex_and_exec_core(thd, nextp, TRUE, this));
}
int
sp_instr_set_case_expr::exec_core(THD *thd, uint *nextp)
{
int res= thd->spcont->set_case_expr(thd, m_case_expr_id, &m_case_expr);
if (res && !thd->spcont->get_case_expr(m_case_expr_id))
{
/*
Failed to evaluate the value, the case expression is still not
initialized. Set to NULL so we can continue.
*/
Item *null_item= new (thd->mem_root) Item_null(thd);
if (!null_item ||
thd->spcont->set_case_expr(thd, m_case_expr_id, &null_item))
{
/* If this also failed, we have to abort. */
my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATAL));
}
}
else
*nextp= m_ip+1;
return res;
}
void
sp_instr_set_case_expr::print(String *str)
{
/* set_case_expr (cont) id ... */
str->reserve(2*SP_INSTR_UINT_MAXLEN+18+32); // Add some extra for expr too
str->qs_append(STRING_WITH_LEN("set_case_expr ("));
str->qs_append(m_cont_dest);
str->qs_append(STRING_WITH_LEN(") "));
str->qs_append(m_case_expr_id);
str->qs_append(' ');
m_case_expr->print(str, enum_query_type(QT_ORDINARY |
QT_ITEM_ORIGINAL_FUNC_NULLIF));
}
uint
sp_instr_set_case_expr::opt_mark(sp_head *sp, List<sp_instr> *leads)
{
sp_instr *i;
marked= 1;
if ((i= sp->get_instr(m_cont_dest)))
{
m_cont_dest= i->opt_shortcut_jump(sp, this);
m_cont_optdest= sp->get_instr(m_cont_dest);
}
sp->add_mark_lead(m_cont_dest, leads);
return m_ip+1;
}
void
sp_instr_set_case_expr::opt_move(uint dst, List<sp_instr> *bp)
{
if (m_cont_dest > m_ip)
bp->push_back(this); // Forward
else if (m_cont_optdest)
m_cont_dest= m_cont_optdest->m_ip; // Backward
m_ip= dst;
}
/* ------------------------------------------------------------------ */
/*
Structure that represent all instances of one table
in optimized multi-set of tables used by routine.
*/
typedef struct st_sp_table
{
/*
Multi-set key:
db_name\0table_name\0alias\0 - for normal tables
db_name\0table_name\0 - for temporary tables
*/
LEX_STRING qname;
size_t db_length, table_name_length;
bool temp; /* true if corresponds to a temporary table */
thr_lock_type lock_type; /* lock type used for prelocking */
uint lock_count;
uint query_lock_count;
uint8 trg_event_map;
my_bool for_insert_data;
} SP_TABLE;
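/* my_hash key callback: an SP_TABLE element is keyed by its qname string. */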
uchar *sp_table_key(const uchar *ptr, size_t *plen, my_bool first)
{
SP_TABLE *tab= (SP_TABLE *)ptr;
*plen= tab->qname.length;
return (uchar *)tab->qname.str;
}
/**
Merge the list of tables used by some query into the multi-set of
tables used by routine.
@param thd thread context
@param table table list
@param lex_for_tmp_check LEX of the query for which we are merging
table list.
@note
    This method will use the LEX provided to check whether we are creating a
    temporary table and mark it as such in the target multi-set.
@retval
TRUE Success
@retval
FALSE Error
*/
bool
sp_head::merge_table_list(THD *thd, TABLE_LIST *table, LEX *lex_for_tmp_check)
{
SP_TABLE *tab;
if ((lex_for_tmp_check->sql_command == SQLCOM_DROP_TABLE ||
lex_for_tmp_check->sql_command == SQLCOM_DROP_SEQUENCE) &&
lex_for_tmp_check->tmp_table())
return TRUE;
for (uint i= 0 ; i < m_sptabs.records ; i++)
{
tab= (SP_TABLE*) my_hash_element(&m_sptabs, i);
tab->query_lock_count= 0;
}
for (; table ; table= table->next_global)
if (!table->derived && !table->schema_table && !table->table_function)
{
/*
Structure of key for the multi-set is "db\0table\0alias\0".
Since "alias" part can have arbitrary length we use String
object to construct the key. By default String will use
buffer allocated on stack with NAME_LEN bytes reserved for
alias, since in most cases it is going to be smaller than
NAME_LEN bytes.
*/
char tname_buff[(SAFE_NAME_LEN + 1) * 3];
String tname(tname_buff, sizeof(tname_buff), &my_charset_bin);
uint temp_table_key_length;
tname.length(0);
tname.append(&table->db);
tname.append('\0');
tname.append(&table->table_name);
tname.append('\0');
temp_table_key_length= tname.length();
tname.append(&table->alias);
tname.append('\0');
/*
Upgrade the lock type because this table list will be used
only in pre-locked mode, in which DELAYED inserts are always
converted to normal inserts.
*/
if (table->lock_type == TL_WRITE_DELAYED)
table->lock_type= TL_WRITE;
/*
        We ignore the alias when we check whether the table was already marked
        as temporary (and therefore should not be prelocked). Otherwise we
        would erroneously treat a table with the same name but a different
        alias as non-temporary.
*/
if ((tab= (SP_TABLE*) my_hash_search(&m_sptabs, (uchar *)tname.ptr(),
tname.length())) ||
((tab= (SP_TABLE*) my_hash_search(&m_sptabs, (uchar *)tname.ptr(),
temp_table_key_length)) &&
tab->temp))
{
if (tab->lock_type < table->lock_type)
tab->lock_type= table->lock_type; // Use the table with the highest lock type
tab->query_lock_count++;
if (tab->query_lock_count > tab->lock_count)
tab->lock_count++;
tab->trg_event_map|= table->trg_event_map;
tab->for_insert_data|= table->for_insert_data;
}
else
{
if (!(tab= (SP_TABLE *)thd->calloc(sizeof(SP_TABLE))))
return FALSE;
if ((lex_for_tmp_check->sql_command == SQLCOM_CREATE_TABLE ||
lex_for_tmp_check->sql_command == SQLCOM_CREATE_SEQUENCE) &&
lex_for_tmp_check->query_tables == table &&
lex_for_tmp_check->tmp_table())
{
tab->temp= TRUE;
tab->qname.length= temp_table_key_length;
}
else
tab->qname.length= tname.length();
tab->qname.str= (char*) thd->memdup(tname.ptr(), tab->qname.length);
if (!tab->qname.str)
return FALSE;
tab->table_name_length= table->table_name.length;
tab->db_length= table->db.length;
tab->lock_type= table->lock_type;
tab->lock_count= tab->query_lock_count= 1;
tab->trg_event_map= table->trg_event_map;
tab->for_insert_data= table->for_insert_data;
if (my_hash_insert(&m_sptabs, (uchar *)tab))
return FALSE;
}
}
return TRUE;
}
/**<|fim▁hole|>
Elements of list will be allocated in PS memroot, so this list will be
persistent between PS executions.
@param[in] thd Thread context
@param[in,out] query_tables_last_ptr Pointer to the next_global member of
last element of the list where tables
will be added (or to its root).
@param[in] belong_to_view Uppermost view which uses this routine,
0 if none.
@retval
TRUE if some elements were added
@retval
FALSE otherwise.
*/
bool
sp_head::add_used_tables_to_table_list(THD *thd,
TABLE_LIST ***query_tables_last_ptr,
TABLE_LIST *belong_to_view)
{
uint i;
Query_arena *arena, backup;
bool result= FALSE;
DBUG_ENTER("sp_head::add_used_tables_to_table_list");
/*
Use persistent arena for table list allocation to be PS/SP friendly.
Note that we also have to copy database/table names and alias to PS/SP
memory since current instance of sp_head object can pass away before
next execution of PS/SP for which tables are added to prelocking list.
    This will be fixed by introducing a proper invalidation mechanism
once new TDC is ready.
*/
arena= thd->activate_stmt_arena_if_needed(&backup);
for (i=0 ; i < m_sptabs.records ; i++)
{
char *tab_buff, *key_buff;
SP_TABLE *stab= (SP_TABLE*) my_hash_element(&m_sptabs, i);
LEX_CSTRING db_name;
if (stab->temp)
continue;
if (!(tab_buff= (char *)thd->alloc(ALIGN_SIZE(sizeof(TABLE_LIST)) *
stab->lock_count)) ||
!(key_buff= (char*)thd->memdup(stab->qname.str,
stab->qname.length)))
DBUG_RETURN(FALSE);
db_name.str= key_buff;
db_name.length= stab->db_length;
for (uint j= 0; j < stab->lock_count; j++)
{
TABLE_LIST *table= (TABLE_LIST *)tab_buff;
LEX_CSTRING table_name= { key_buff + stab->db_length + 1,
stab->table_name_length };
LEX_CSTRING alias= { table_name.str + table_name.length + 1,
strlen(table_name.str + table_name.length + 1) };
table->init_one_table_for_prelocking(&db_name,
&table_name,
&alias,
stab->lock_type,
TABLE_LIST::PRELOCK_ROUTINE,
belong_to_view,
stab->trg_event_map,
query_tables_last_ptr,
stab->for_insert_data);
tab_buff+= ALIGN_SIZE(sizeof(TABLE_LIST));
result= TRUE;
}
}
if (arena)
thd->restore_active_arena(arena, &backup);
DBUG_RETURN(result);
}
/**
  Simple function for adding an explicitly named (system) table to
the global table list, e.g. "mysql", "proc".
*/
TABLE_LIST *
sp_add_to_query_tables(THD *thd, LEX *lex,
const LEX_CSTRING *db, const LEX_CSTRING *name,
thr_lock_type locktype,
enum_mdl_type mdl_type)
{
TABLE_LIST *table;
if (!(table= (TABLE_LIST *)thd->calloc(sizeof(TABLE_LIST))))
return NULL;
if (!thd->make_lex_string(&table->db, db->str, db->length) ||
!thd->make_lex_string(&table->table_name, name->str, name->length) ||
!thd->make_lex_string(&table->alias, name->str, name->length))
return NULL;
table->lock_type= locktype;
table->select_lex= lex->current_select;
table->cacheable_table= 1;
MDL_REQUEST_INIT(&table->mdl_request, MDL_key::TABLE, table->db.str,
table->table_name.str, mdl_type, MDL_TRANSACTION);
lex->add_to_query_tables(table);
return table;
}
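/*
  Pick the source expression for an assignment: the explicitly given value
  if there is one, otherwise the fallback expression (e.g. the variable's
  DEFAULT), otherwise a NULL literal.
*/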
Item *sp_head::adjust_assignment_source(THD *thd, Item *val, Item *val2)
{
return val ? val : val2 ? val2 : new (thd->mem_root) Item_null(thd);
}
/**
Helper action for a SET statement.
  Used to push an SP local variable into the assignment list.
  @param spv the SP local variable
@param val the value being assigned to the variable
@return TRUE if error, FALSE otherwise.
*/
bool
sp_head::set_local_variable(THD *thd, sp_pcontext *spcont,
const Sp_rcontext_handler *rh,
sp_variable *spv, Item *val, LEX *lex,
bool responsible_to_free_lex)
{
if (!(val= adjust_assignment_source(thd, val, spv->default_value)))
return true;
if (val->walk(&Item::unknown_splocal_processor, false, NULL))
return true;
sp_instr_set *sp_set= new (thd->mem_root)
sp_instr_set(instructions(), spcont, rh,
spv->offset, val, lex,
responsible_to_free_lex);
return sp_set == NULL || add_instr(sp_set);
}
/**
Similar to set_local_variable(), but for ROW variable fields.
*/
bool
sp_head::set_local_variable_row_field(THD *thd, sp_pcontext *spcont,
const Sp_rcontext_handler *rh,
sp_variable *spv, uint field_idx,
Item *val, LEX *lex)
{
if (!(val= adjust_assignment_source(thd, val, NULL)))
return true;
sp_instr_set_row_field *sp_set= new (thd->mem_root)
sp_instr_set_row_field(instructions(),
spcont, rh,
spv->offset,
field_idx, val,
lex, true);
return sp_set == NULL || add_instr(sp_set);
}
bool
sp_head::set_local_variable_row_field_by_name(THD *thd, sp_pcontext *spcont,
const Sp_rcontext_handler *rh,
sp_variable *spv,
const LEX_CSTRING *field_name,
Item *val, LEX *lex)
{
if (!(val= adjust_assignment_source(thd, val, NULL)))
return true;
sp_instr_set_row_field_by_name *sp_set=
new (thd->mem_root) sp_instr_set_row_field_by_name(instructions(),
spcont, rh,
spv->offset,
*field_name,
val,
lex, true);
return sp_set == NULL || add_instr(sp_set);
}
bool sp_head::add_open_cursor(THD *thd, sp_pcontext *spcont, uint offset,
sp_pcontext *param_spcont,
List<sp_assignment_lex> *parameters)
{
/*
The caller must make sure that the number of formal parameters matches
the number of actual parameters.
*/
DBUG_ASSERT((param_spcont ? param_spcont->context_var_count() : 0) ==
(parameters ? parameters->elements : 0));
if (parameters &&
add_set_cursor_param_variables(thd, param_spcont, parameters))
return true;
sp_instr_copen *i= new (thd->mem_root)
sp_instr_copen(instructions(), spcont, offset);
return i == NULL || add_instr(i);
}
bool sp_head::add_for_loop_open_cursor(THD *thd, sp_pcontext *spcont,
sp_variable *index,
const sp_pcursor *pcursor, uint coffset,
sp_assignment_lex *param_lex,
Item_args *parameters)
{
if (parameters &&
add_set_for_loop_cursor_param_variables(thd, pcursor->param_context(),
param_lex, parameters))
return true;
sp_instr *instr_copy_struct=
new (thd->mem_root) sp_instr_cursor_copy_struct(instructions(),
spcont, coffset,
pcursor->lex(),
index->offset);
if (instr_copy_struct == NULL || add_instr(instr_copy_struct))
return true;
sp_instr_copen *instr_copen=
new (thd->mem_root) sp_instr_copen(instructions(), spcont, coffset);
if (instr_copen == NULL || add_instr(instr_copen))
return true;
sp_instr_cfetch *instr_cfetch=
new (thd->mem_root) sp_instr_cfetch(instructions(),
spcont, coffset, false);
if (instr_cfetch == NULL || add_instr(instr_cfetch))
return true;
instr_cfetch->add_to_varlist(index);
return false;
}
bool
sp_head::add_set_for_loop_cursor_param_variables(THD *thd,
sp_pcontext *param_spcont,
sp_assignment_lex *param_lex,
Item_args *parameters)
{
DBUG_ASSERT(param_spcont->context_var_count() == parameters->argument_count());
for (uint idx= 0; idx < parameters->argument_count(); idx ++)
{
/*
param_lex is shared between multiple items (cursor parameters).
Only the last sp_instr_set is responsible for freeing param_lex.
See more comments in LEX::sp_for_loop_cursor_declarations in sql_lex.cc.
*/
bool last= idx + 1 == parameters->argument_count();
sp_variable *spvar= param_spcont->get_context_variable(idx);
if (set_local_variable(thd, param_spcont,
&sp_rcontext_handler_local,
spvar, parameters->arguments()[idx],
param_lex, last))
return true;
}
return false;
}
bool sp_head::spvar_fill_row(THD *thd,
sp_variable *spvar,
Row_definition_list *defs)
{
spvar->field_def.set_row_field_definitions(defs);
spvar->field_def.field_name= spvar->name;
if (fill_spvar_definition(thd, &spvar->field_def))
return true;
row_fill_field_definitions(thd, defs);
return false;
}
bool sp_head::spvar_fill_type_reference(THD *thd,
sp_variable *spvar,
const LEX_CSTRING &table,
const LEX_CSTRING &col)
{
Qualified_column_ident *ref;
if (!(ref= new (thd->mem_root) Qualified_column_ident(&table, &col)))
return true;
fill_spvar_using_type_reference(spvar, ref);
return false;
}
bool sp_head::spvar_fill_type_reference(THD *thd,
sp_variable *spvar,
const LEX_CSTRING &db,
const LEX_CSTRING &table,
const LEX_CSTRING &col)
{
Qualified_column_ident *ref;
if (!(ref= new (thd->mem_root) Qualified_column_ident(thd, &db, &table, &col)))
return true;
fill_spvar_using_type_reference(spvar, ref);
return false;
}
bool sp_head::spvar_fill_table_rowtype_reference(THD *thd,
sp_variable *spvar,
const LEX_CSTRING &table)
{
Table_ident *ref;
if (!(ref= new (thd->mem_root) Table_ident(&table)))
return true;
fill_spvar_using_table_rowtype_reference(thd, spvar, ref);
return false;
}
bool sp_head::spvar_fill_table_rowtype_reference(THD *thd,
sp_variable *spvar,
const LEX_CSTRING &db,
const LEX_CSTRING &table)
{
Table_ident *ref;
if (!(ref= new (thd->mem_root) Table_ident(thd, &db, &table, false)))
return true;
fill_spvar_using_table_rowtype_reference(thd, spvar, ref);
return false;
}
bool sp_head::check_group_aggregate_instructions_forbid() const
{
if (unlikely(m_flags & sp_head::HAS_AGGREGATE_INSTR))
{
my_error(ER_NOT_AGGREGATE_FUNCTION, MYF(0));
return true;
}
return false;
}
bool sp_head::check_group_aggregate_instructions_require() const
{
if (unlikely(!(m_flags & HAS_AGGREGATE_INSTR)))
{
my_error(ER_INVALID_AGGREGATE_FUNCTION, MYF(0));
return true;
}
return false;
}
bool sp_head::check_group_aggregate_instructions_function() const
{
return agg_type() == GROUP_AGGREGATE ?
check_group_aggregate_instructions_require() :
check_group_aggregate_instructions_forbid();
}
/*
In Oracle mode stored routines have an optional name
at the end of a declaration:
PROCEDURE p1 AS
BEGIN
NULL
END p1;
Check that the first p1 and the last p1 match.
*/
bool sp_head::check_package_routine_end_name(const LEX_CSTRING &end_name) const
{
LEX_CSTRING non_qualified_name= m_name;
const char *errpos;
size_t ofs;
if (!end_name.length)
return false; // No end name
if (!(errpos= strrchr(m_name.str, '.')))
{
errpos= m_name.str;
goto err;
}
errpos++;
ofs= errpos - m_name.str;
non_qualified_name.str+= ofs;
non_qualified_name.length-= ofs;
if (Sp_handler::eq_routine_name(end_name, non_qualified_name))
return false;
err:
my_error(ER_END_IDENTIFIER_DOES_NOT_MATCH, MYF(0), end_name.str, errpos);
return true;
}
bool
sp_head::check_standalone_routine_end_name(const sp_name *end_name) const
{
if (end_name && !end_name->eq(this))
{
my_error(ER_END_IDENTIFIER_DOES_NOT_MATCH, MYF(0),
ErrConvDQName(end_name).ptr(), ErrConvDQName(this).ptr());
return true;
}
return false;
}
ulong sp_head::sp_cache_version() const
{
return m_parent ? m_parent->sp_cache_version() : m_sp_cache_version;
}<|fim▁end|>
|
Add tables used by routine to the table list.
Converts multi-set of tables used by this routine to table list and adds
this list to the end of table list specified by 'query_tables_last_ptr'.
|
<|file_name|>api.rs<|end_file_name|><|fim▁begin|>pub mod collected {
use crate::BenchmarkName;
use crate::Commit;
<|fim▁hole|> #[derive(Debug, PartialEq, Clone, serde::Serialize, serde::Deserialize)]
pub enum Request {
// Will benchmark commit with these benchmarks
BenchmarkCommit {
commit: Commit,
benchmarks: Vec<BenchmarkName>,
},
// benchmark finished for this benchmark/commit
BenchmarkDone {
benchmark: BenchmarkName,
commit: Commit,
},
}
#[derive(Debug, PartialEq, Clone, serde::Serialize, serde::Deserialize)]
pub struct Response {
// nothing
}
}<|fim▁end|>
| |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>import browserColor from 'tap-browser-color';
browserColor();
import test from 'tape';
import React from 'react';
import universal from '../../client';
import routes from '../fixtures/routes';
import reducers from '../fixtures/reducers';
const app = universal({ React, routes, reducers });
const store = app();
test('Client app', nest => {
nest.test('...without Express instance', assert => {
const msg = 'should not return an Express instance';
const actual = typeof app.use;
const expected = 'undefined';
assert.equal(actual, expected, msg);
assert.end();
});
  nest.test('...initialState', assert => {
const msg = 'should render initialState';
const text = 'Untitled';
const actual = document.querySelectorAll('.title')[0].innerHTML;
const expected = text;
assert.equal(actual, expected, msg);
assert.end();
});<|fim▁hole|> nest.test('...client call', assert => {
const msg = 'should return store instance';
const actual = typeof store.dispatch;
const expected = 'function';
assert.equal(actual, expected, msg);
assert.end();
});
nest.test('...with dispatch', assert => {
const msg = 'should render new output';
const text = 'Client render';
store.dispatch({
type: 'SET_TITLE',
title: text
});
setTimeout(() => {
const actual = document.querySelectorAll('.title')[0].innerHTML;
const expected = text;
assert.equal(actual, expected, msg);
assert.end();
}, 100);
});
nest.test('...with second dispatch', assert => {
const msg = 'should render new output';
const text = 'Client render 2';
store.dispatch({
type: 'SET_TITLE',
title: text
});
setTimeout(() => {
const actual = document.querySelectorAll('.title')[0].innerHTML;
const expected = text;
assert.equal(actual, expected, msg);
assert.end();
}, 100);
});
});<|fim▁end|>
| |
<|file_name|>mmu.py<|end_file_name|><|fim▁begin|>try:
from primitives import Mem
except ImportError:
from mem import Mem
import sys
if sys.version >= '3':
xrange = range
class MMU():
def __init__(self, mem, size=0):
""" Initialize MMU
"""
self._enabled = False
self._mem = mem
self._wordsize = 4
self._table = []
def isEnabled(self):
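        """ Tell whether address translation is currently enabled
        >>> m = Mem()
        >>> u = MMU(m)
        >>> u.isEnabled()
        False
        >>> u.enable()
        >>> u.isEnabled()
        True
        """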
return self._enabled
def enable(self):
""" Enables MMU
"""
self._enabled = True
def disable(self):
""" Disables MMU
"""
self._enabled = False
def getEntries(self, entries, startpos=None):
""" Get page entries and parse them, handle recursively
>>> from primitives import Mem
>>> m = Mem(1024*1024)
>>> m.setData(0, 0x00000100, 4)
>>> m.setData(4, 0x00001100, 4)
>>> m.setData(8, 0x00002100, 4)
>>> m.setData(12, 0x00003100, 4)
>>> u = MMU(m)
>>> entries = [(0, MMU.Flags(solved={'execute': False, 'ok': True, 'size1': False, 'size2': False, 'write': False, 'subtable': True, 'userspace': False, 'size': 4*1024}), 0),
... (32768, MMU.Flags(solved={'execute': False, 'ok': True, 'size1': True, 'size2': False, 'write': False, 'subtable': False, 'userspace': False, 'size': 64}), 65536),
... (0, MMU.Flags(solved={'execute': False, 'ok': True, 'size1': False, 'size2': False, 'write': False, 'subtable': False, 'userspace': False, 'size': 4}), 131072)]
>>> u.getEntries(entries)
[(0, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 0), (4096, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 4096), (8192, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 8192), (12288, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 12288), (32768, execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False, 65536), (0, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 131072)]
"""
if startpos is None:
startpos = 0
subs = []
for (addr, flags, pos) in entries:
if flags['subtable']:
size = flags['size'] * 1024 / 4
if flags['ok']:
tmp = self.readTable(addr, size, pos)
entries = self.getEntries(tmp, startpos)
subs += entries
startpos += flags['size'] * 1024
else:
if flags['ok']:
subs.append((addr, flags, pos))
return subs
def initialize(self, tablepos, tablesize):
""" Initializes MMU with a initial page
Does recursive parsing
>>> from primitives import Mem
>>> m = Mem(1024*1024*10)
>>> u = MMU(m)
>>> # Subtable, starts at phys 4k
>>> m.setData(22, 0x00001111, 4)
>>> # Page, virtual start at 32k, size 64k
>>> m.setData(14, 0x00008110, 4)
>>> # Page, virtual start at 98k, size 4k
>>> m.setData(18, 0x00018100, 4)
>>> for i in xrange(1023):
... m.setData(0x1000 + i, 0)
>>> # Page at 8k, size 4k
>>> m.setData(0x1000, 0x00002100, 4)
>>> # Page at 12k, size 1M
>>> m.setData(0x1004, 0x00003120, 4)
>>> u.initialize(14, 3)
[(32768, execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False, 0), (98304, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 65536), (8192, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 69632), (12288, execute=False,ok=True,size=1024,size1=False,size2=True,subtable=False,userspace=False,write=False, 73728)]
"""
entries = self.readTable(tablepos, tablesize)
self._table = self.getEntries(entries)
return self._table
def diffTime(self, a, b):
d = a - b
print (d.seconds*1000*1000 + d.microseconds)
def readTable(self, tablepos, tablesize, pos=None):
""" Reads table from memory
>>> from primitives import Mem
>>> m = Mem(1024*100)
>>> u = MMU(m)
>>> # Subtable, starts at phys 4k
>>> m.setData(10, 0x00001111, 4)
>>> # Page, starts at 32k, size 64k
>>> m.setData(14, 0x00008110, 4)
>>> for i in xrange(1023):
... m.setData(0x1000 + i, 0)
>>> tmp = u.readTable(10, 3)
>>> tmp[0][0]
4096
>>> tmp[1][0]
32768
>>> tmp[0][1]
execute=False,ok=True,size=65536,size1=True,size2=False,subtable=True,userspace=False,write=False
>>> tmp[1][1]
execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False
>>> tmp[0]
(4096, execute=False,ok=True,size=65536,size1=True,size2=False,subtable=True,userspace=False,write=False, 0)
>>> tmp[1]
(32768, execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False, 67108864)
"""
import datetime
datas = []
if pos is None:
pos = 0
virtpos = tablepos
cnt = 0
        # Optimized reading in blocks instead of one byte at a time
block = self._mem.getBlock(tablepos, tablesize * 4)
oldtmp = 0
items = 0
preindex = 0
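        # preindex counts how many bytes of a partially read table entry
        # still have to be taken from the start of the next block; the bytes
        # already read are accumulated in oldtmp.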
for (bpos, data) in block:
if data is None:
continue
if preindex > 0:
# Do we have old data from previous block?
if preindex == 1:
oldtmp += (data[0] << 24)
if preindex == 2:
oldtmp += (data[0] << 16) + (data[1] << 24)
if preindex == 3:
oldtmp += (data[0] << 8) + (data[1] << 16) + (data[2] << 24)
(ok, pos, res) = self.readEntry(oldtmp, pos)
if ok:
datas.append(res)
tablepos = preindex
datalen = len(data)
l = int(datalen / 4 - 1)
index = tablepos % 0x1000
for item in xrange(l):
tmp = data[index] + (data[index+1] << 8) + (data[index+2] << 16) + (data[index+3] << 24)
(ok, pos, res) = self.readEntry(tmp, pos)
if ok:
datas.append(res)
index += 4
items += 4
if index > datalen - 4:
miss = datalen - index
preindex = 0
# Check if we didn't read all the data...
if miss > 0:
oldtmp = data[index]
if miss > 1:
oldtmp += (data[index+1] << 8)
if miss > 2:
oldtmp += (data[index+2] << 16)
preindex = 4 - miss
break
if items > (tablesize + tablepos):
break
return datas
"""
for index in xrange(tablesize):
tmp = self._mem.getData(virtpos, self._wordsize)
virtpos += self._wordsize
if tmp > 0:
print tmp
cnt += 1
(ok, pos, res) = self.readEntry(tmp, pos)
if ok:
datas.append(res)
return datas
"""
def readEntry(self, data, pos=0):
""" Read entry from one page table item data
>>> m = Mem()
>>> u = MMU(m)
>>> u.readEntry(0x00000000)
(False, 0, (0, execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 0))
>>> u.readEntry(0x00001000)
(True, 4096, (4096, execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 0))
>>> u.readEntry(0x00001111)
(True, 67108864, (4096, execute=False,ok=True,size=65536,size1=True,size2=False,subtable=True,userspace=False,write=False, 0))
>>> u.readEntry(0x00001022)
(True, 1048576, (4096, execute=True,ok=False,size=1024,size1=False,size2=True,subtable=False,userspace=False,write=False, 0))
>>> u.readEntry(0x00002FFF)
(True, 68719476736, (8192, execute=True,ok=True,size=67108864,size1=True,size2=True,subtable=True,userspace=True,write=True, 0))
>>> u.readEntry(0xFFFFFFFF)
(True, 68719476736, (4294963200, execute=True,ok=True,size=67108864,size1=True,size2=True,subtable=True,userspace=True,write=True, 0))
>>> u.readEntry(0)
(False, 0, (0, execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 0))
"""
if data > 0:
flags = MMU.Flags(data & 0xFFF)
vaddr = data & 0xFFFFF000
ok = True
else:
vaddr = 0
flags = MMU.Flags(data & 0xFFF)
ok = False
            return (ok, pos, (vaddr, flags, pos))
return (ok, pos + flags['size'] * 1024, (vaddr, flags, pos))
def getRange(self, item):
addr = item[0]
flags = item[1]
pos = item[2]
endaddr = addr + (flags['size'] * 1024)
return (addr, endaddr, pos)
def virtToPhys(self, pos):
""" Converts virtual memory location to physical
>>> from primitives import Mem
>>> m = Mem(1024*1024*10)
>>> u = MMU(m)
>>> # Page, virtual start at 24, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> # Page, virtual start at 96k, size 4k (0x1000)
>>> m.setData(14, 0x00018100, 4)
>>> # Page, virtual start at 32k, size 64k (0x10000)
>>> m.setData(18, 0x00008110, 4)
>>> # Subtable, starts at phys 4k, size 4M
>>> m.setData(22, 0x00001101, 4)
>>> # Page at virtual at 8k, size 4k (0x1000)
>>> m.setData(0x1000, 0x00002100, 4)
>>> # Page at virtual at 1126k, size 1M
>>> m.setData(0x1004, 0x00113120, 4)
>>> tmp = u.initialize(10, 5)
>>> u.virtToPhys(0x8000) == (0x2000)
True
>>> u.virtToPhys(0x8000)
8192
>>> u.virtToPhys(0x8001)
8193
>>> u.virtToPhys(0x2000)
73728
>>> u.virtToPhys(0x2000) == (0x2000 + 0x10000)
True
>>> u.virtToPhys(0x2010) == (0x2000 + 0x10000 + 0x10)
True
>>> u.virtToPhys(0x2FFF) == (0x2000 + 0x10000 + 0xFFF)
True
>>> u.virtToPhys(0x18000) == 0x1000
True
>>> u.virtToPhys(0x19000) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00019000
>>> u.virtToPhys(0x19001) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00019001
>>> u.virtToPhys(0x1A000) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 0001A000
>>> u.virtToPhys(0) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00000000
"""
for item in self._table:
(a, b, c) = self.getRange(item)
if a <= pos and pos < b:
index = (pos - a)
phys = c + index
return phys
raise IndexError('No page mapped at virtual: %.8X' % (pos))
def getPageFlags(self, pos):
""" Get flags at position
>>> from primitives import Mem
>>> m = Mem(1024*1024*10)
>>> u = MMU(m)
>>> # Page, virtual start at 24, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> # Subtable, starts at phys 4k (0x1000)
>>> m.setData(14, 0x00001101, 4)
>>> # Page, virtual start at 32k, size 64k (0x10000)
>>> m.setData(18, 0x00008110, 4)
>>> # Page, virtual start at 96, size 4k (0x1000)
>>> m.setData(22, 0x00018100, 4)
>>> # Page at virtual 8k, size 4k (0x1000)
>>> m.setData(0x1000, 0x00002100, 4)
>>> # Page at virtual 1126k, size 1M
>>> m.setData(0x1004, 0x00113120, 4)
>>> tmp = u.initialize(10, 4)
>>> u.enable()
>>> u.getPageFlags(0x8000)
execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False
>>> u.getPageFlags(0x8001)
execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False
>>> u.getPageFlags(0x18000)
execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False
>>> u.getPageFlags(0x18010)
execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False
>>> u.getPageFlags(0x19000)
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00019000
>>> u.getPageFlags(0x19001) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00019001
>>> u.getPageFlags(0x1A000) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 0001A000
>>> u.getPageFlags(0x18fff)
execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False
>>> u.getPageFlags(0) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00000000
"""
if not self._enabled:
return None
for item in self._table:
(a, b, c) = self.getRange(item)
if a <= pos and pos < b:
return item[1]
raise IndexError('No page mapped at virtual: %.8X' % (pos))
def setData(self, pos, data, size=4):
""" Set data, if MMU enabled, solve physical locations first
>>> from primitives import Mem
>>> m = Mem(1024*1024*5)
>>> u = MMU(m)
>>> # Page, virtual start at 24, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> # Page, virtual start at 96, size 4k (0x1000)
>>> m.setData(14, 0x00018100, 4)
>>> # Page, virtual start at 32k, size 64k (0x10000)
>>> m.setData(18, 0x00008110, 4)
>>> # Subtable, starts at phys 4k, size 4M (0x1000)
>>> m.setData(22, 0x00001101, 4)
>>> # Page at virtual 8k, size 4k (0x1000)
>>> m.setData(0x1000, 0x00002100, 4)
>>> # Page at virtual 1126k, size 1M
>>> m.setData(0x1004, 0x00113120, 4)
>>> tmp = u.initialize(10, 4)
>>> # Paging is disabled, set data to phys 0x8000
>>> u.setData(0x8000, 56, 1)
>>> # Enable paging
>>> u.enable()
>>> # Paging is enabled so set data to virt 0x8000, which is 0x2000 in phys
>>> u.setData(0x8000, 42, 1)
>>> # Get memory contents at 0x8000 phys
>>> m.getData(0x8000, 1)
56
>>> # Get memory contents at 0x2000 phys
>>> m.getData(0x2000, 1)
42
"""
if self._enabled:
self._mem.setData(self.virtToPhys(pos), data, size)
else:
self._mem.setData(pos, data, size)
def getData(self, pos, size=1):
""" Get data, if MMU enabled, solve physical location first
>>> from primitives import Mem
>>> m = Mem(1024*1024*10)
>>> u = MMU(m)
>>> # Page, virtual start at 24, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> # Subtable, starts at phys 4k (0x1000)
>>> m.setData(14, 0x00001101, 4)
>>> # Page, virtual start at 32k, size 64k (0x10000)
>>> m.setData(18, 0x00008110, 4)
>>> # Page, virtual start at 96k, size 4k (0x1000)
>>> m.setData(22, 0x00018100, 4)
>>> # Page at virtual 8k, size 4k (0x1000)
>>> m.setData(0x1000, 0x00002100, 4)
>>> # Page at virtual 1126k, size 1M
>>> m.setData(0x1004, 0x00113120, 4)
>>> u.initialize(10, 4)
[(24576, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 0), (8192, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 4096), (1126400, execute=False,ok=True,size=1024,size1=False,size2=True,subtable=False,userspace=False,write=False, 8192), (32768, execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False, 4198400), (98304, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 4263936)]
>>> # Paging is disabled, set data to phys 0x8000
>>> u.setData(0x8000, 56, 1)
>>> # Paging is disabled, set data to phys 0x100
>>> u.setData(0x100, 12345, 4)
>>> # Enable paging
>>> u.enable()
>>> # Paging is enabled so set data to virt 0x8000, which is 0x2000 in phys
>>> u.setData(0x8000, 42, 1)
>>> # Get memory contents at 0x8000 virt
>>> u.getData(0x8000, 1)
42
>>> # Get memory contents at 0x100 phys, 0x6000+0x100 virt
>>> u.getData(0x6000 + 0x100, 4)
12345
"""
if self._enabled:
return self._mem.getData(self.virtToPhys(pos), size)
else:
return self._mem.getData(pos, size)
def setRaw(self, pos, data):
""" Set one byte, if MMU enabled, solve physical location first
>>> from primitives import Mem
>>> m = Mem(1024*100)
>>> u = MMU(m)
>>> # Page, virtual start at 24, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> tmp = u.initialize(10, 1)
>>> u.setRaw(0x100, 255)
>>> u.enable()
>>> u.setRaw(0x6001, 123)
>>> m.getRaw(0x100)
255
>>> m.getRaw(0x1)
123
"""
if self._enabled:
self._mem.setRaw(self.virtToPhys(pos), data)
else:
self._mem.setRaw(pos, data)
def getRaw(self, pos):
""" Get one byte, if MMU enabled, solve physical location first
>>> from primitives import Mem
>>> m = Mem(1024*100)
>>> u = MMU(m)
>>> # Page, virtual start at 24, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> tmp = u.initialize(10, 1)
>>> u.setRaw(0x100, 255)
>>> u.enable()
>>> u.setRaw(0x6001, 123)
>>> m.getRaw(0x100)
255
>>> m.getRaw(0x1)
123
>>> u.getRaw(0x6001)
123
>>> u.getRaw(0x6000)
0
"""
if self._enabled:
return self._mem.getRaw(self.virtToPhys(pos))
else:
return self._mem.getRaw(pos)
class Flags:
def __init__(self, flags=0, solved=None):
""" Initialize flags
"""
self._flags = flags
if solved is None:
self._data = self.solveFlags(flags)
else:
self._data = solved
def solveFlags(self, flags):
""" Solve flags from given number data
>>> f = MMU.Flags()
>>> r = f.solveFlags(0x1)
>>> f
execute=False,ok=False,size=4096,size1=False,size2=False,subtable=True,userspace=False,write=False
>>> r = f.solveFlags(0x2)
>>> f
execute=True,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False
>>> r = f.solveFlags(0x4)
>>> f
execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=True
>>> r = f.solveFlags(0x8)
>>> f
execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=True,write=False
>>> r = f.solveFlags(0x10)
>>> f
execute=False,ok=False,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False
>>> r = f.solveFlags(0x20)
>>> f
execute=False,ok=False,size=1024,size1=False,size2=True,subtable=False,userspace=False,write=False
>>> r = f.solveFlags(0x30)
>>> f
execute=False,ok=False,size=65536,size1=True,size2=True,subtable=False,userspace=False,write=False
>>> r = f.solveFlags(0x40)
>>> f
execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False
>>> r = f.solveFlags(0xFF)
>>> f
execute=True,ok=False,size=67108864,size1=True,size2=True,subtable=True,userspace=True,write=True
>>> r = f.solveFlags(0x1FF)
>>> f
execute=True,ok=True,size=67108864,size1=True,size2=True,subtable=True,userspace=True,write=True
"""
data = {
'subtable': False,
'execute': False,
'write': False,
'userspace': False,
'size': 0,
'size1': False,
'size2': False,
'ok': False,
}
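        # Bit layout of the low 12 bits of a page table entry:
        #   bit 0 = subtable, bit 1 = execute, bit 2 = write,
        #   bit 3 = userspace, bits 4-5 = size selector, bit 8 = ok/present.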
#'size3': False,<|fim▁hole|> if flags & 0x1 == 0x1:
data['subtable'] = True
if flags & 0x2 == 0x2:
data['execute'] = True
if flags & 0x4 == 0x4:
data['write'] = True
if flags & 0x8 == 0x8:
data['userspace'] = True
if flags & 0x10 == 0x10:
data['size1'] = True
if flags & 0x20 == 0x20:
data['size2'] = True
if flags & 0x100 == 0x100:
data['ok'] = True
# Determine page size in kilobytes
if not data['size1'] and not data['size2']:
data['size'] = 4
elif data['size1'] and not data['size2']:
data['size'] = 64
elif not data['size1'] and data['size2']:
data['size'] = 1024
elif data['size1'] and data['size2']:
data['size'] = 1024 * 64
# For subtables multiply by 1024
if data['subtable']:
data['size'] *= 1024
self._data = data
return data
def isSet(self, name):
""" Checks whether element is set, or get value
>>> f = MMU.Flags(0x1F)
>>> f.isSet('size')
65536
>>> f.isSet('size1')
True
>>> f.isSet('size2')
False
>>> f.isSet('subtable')
True
"""
if not name in self._data:
return False
return self._data[name]
def __getitem__(self, name):
if not name in self._data:
return None
return self._data[name]
def dump(self):
""" Dumps the flag status
"""
return self._data
def __repr__(self):
""" Get string representation of the flags
"""
#return "%s" % self.dump()
a = self._data.keys()
res = ''
for k in sorted(a):
if res:
res += ','
res += '%s=%s' % (k, self._data[k])
return res
"""
MMU
Initial table
if __name__ == "__main__":
import doctest
doctest.run_docstring_examples(MMU.initialize, globals())
"""<|fim▁end|>
| |
<|file_name|>requestService.ts<|end_file_name|><|fim▁begin|>/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import { TPromise, Promise } from 'vs/base/common/winjs.base';
import { xhr } from 'vs/base/common/network';
import { IConfigurationRegistry, Extensions } from 'vs/platform/configuration/common/configurationRegistry';
import strings = require('vs/base/common/strings');
import nls = require('vs/nls');
import { IDisposable, dispose } from 'vs/base/common/lifecycle';
import platform = require('vs/platform/platform');
import { IConfigurationService } from 'vs/platform/configuration/common/configuration';
import { BaseRequestService } from 'vs/platform/request/common/baseRequestService';<|fim▁hole|>import { IWorkspaceContextService } from 'vs/platform/workspace/common/workspace';
import { assign } from 'vs/base/common/objects';
import { IXHROptions, IXHRResponse } from 'vs/base/common/http';
import { request } from 'vs/base/node/request';
import { getProxyAgent } from 'vs/base/node/proxy';
import { createGunzip } from 'zlib';
import { Stream } from 'stream';
interface IHTTPConfiguration {
http?: {
proxy?: string;
proxyStrictSSL?: boolean;
};
}
export class RequestService extends BaseRequestService {
private disposables: IDisposable[];
private proxyUrl: string = null;
private strictSSL: boolean = true;
constructor(
@IWorkspaceContextService contextService: IWorkspaceContextService,
@IConfigurationService configurationService: IConfigurationService,
@ITelemetryService telemetryService?: ITelemetryService
) {
super(contextService, telemetryService);
this.disposables = [];
const config = configurationService.getConfiguration<IHTTPConfiguration>();
this.configure(config);
const disposable = configurationService.onDidUpdateConfiguration(e => this.configure(e.config));
this.disposables.push(disposable);
}
private configure(config: IHTTPConfiguration) {
this.proxyUrl = config.http && config.http.proxy;
this.strictSSL = config.http && config.http.proxyStrictSSL;
}
makeRequest(options: IXHROptions): TPromise<IXHRResponse> {
let url = options.url;
if (!url) {
throw new Error('IRequestService.makeRequest: Url is required.');
}
// Support file:// in native environment through XHR
if (strings.startsWith(url, 'file://')) {
return xhr(options).then(null, (xhr: XMLHttpRequest) => {
if (xhr.status === 0 && xhr.responseText) {
return xhr; // loading resources locally returns a status of 0 which in WinJS is an error so we need to handle it here
}
return <any>Promise.wrapError({ status: 404, responseText: nls.localize('localFileNotFound', "File not found.")});
});
}
return super.makeRequest(options);
}
protected makeCrossOriginRequest(options: IXHROptions): TPromise<IXHRResponse> {
const { proxyUrl, strictSSL } = this;
const agent = getProxyAgent(options.url, { proxyUrl, strictSSL });
options = assign({}, options);
options = assign(options, { agent, strictSSL });
return request(options).then(result => new TPromise<IXHRResponse>((c, e, p) => {
const res = result.res;
let stream: Stream = res;
if (res.headers['content-encoding'] === 'gzip') {
stream = stream.pipe(createGunzip());
}
const data: string[] = [];
stream.on('data', c => data.push(c));
stream.on('end', () => {
const status = res.statusCode;
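				// Manually follow redirects (status 300-303 or 307) while the
				// remaining followRedirects budget allows it; otherwise fall
				// through and report the response as-is.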
if (options.followRedirects > 0 && (status >= 300 && status <= 303 || status === 307)) {
let location = res.headers['location'];
if (location) {
let newOptions = {
type: options.type, url: location, user: options.user, password: options.password, responseType: options.responseType, headers: options.headers,
timeout: options.timeout, followRedirects: options.followRedirects - 1, data: options.data
};
xhr(newOptions).done(c, e, p);
return;
}
}
const response: IXHRResponse = {
responseText: data.join(''),
status,
getResponseHeader: header => res.headers[header],
readyState: 4
};
if ((status >= 200 && status < 300) || status === 1223) {
c(response);
} else {
e(response);
}
});
}, err => {
let message: string;
if (agent) {
			message = 'Unable to connect to ' + options.url + ' through a proxy. Error: ' + err.message;
} else {
			message = 'Unable to connect to ' + options.url + '. Error: ' + err.message;
}
return TPromise.wrapError<IXHRResponse>({
responseText: message,
status: 404
});
}));
}
dispose(): void {
this.disposables = dispose(this.disposables);
}
}
// Configuration
let confRegistry = <IConfigurationRegistry>platform.Registry.as(Extensions.Configuration);
confRegistry.registerConfiguration({
id: 'http',
order: 15,
title: nls.localize('httpConfigurationTitle', "HTTP configuration"),
type: 'object',
properties: {
'http.proxy': {
type: 'string',
pattern: '^https?://[^:]+(:\\d+)?$|^$',
description: nls.localize('proxy', "The proxy setting to use. If not set will be taken from the http_proxy and https_proxy environment variables")
},
'http.proxyStrictSSL': {
type: 'boolean',
default: true,
description: nls.localize('strictSSL', "Whether the proxy server certificate should be verified against the list of supplied CAs.")
}
}
});<|fim▁end|>
|
import { ITelemetryService } from 'vs/platform/telemetry/common/telemetry';
|
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import patterns, include, url
from django.shortcuts import redirect, render_to_response
from django.template.context import RequestContext
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# Just redirect / to /blog for now until I can
# come up with something to put on the homepage..
def to_blog(request):
return redirect('/blog/', permanent=False)
# Follow the BSD license and allow the source/binary to reproduce
# the license and copyright message
def sslicense(request):
slicense = """
Copyright (c) 2012-2013 Justin Crawford <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE
"""
ctx = {
'parts': {
"title": "License",
"html_title": "License",
"fragment": slicense.replace('\n', '<br>'),
},<|fim▁hole|>
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'StackSmash.views.home', name='home'),
# url(r'^StackSmash/', include('StackSmash.foo.urls')),
# TODO: Fix index and use something... Should identify subdomains somehow..
#url(r'^$', include('StackSmash.apps.blog.urls')),
url(r'^license/', sslicense, name='license'),
#url(r'^docs/', include('StackSmash.apps.docs.urls'), name='docs', app_name='docs'),
url(r'^blog/', include('StackSmash.apps.blog.urls', namespace='blog')),
url(r'^projects/', include('StackSmash.apps.projects.urls', namespace='projects')),
url(r'^upload/', include('StackSmash.apps.uploader.urls', namespace='upload')),
url(r'^$', to_blog, name='index'),
#url(r'^projects/', include('StackSmash.apps.projects.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls), name='admin'),
)<|fim▁end|>
|
}
return render_to_response('docs/docs.html', RequestContext(request, ctx))
|
<|file_name|>gcpubsub_info.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# Copyright 2016 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcpubsub_info
version_added: "2.3"
short_description: List Topics/Subscriptions and Messages from Google PubSub.
description:
- List Topics/Subscriptions from Google PubSub. Use the gcpubsub module for
topic/subscription management.
See U(https://cloud.google.com/pubsub/docs) for an overview.
- This module was called C(gcpubsub_facts) before Ansible 2.9. The usage did not change.
requirements:
- "python >= 2.6"
- "google-auth >= 0.5.0"
- "google-cloud-pubsub >= 0.22.0"
notes:
- list state enables user to list topics or subscriptions in the project. See examples for details.
author:
- "Tom Melendez (@supertom) <[email protected]>"
options:
topic:
description:
- GCP pubsub topic name. Only the name, not the full path, is required.
required: False
view:
description:
- Choices are 'topics' or 'subscriptions'
required: True
state:
description:
- list is the only valid option.
required: False
'''
EXAMPLES = '''
## List all Topics in a project
- gcpubsub_info:
view: topics
state: list
## List all Subscriptions in a project
- gcpubsub_info:
view: subscriptions
state: list
## List all Subscriptions for a Topic in a project
- gcpubsub_info:
view: subscriptions
topic: my-topic
state: list
'''
RETURN = '''
subscriptions:
description: List of subscriptions.
returned: When view is set to subscriptions.
type: list
sample: ["mysubscription", "mysubscription2"]
topic:
description: Name of topic. Used to filter subscriptions.
returned: Always
type: str
sample: "mytopic"
topics:
description: List of topics.
returned: When view is set to topics.
type: list
sample: ["mytopic", "mytopic2"]
'''
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
try:
from google.cloud import pubsub
HAS_GOOGLE_CLOUD_PUBSUB = True<|fim▁hole|>from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
def list_func(data, member='name'):
"""Used for state=list."""
return [getattr(x, member) for x in data]
def main():
module = AnsibleModule(argument_spec=dict(
view=dict(choices=['topics', 'subscriptions'], default='topics'),
topic=dict(required=False),
state=dict(choices=['list'], default='list'),
service_account_email=dict(),
credentials_file=dict(),
project_id=dict(), ),)
if module._name == 'gcpubsub_facts':
module.deprecate("The 'gcpubsub_facts' module has been renamed to 'gcpubsub_info'", version='2.13')
if not HAS_PYTHON26:
module.fail_json(
msg="GCE module requires python's 'ast' module, python v2.6+")
if not HAS_GOOGLE_CLOUD_PUBSUB:
module.fail_json(msg="Please install google-cloud-pubsub library.")
CLIENT_MINIMUM_VERSION = '0.22.0'
if not check_min_pkg_version('google-cloud-pubsub', CLIENT_MINIMUM_VERSION):
module.fail_json(msg="Please install google-cloud-pubsub library version %s" % CLIENT_MINIMUM_VERSION)
mod_params = {}
mod_params['state'] = module.params.get('state')
mod_params['topic'] = module.params.get('topic')
mod_params['view'] = module.params.get('view')
creds, params = get_google_cloud_credentials(module)
pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
pubsub_client.user_agent = 'ansible-pubsub-0.1'
json_output = {}
if mod_params['view'] == 'topics':
json_output['topics'] = list_func(pubsub_client.list_topics())
elif mod_params['view'] == 'subscriptions':
if mod_params['topic']:
t = pubsub_client.topic(mod_params['topic'])
json_output['subscriptions'] = list_func(t.list_subscriptions())
else:
json_output['subscriptions'] = list_func(pubsub_client.list_subscriptions())
json_output['changed'] = False
json_output.update(mod_params)
module.exit_json(**json_output)
if __name__ == '__main__':
main()<|fim▁end|>
|
except ImportError as e:
HAS_GOOGLE_CLOUD_PUBSUB = False
|
<|file_name|>regression_fuzz.rs<|end_file_name|><|fim▁begin|>// These tests are only run for the "default" test target because some of them
// can take quite a long time. Some of them take long enough that it's not
// practical to run them in debug mode. :-/
// See: https://oss-fuzz.com/testcase-detail/5673225499181056
//
// Ignored by default since it takes too long in debug mode (almost a minute).
#[test]
#[ignore]
fn fuzz1() {
regex!(r"1}{55}{0}*{1}{55}{55}{5}*{1}{55}+{56}|;**");
}
// See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26505
// See: https://github.com/rust-lang/regex/issues/722
#[test]
fn empty_any_errors_no_panic() {
assert!(regex_new!(r"\P{any}").is_err());
}
// This tests that a very large regex errors during compilation instead of
// using gratuitous amounts of memory. The specific problem is that the
// compiler wasn't accounting for the memory used by Unicode character classes
// correctly.
//
// See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=33579
#[test]<|fim▁hole|>fn big_regex_fails_to_compile() {
let pat = "[\u{0}\u{e}\u{2}\\w~~>[l\t\u{0}]p?<]{971158}";
assert!(regex_new!(pat).is_err());
}<|fim▁end|>
| |
<|file_name|>context.go<|end_file_name|><|fim▁begin|>// This file is part of graze/golang-service
//
// Copyright (c) 2016 Nature Delivered Ltd. <https://www.graze.com>
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
//
// license: https://github.com/graze/golang-service/blob/master/LICENSE
// link: https://github.com/graze/golang-service
package handlers
import (
"net/http"
"github.com/graze/golang-service/log"
uuid "github.com/satori/go.uuid"
)
// logContextHandler contains a local logger context and the handler
type logContextHandler struct {<|fim▁hole|>
// ServeHTTP modifies the context of the request by adding a local logger context
func (h logContextHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
url := *req.URL
ip := ""
if userIP, err := getUserIP(req); err == nil {
ip = userIP.String()
}
ctx := h.logger.Ctx(req.Context()).With(log.KV{
"transaction": uuid.NewV4().String(),
"http.method": req.Method,
"http.protocol": req.Proto,
"http.uri": parseURI(req, url),
"http.path": uriPath(req, url),
"http.host": req.Host,
"http.user": ip,
"http.ref": req.Referer(),
"http.user-agent": req.Header.Get("User-Agent"),
}).NewContext(req.Context())
h.handler.ServeHTTP(w, req.WithContext(ctx))
}
// LoggingContextHandler returns a handler that adds `http` and `transaction` items into the provided logging context
//
// It adds the following fields to the `LoggingResponseWriter` log context:
// http.method - GET/POST/...
// http.protocol - HTTP/1.1
// http.uri - /path?with=query
// http.path - /path
// http.host - localhost:80
// http.user - 192.168.0.1 - ip address of the user
// http.ref - http://google.com - referrer
// http.user-agent - The user agent of the user
// transaction - unique uuid4 for this request
func LoggingContextHandler(logger log.FieldLogger, h http.Handler) http.Handler {
return logContextHandler{logger.With(log.KV{}), h}
}<|fim▁end|>
|
logger log.FieldLogger
handler http.Handler
}
|
<|file_name|>test11_cross_validate_objects_1200ms_scaled_method_v.py<|end_file_name|><|fim▁begin|># Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
#get dimensions<|fim▁hole|> mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 123:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
    #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted for by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((140,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()<|fim▁end|>
|
num_data,dim = X.shape
#center data
|
<|file_name|>buffer_map.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use geom::size::Size2D;
use layers::platform::surface::NativePaintingGraphicsContext;
use layers::layers::LayerBuffer;
use std::hash::{Hash, Hasher};
use std::mem;
/// This is a struct used to store buffers when they are not in use.
/// The paint task can quickly query for a particular size of buffer when it
/// needs it.
pub struct BufferMap {
/// A HashMap that stores the Buffers.
map: HashMap<BufferKey, BufferValue>,
/// The current amount of memory stored by the BufferMap's buffers.
mem: usize,
/// The maximum allowed memory. Unused buffers will be deleted
/// when this threshold is exceeded.
max_mem: usize,
/// A monotonically increasing counter to track how recently tile sizes were used.
counter: usize,
}
/// A key with which to store buffers. It is based on the size of the buffer.
#[derive(Eq, Copy, Clone)]
struct BufferKey([usize; 2]);
impl Hash for BufferKey {
fn hash<H: Hasher>(&self, state: &mut H) {
let BufferKey(ref bytes) = *self;
bytes.hash(state);
}
}
impl PartialEq for BufferKey {
fn eq(&self, other: &BufferKey) -> bool {
let BufferKey(s) = *self;
let BufferKey(o) = *other;
s[0] == o[0] && s[1] == o[1]
}
}
/// Create a key from a given size
impl BufferKey {
fn get(input: Size2D<usize>) -> BufferKey {
BufferKey([input.width, input.height])
}
}
/// A helper struct to keep track of buffers in the HashMap
struct BufferValue {
/// An array of buffers, all the same size
buffers: Vec<Box<LayerBuffer>>,
/// The counter when this size was last requested
last_action: usize,
}
impl BufferMap {
// Creates a new BufferMap with a given buffer limit.
pub fn new(max_mem: usize) -> BufferMap {
BufferMap {
map: HashMap::new(),
mem: 0,
max_mem: max_mem,
counter: 0,
}
}
/// Insert a new buffer into the map.
pub fn insert(&mut self, graphics_context: &NativePaintingGraphicsContext, new_buffer: Box<LayerBuffer>) {
let new_key = BufferKey::get(new_buffer.get_size_2d());
// If all our buffers are the same size and we're already at our
// memory limit, no need to store this new buffer; just let it drop.
if self.mem + new_buffer.get_mem() > self.max_mem && self.map.len() == 1 &&
self.map.contains_key(&new_key) {
new_buffer.destroy(graphics_context);
return;
}
self.mem += new_buffer.get_mem();
// use lazy insertion function to prevent unnecessary allocation
let counter = &self.counter;
match self.map.entry(new_key) {
Occupied(entry) => {
entry.into_mut().buffers.push(new_buffer);
}
Vacant(entry) => {<|fim▁hole|> }
}
let mut opt_key: Option<BufferKey> = None;
while self.mem > self.max_mem {
let old_key = match opt_key {
Some(key) => key,
None => {
match self.map.iter().min_by(|&(_, x)| x.last_action) {
Some((k, _)) => *k,
None => panic!("BufferMap: tried to delete with no elements in map"),
}
}
};
if {
let list = &mut self.map.get_mut(&old_key).unwrap().buffers;
let condemned_buffer = list.pop().take().unwrap();
self.mem -= condemned_buffer.get_mem();
condemned_buffer.destroy(graphics_context);
list.is_empty()
}
{ // then
self.map.remove(&old_key); // Don't store empty vectors!
opt_key = None;
} else {
opt_key = Some(old_key);
}
}
}
// Try to find a buffer for the given size.
pub fn find(&mut self, size: Size2D<usize>) -> Option<Box<LayerBuffer>> {
let mut flag = false; // True if key needs to be popped after retrieval.
let key = BufferKey::get(size);
let ret = match self.map.get_mut(&key) {
Some(ref mut buffer_val) => {
buffer_val.last_action = self.counter;
self.counter += 1;
let buffer = buffer_val.buffers.pop().take().unwrap();
self.mem -= buffer.get_mem();
if buffer_val.buffers.is_empty() {
flag = true;
}
Some(buffer)
}
None => None,
};
if flag {
self.map.remove(&key); // Don't store empty vectors!
}
ret
}
/// Destroys all buffers.
pub fn clear(&mut self, graphics_context: &NativePaintingGraphicsContext) {
let map = mem::replace(&mut self.map, HashMap::new());
for (_, value) in map.into_iter() {
for tile in value.buffers.into_iter() {
tile.destroy(graphics_context)
}
}
self.mem = 0
}
}<|fim▁end|>
|
entry.insert(BufferValue {
buffers: vec!(new_buffer),
last_action: *counter,
});
|
<|file_name|>Utils.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2016 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
import inspect
import os
import sys
import threading
import time
import wx
from CommonMark import commonmark
from ctypes import c_ulonglong, windll
from datetime import datetime as dt, timedelta as td
from docutils.core import publish_parts as ReSTPublishParts
from docutils.writers.html4css1 import Writer
from functools import update_wrapper
from os.path import abspath, dirname, exists, join
from types import ClassType
# Local imports
import eg
__all__ = [
"Bunch", "NotificationHandler", "LogIt", "LogItWithReturn", "TimeIt",
"AssertInMainThread", "AssertInActionThread", "ParseString", "SetDefault",
"EnsureVisible", "VBoxSizer", "HBoxSizer", "EqualizeWidths", "AsTasklet",
"ExecFile", "GetTopLevelWindow",
]
USER_CLASSES = (type, ClassType)
class Bunch(object):
"""
Universal collection of a bunch of named stuff.
Often we want to just collect a bunch of stuff together, naming each
item of the bunch. A dictionary is OK for that; however, when names are
constants and to be used just like variables, the dictionary-access syntax
("if bunch['squared'] > threshold", etc) is not maximally clear. It takes
very little effort to build a little class, as in this 'Bunch', that will
both ease the initialisation task and provide elegant attribute-access
syntax ("if bunch.squared > threshold", etc).
Usage is simple::
point = eg.Bunch(x=100, y=200)
# and of course you can read/write the named
# attributes you just created, add others, del
# some of them, etc, etc:
point.squared = point.x * point.y
if point.squared > threshold:
point.isok = True
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class HBoxSizer(wx.BoxSizer): #IGNORE:R0904
def __init__(self, *items):
wx.BoxSizer.__init__(self, wx.HORIZONTAL)
self.AddMany(items)
class MyHtmlDocWriter(Writer):
def apply_template(self):
return """\
%(head_prefix)s
%(head)s
%(stylesheet)s
%(body_prefix)s
%(body_pre_docinfo)s
%(docinfo)s
%(body)s
%(body_suffix)s
""" % self.interpolation_dict()
HTML_DOC_WRITER = MyHtmlDocWriter()
class NotificationHandler(object):
__slots__ = ["listeners"]
def __init__(self):
self.listeners = []
class VBoxSizer(wx.BoxSizer): #IGNORE:R0904
def __init__(self, *items):
wx.BoxSizer.__init__(self, wx.VERTICAL)
self.AddMany(items)
def AppUrl(description, url):
if url:
txt = '<p><div align=right><i><font color="#999999" size=-1>%s <a href="%s">%s</a>.</font></i></div></p>' % (
eg.text.General.supportSentence,
url,
eg.text.General.supportLink
)
else:
return description
if description.startswith("<md>"):
description = description[4:]
description = DecodeMarkdown(description)
elif description.startswith("<rst>"):
description = description[5:]
description = DecodeReST(description)
return description + txt
def AssertInActionThread(func):
if not eg.debugLevel:
return func
def AssertWrapper(*args, **kwargs):
if eg.actionThread._ThreadWorker__thread != threading.currentThread():
raise AssertionError(
"Called outside ActionThread: %s() in %s" %
(func.__name__, func.__module__)
)
        return func(*args, **kwargs)
return update_wrapper(AssertWrapper, func)
def AssertInMainThread(func):
if not eg.debugLevel:
return func
def AssertWrapper(*args, **kwargs):
if eg.mainThread != threading.currentThread():
raise AssertionError(
"Called outside MainThread: %s in %s" %
(func.__name__, func.__module__)
)
return func(*args, **kwargs)
return update_wrapper(AssertWrapper, func)
def AsTasklet(func):
def Wrapper(*args, **kwargs):
eg.Tasklet(func)(*args, **kwargs).run()
return update_wrapper(Wrapper, func)
def CollectGarbage():
import gc
#gc.set_debug(gc.DEBUG_SAVEALL)
#gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
from pprint import pprint
print "threshold:", gc.get_threshold()
print "unreachable object count:", gc.collect()
garbageList = gc.garbage[:]
for i, obj in enumerate(garbageList):
print "Object Num %d:" % i
pprint(obj)
#print "Referrers:"
#print(gc.get_referrers(o))
#print "Referents:"
#print(gc.get_referents(o))
print "Done."
#print "unreachable object count:", gc.collect()
#from pprint import pprint
#pprint(gc.garbage)
def DecodeMarkdown(source):
return commonmark(source)
def DecodeReST(source):
#print repr(source)
res = ReSTPublishParts(
source=PrepareDocstring(source),
writer=HTML_DOC_WRITER,
settings_overrides={"stylesheet_path": ""}<|fim▁hole|> #print repr(res)
return res['body']
def EnsureVisible(window):
"""
Ensures the given wx.TopLevelWindow is visible on the screen.
Moves and resizes it if necessary.
"""
from eg.WinApi.Dynamic import (
sizeof, byref, GetMonitorInfo, MonitorFromWindow, GetWindowRect,
MONITORINFO, RECT, MONITOR_DEFAULTTONEAREST,
# MonitorFromRect, MONITOR_DEFAULTTONULL,
)
hwnd = window.GetHandle()
windowRect = RECT()
GetWindowRect(hwnd, byref(windowRect))
#hMonitor = MonitorFromRect(byref(windowRect), MONITOR_DEFAULTTONULL)
#if hMonitor:
# return
parent = window.GetParent()
if parent:
hwnd = parent.GetHandle()
hMonitor = MonitorFromWindow(hwnd, MONITOR_DEFAULTTONEAREST)
monInfo = MONITORINFO()
monInfo.cbSize = sizeof(MONITORINFO)
GetMonitorInfo(hMonitor, byref(monInfo))
displayRect = monInfo.rcWork
left = windowRect.left
right = windowRect.right
top = windowRect.top
bottom = windowRect.bottom
# shift the window horizontally into the display area
if left < displayRect.left:
right += (displayRect.left - left)
left = displayRect.left
if right > displayRect.right:
right = displayRect.right
elif right > displayRect.right:
left += (displayRect.right - right)
right = displayRect.right
if left < displayRect.left:
left = displayRect.left
# shift the window vertically into the display area
if top < displayRect.top:
bottom += (displayRect.top - top)
top = displayRect.top
if bottom > displayRect.bottom:
bottom = displayRect.bottom
elif bottom > displayRect.bottom:
top += (displayRect.bottom - bottom)
bottom = displayRect.bottom
if top < displayRect.top:
top = displayRect.top
# set the new position and size
window.SetRect((left, top, right - left, bottom - top))
def EqualizeWidths(ctrls):
maxWidth = max((ctrl.GetBestSize()[0] for ctrl in ctrls))
for ctrl in ctrls:
ctrl.SetMinSize((maxWidth, -1))
def ExecFile(filename, globals=None, locals=None):
"""
Replacement for the Python built-in execfile() function, but handles
unicode filenames right.
"""
FSE = sys.getfilesystemencoding()
flnm = filename.encode(FSE) if isinstance(filename, unicode) else filename
return execfile(flnm, globals, locals)
def GetBootTimestamp(unix_timestamp = True):
"""
Returns the time of the last system boot.
If unix_timestamp == True, result is a unix temestamp.
Otherwise it is in human readable form.
"""
now = time.time()
GetTickCount64 = windll.kernel32.GetTickCount64
GetTickCount64.restype = c_ulonglong
up = GetTickCount64() / 1000.0
if not unix_timestamp:
st = str(dt.fromtimestamp(now - up))
return st if "." not in st else st[:st.index(".")]
return now - up
def GetClosestLanguage():
"""
Returns the language file closest to system locale.
"""
langDir = join(dirname(abspath(sys.executable)), "languages")
if exists(langDir):
locale = wx.Locale()
name = locale.GetLanguageCanonicalName(locale.GetSystemLanguage())
if exists(join(langDir, name + ".py")):
return name
else:
for f in [f for f in os.listdir(langDir) if f.endswith(".py")]:
if f.startswith(name[0:3]):
return f[0:5]
return "en_EN"
def GetFirstParagraph(text):
"""
Return the first paragraph of a description string.
The string can be encoded in HTML or reStructuredText.
The paragraph is returned as HTML.
"""
text = text.lstrip()
if text.startswith("<md>"):
text = text[4:]
text = DecodeMarkdown(text)
start = text.find("<p>")
end = text.find("</p>")
return text[start + 3:end].replace("\n", " ")
elif text.startswith("<rst>"):
text = text[5:]
text = DecodeReST(text)
start = text.find("<p>")
end = text.find("</p>")
return text[start + 3:end].replace("\n", " ")
else:
result = ""
for line in text.splitlines():
if line == "":
break
result += " " + line
return ' '.join(result.split())
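# Illustrative example (not part of the original module): for a plain-text
# description the first paragraph simply ends at the first blank line, e.g.
#     GetFirstParagraph("line one\nline two\n\nrest")  # -> "line one line two"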
def GetFuncArgString(func, args, kwargs):
classname = ""
argnames = inspect.getargspec(func)[0]
start = 0
if argnames:
if argnames[0] == "self":
classname = args[0].__class__.__name__ + "."
start = 1
res = []
append = res.append
for key, value in zip(argnames, args)[start:]:
append(str(key) + GetMyRepresentation(value))
for key, value in kwargs.items():
append(str(key) + GetMyRepresentation(value))
fname = classname + func.__name__
return fname, "(" + ", ".join(res) + ")"
def GetMyRepresentation(value):
"""
Give a shorter representation of some wx-objects. Returns normal repr()
for everything else. Also adds a "=" sign at the beginning to make it
useful as a "formatvalue" function for inspect.formatargvalues().
"""
typeString = repr(type(value))
if typeString.startswith("<class 'wx._core."):
return "=<wx.%s>" % typeString[len("<class 'wx._core."): -2]
if typeString.startswith("<class 'wx._controls."):
return "=<wx.%s>" % typeString[len("<class 'wx._controls."): -2]
return "=" + repr(value)
def GetTopLevelWindow(window):
"""
Returns the top level parent window of a wx.Window. This is in most
cases a wx.Dialog or wx.Frame.
"""
result = window
while True:
parent = result.GetParent()
if parent is None:
return result
elif isinstance(parent, wx.TopLevelWindow):
return parent
result = parent
def GetUpTime(seconds = True):
"""
Returns a runtime of system in seconds.
If seconds == False, returns the number of days, hours, minutes and seconds.
"""
GetTickCount64 = windll.kernel32.GetTickCount64
GetTickCount64.restype = c_ulonglong
ticks = GetTickCount64() / 1000.0
if not seconds:
delta = str(td(seconds = ticks))
return delta if "." not in delta else delta[:delta.index(".")]
return ticks
def IsVista():
"""
Determine if we're running Vista or higher.
"""
return (sys.getwindowsversion()[0] >= 6)
def IsXP():
"""
Determine if we're running XP or higher.
"""
return (sys.getwindowsversion()[0:2] >= (5, 1))
def LogIt(func):
"""
Logs the function call, if eg.debugLevel is set.
"""
if not eg.debugLevel:
return func
if func.func_code.co_flags & 0x20:
raise TypeError("Can't wrap generator function")
def LogItWrapper(*args, **kwargs):
funcName, argString = GetFuncArgString(func, args, kwargs)
eg.PrintDebugNotice(funcName + argString)
return func(*args, **kwargs)
return update_wrapper(LogItWrapper, func)
def LogItWithReturn(func):
"""
Logs the function call and return, if eg.debugLevel is set.
"""
if not eg.debugLevel:
return func
def LogItWithReturnWrapper(*args, **kwargs):
funcName, argString = GetFuncArgString(func, args, kwargs)
eg.PrintDebugNotice(funcName + argString)
result = func(*args, **kwargs)
eg.PrintDebugNotice(funcName + " => " + repr(result))
return result
return update_wrapper(LogItWithReturnWrapper, func)
def ParseString(text, filterFunc=None):
start = 0
chunks = []
last = len(text) - 1
while 1:
pos = text.find('{', start)
if pos < 0:
break
if pos == last:
break
chunks.append(text[start:pos])
if text[pos + 1] == '{':
chunks.append('{')
start = pos + 2
else:
start = pos + 1
end = text.find('}', start)
if end == -1:
raise SyntaxError("unmatched bracket")
word = text[start:end]
res = None
if filterFunc:
res = filterFunc(word)
if res is None:
res = eval(word, {}, eg.globals.__dict__)
chunks.append(unicode(res))
start = end + 1
chunks.append(text[start:])
return "".join(chunks)
def PrepareDocstring(docstring):
"""
Convert a docstring into lines of parseable reST. Return it as a list of
lines usable for inserting into a docutils ViewList (used as argument
of nested_parse()). An empty line is added to act as a separator between
this docstring and following content.
"""
lines = docstring.expandtabs().splitlines()
# Find minimum indentation of any non-blank lines after first line.
margin = sys.maxint
for line in lines[1:]:
content = len(line.lstrip())
if content:
indent = len(line) - content
margin = min(margin, indent)
# Remove indentation.
if lines:
lines[0] = lines[0].lstrip()
if margin < sys.maxint:
for i in range(1, len(lines)):
lines[i] = lines[i][margin:]
# Remove any leading blank lines.
while lines and not lines[0]:
lines.pop(0)
# make sure there is an empty line at the end
if lines and lines[-1]:
lines.append('')
return "\n".join(lines)
def Reset():
eg.stopExecutionFlag = True
eg.programCounter = None
del eg.programReturnStack[:]
eg.eventThread.ClearPendingEvents()
eg.actionThread.ClearPendingEvents()
eg.PrintError("Execution stopped by user")
def SetDefault(targetCls, defaultCls):
targetDict = targetCls.__dict__
for defaultKey, defaultValue in defaultCls.__dict__.iteritems():
if defaultKey not in targetDict:
setattr(targetCls, defaultKey, defaultValue)
elif type(defaultValue) in USER_CLASSES:
SetDefault(targetDict[defaultKey], defaultValue)
def SplitFirstParagraph(text):
"""
Split the first paragraph of a description string.
The string can be encoded in HTML or reStructuredText.
The paragraph is returned as HTML.
"""
text = text.lstrip()
if text.startswith("<md>"):
text = text[4:]
text = DecodeMarkdown(text)
start = text.find("<p>")
end = text.find("</p>")
return (
text[start + 3:end].replace("\n", " "),
text[end + 4:].replace("\n", " ")
)
elif text.startswith("<rst>"):
text = text[5:]
text = DecodeReST(text)
start = text.find("<p>")
end = text.find("</p>")
return (
text[start + 3:end].replace("\n", " "),
text[end + 4:].replace("\n", " ")
)
else:
result = ""
remaining = ""
lines = text.splitlines()
for i, line in enumerate(lines):
if line.strip() == "":
remaining = " ".join(lines[i:])
break
result += " " + line
return ' '.join(result.split()), remaining
def TimeIt(func):
""" Decorator to measure the execution time of a function.
Will print the time to the log.
"""
if not eg.debugLevel:
return func
def TimeItWrapper(*args, **kwargs):
startTime = time.clock()
funcName, _ = GetFuncArgString(func, args, kwargs)
res = func(*args, **kwargs)
eg.PrintDebugNotice(funcName + " :" + repr(time.clock() - startTime))
return res
return update_wrapper(TimeItWrapper, func)
def UpdateStartupShortcut(create):
from eg import Shortcut
path = os.path.join(
eg.folderPath.Startup,
eg.APP_NAME + ".lnk"
)
if os.path.exists(path):
os.remove(path)
if create:
if not os.path.exists(eg.folderPath.Startup):
os.makedirs(eg.folderPath.Startup)
Shortcut.Create(
path=path,
target=os.path.abspath(sys.executable),
arguments="-h -e OnInitAfterBoot",
startIn=os.path.dirname(os.path.abspath(sys.executable)),
)<|fim▁end|>
|
)
|
<|file_name|>test_discretization.py<|end_file_name|><|fim▁begin|>from unittest import TestCase
from tcontrol.discretization import c2d
from ..transferfunction import tf
from ..model_conversion import *<|fim▁hole|>
class TestDiscretization(TestCase):
def setUp(self):
self.s1 = tf([1], [1, 0, 1])
self.zoh = tf([0.4597, 0.4597], [1, 1.0806, 1], dt=1)
self.ss = tf2ss(tf([1], [1, 0, 1]))
def test_c2d_zoh(self):
d_sys = c2d(self.s1, 1, 'zoh')
self.assertLessEqual(np.max(np.abs(d_sys.num - self.zoh.num)), 1e-4)
def test_c2d_foh(self):
a = c2d(self.ss, 1, 'foh')
b = StateSpace([[0.540302, 0.841471], [-0.841471, 0.540302]],
[[0.773644], [0.49675]],
[[1, 0]], [[0.158529]], dt=1)
assert_ss_equal(a, b)
def test_c2d_tustin(self):
d_sys = c2d(self.s1, 1, 'tustin')
error = np.abs(d_sys.num - np.array([0.2, 0.4, 0.2]))
self.assertLessEqual(np.max(error), 1e-4)
def test_c2d_matched(self):
d_sys = c2d(self.s1, 1, 'matched')
error = np.abs(d_sys.num - np.array([0.2298, 0.4597, 0.2298]))
self.assertLessEqual(np.max(error), 1e-4)<|fim▁end|>
|
from ..statespace import StateSpace
import numpy as np
from .tools.test_utility import assert_ss_equal
|
<|file_name|>mv.py<|end_file_name|><|fim▁begin|># -*- coding: utf8 -*-
class Mv:
def command(self):
self.config = {
"command": {
"mv": {
"function": self.mvScreams,
"usage": "mv <user>",
"help": "Le clavier y colle!"
}
}}
return self.config
def mvScreams(self, Morphux, infos):
print(infos)
if (len(infos['args']) == 0 and infos['nick'] == "valouche"):
Morphux.sendMessage("Ta mere la chauve", infos['nick'])
elif (len(infos['args']) == 0 and infos['nick'] == "Ne02ptzero"):
Morphux.sendMessage("TU VAS LA CHIER TA CHIASSE?", infos['nick'])
elif (len(infos['args']) == 0):<|fim▁hole|> Morphux.sendMessage("SARACE BOULBA", infos['nick'])
elif (infos['args'][0] == "allow"):
Morphux.sendMessage("ALLOW?", infos['nick'])
elif (infos['args'][0] == "thunes"):
Morphux.sendMessage("Money equals power", infos['nick'])
elif (infos['args'][0] == "theodule"):
Morphux.sendMessage("THEODUUULE", infos['nick'])
elif (infos['args'][0] == "gg"):
Morphux.sendMessage("Le beau jeu, le beau geste, la lucidité !", infos['nick'])
elif (Morphux.userExists(infos['args'][0]) == 0):
Morphux.sendMessage("Respecte toi " + infos['args'][0] + "!", infos['nick'])<|fim▁end|>
| |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![deny(unsafe_code)]
#![cfg_attr(feature = "unstable", feature(conservative_impl_trait))]
extern crate backtrace;
extern crate bluetooth_traits;
extern crate canvas;
extern crate canvas_traits;
extern crate clipboard;
extern crate compositing;
extern crate debugger;
extern crate devtools_traits;
#[cfg(not(target_os = "ios"))]
extern crate embedder_traits;
extern crate euclid;
#[cfg(all(not(target_os = "windows"), not(target_os = "ios")))]
extern crate gaol;
extern crate gfx;
extern crate gfx_traits;
extern crate hyper;
extern crate ipc_channel;
extern crate keyboard_types;
extern crate layout_traits;
#[macro_use]
extern crate log;
extern crate metrics;
extern crate msg;
extern crate net;
extern crate net_traits;
extern crate profile_traits;
extern crate script_traits;
extern crate serde;
#[macro_use]
extern crate servo_channel;
extern crate servo_config;
extern crate servo_rand;
extern crate servo_remutex;
extern crate servo_url;
extern crate style_traits;<|fim▁hole|>
mod browsingcontext;
mod constellation;
mod event_loop;
mod network_listener;
mod pipeline;
#[cfg(all(not(target_os = "windows"), not(target_os = "ios")))]
mod sandboxing;
mod session_history;
mod timer_scheduler;
pub use constellation::{Constellation, FromCompositorLogger, FromScriptLogger, InitialConstellationState};
pub use pipeline::UnprivilegedPipelineContent;
#[cfg(all(not(target_os = "windows"), not(target_os = "ios")))]
pub use sandboxing::content_process_sandbox_profile;<|fim▁end|>
|
extern crate webrender_api;
extern crate webvr_traits;
|
<|file_name|>test_fixes.py<|end_file_name|><|fim▁begin|># Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Alex Gramfort <[email protected]>
# License: BSD
import numpy as np
from nose.tools import assert_equal, assert_raises
from numpy.testing import assert_array_equal
from distutils.version import LooseVersion
from scipy import signal
from ..fixes import (_in1d, _tril_indices, _copysign, _unravel_index,
_Counter, _unique, _bincount, _digitize)
from ..fixes import _firwin2 as mne_firwin2<|fim▁hole|>
def test_counter():
"""Test Counter replacement"""
import collections
try:
Counter = collections.Counter
except:
pass
else:
a = Counter([1, 2, 1, 3])
b = _Counter([1, 2, 1, 3])
for key, count in zip([1, 2, 3], [2, 1, 1]):
assert_equal(a[key], b[key])
def test_unique():
"""Test unique() replacement
"""
# skip test for np version < 1.5
if LooseVersion(np.__version__) < LooseVersion('1.5'):
return
for arr in [np.array([]), np.random.rand(10), np.ones(10)]:
# basic
assert_array_equal(np.unique(arr), _unique(arr))
# with return_index=True
x1, x2 = np.unique(arr, return_index=True, return_inverse=False)
y1, y2 = _unique(arr, return_index=True, return_inverse=False)
assert_array_equal(x1, y1)
assert_array_equal(x2, y2)
# with return_inverse=True
x1, x2 = np.unique(arr, return_index=False, return_inverse=True)
y1, y2 = _unique(arr, return_index=False, return_inverse=True)
assert_array_equal(x1, y1)
assert_array_equal(x2, y2)
# with both:
x1, x2, x3 = np.unique(arr, return_index=True, return_inverse=True)
y1, y2, y3 = _unique(arr, return_index=True, return_inverse=True)
assert_array_equal(x1, y1)
assert_array_equal(x2, y2)
assert_array_equal(x3, y3)
def test_bincount():
"""Test bincount() replacement
"""
# skip test for np version < 1.6
if LooseVersion(np.__version__) < LooseVersion('1.6'):
return
for minlength in [None, 100]:
x = _bincount(np.ones(10, int), None, minlength)
y = np.bincount(np.ones(10, int), None, minlength)
assert_array_equal(x, y)
def test_in1d():
"""Test numpy.in1d() replacement"""
a = np.arange(10)
b = a[a % 2 == 0]
assert_equal(_in1d(a, b).sum(), 5)
def test_digitize():
"""Test numpy.digitize() replacement"""
data = np.arange(9)
bins = [0, 5, 10]
left = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
right = np.array([0, 1, 1, 1, 1, 1, 2, 2, 2])
assert_array_equal(_digitize(data, bins), left)
assert_array_equal(_digitize(data, bins, True), right)
assert_raises(NotImplementedError, _digitize, data + 0.1, bins, True)
assert_raises(NotImplementedError, _digitize, data, [0., 5, 10], True)
def test_tril_indices():
"""Test numpy.tril_indices() replacement"""
il1 = _tril_indices(4)
il2 = _tril_indices(4, -1)
a = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]])
assert_array_equal(a[il1],
np.array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
assert_array_equal(a[il2], np.array([5, 9, 10, 13, 14, 15]))
def test_unravel_index():
"""Test numpy.unravel_index() replacement"""
assert_equal(_unravel_index(2, (2, 3)), (0, 2))
assert_equal(_unravel_index(2, (2, 2)), (1, 0))
assert_equal(_unravel_index(254, (17, 94)), (2, 66))
assert_equal(_unravel_index((2 * 3 + 1) * 6 + 4, (4, 3, 6)), (2, 1, 4))
assert_array_equal(_unravel_index(np.array([22, 41, 37]), (7, 6)),
[[3, 6, 6], [4, 5, 1]])
assert_array_equal(_unravel_index(1621, (6, 7, 8, 9)), (3, 1, 4, 1))
def test_copysign():
"""Test numpy.copysign() replacement"""
a = np.array([-1, 1, -1])
b = np.array([1, -1, 1])
assert_array_equal(_copysign(a, b), b)
assert_array_equal(_copysign(b, a), a)
def test_firwin2():
"""Test firwin2 backport
"""
taps1 = mne_firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
taps2 = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
assert_array_equal(taps1, taps2)
def test_filtfilt():
"""Test IIR filtfilt replacement
"""
x = np.r_[1, np.zeros(100)]
# Filter with an impulse
y = mne_filtfilt([1, 0], [1, 0], x, padlen=0)
assert_array_equal(x, y)<|fim▁end|>
|
from ..fixes import _filtfilt as mne_filtfilt
|
<|file_name|>main.py<|end_file_name|><|fim▁begin|>from flask import Blueprint, render_template, redirect, url_for, current_app
monitoring_main = Blueprint('monitoring_main', __name__, # pylint: disable=invalid-name
template_folder='templates',
static_url_path='/static',
static_folder='static')
@monitoring_main.context_processor
def inject_data():
data = {
'dashboards': current_app.config['monitoring']['dashboards'],
'uchiwa_url': current_app.config['monitoring']['uchiwa_url'],
}
return data
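# Illustrative config shape (hypothetical values) that inject_data() expects, e.g.
#     app.config['monitoring'] = {'dashboards': [...],
#                                 'uchiwa_url': 'http://uchiwa.example:3000'}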
@monitoring_main.route('/')
def index():<|fim▁hole|>
@monitoring_main.route('/events')
def events():
return render_template('events.html', title='Events')
@monitoring_main.route('/checks')
def checks():
return render_template('checks.html', title='Checks')
@monitoring_main.route('/clients')
def clients():
return render_template('clients.html', title='Clients')
@monitoring_main.route('/clients/<zone>/<client_name>')
def client(zone, client_name):
return render_template('client_details.html', zone=zone,
client=client_name, title='Client Details')
@monitoring_main.route('/clients/<zone>/<client_name>/events/<check>')
def client_event(zone, client_name, check):
return render_template('client_event_details.html', zone=zone,
client=client_name, check=check, title='Event Details')<|fim▁end|>
|
return redirect(url_for('monitoring_main.events'))
|
<|file_name|>Vec2D.py<|end_file_name|><|fim▁begin|>"basic 2D vector geometry"
from math import acos, sqrt, sin, cos, pi
class Vec2D(object):
" Simple 2D vector class for euclidean geometry "
EPSILON = 0.0001
def __init__(self, x=0.0, y=0.0):
self.pos_x = x
self.pos_y = y
def dot(self, other):
"dot product"
return self.pos_x * other.pos_x + self.pos_y * other.pos_y
def cross(self, other):
"2d cross product"
return self.pos_x * other.pos_y - self.pos_y * other.pos_x
def length(self):
"length of vector"
return sqrt(self.dot(self))
def normalized(self):
"unit vector with same direction as self"
length = self.length()
return self * (1/length)
def rotate(self, angle, center=None):
"rotate self by angle radians around center"
if center is None:
center = Vec2D()
centered = self - center
cosine = cos(angle)
sine = sin(angle)
new_pos_x = cosine * centered.pos_x - sine * centered.pos_y
new_pos_y = sine * centered.pos_x + cosine * centered.pos_y
final = Vec2D(new_pos_x, new_pos_y) + center
return final
def oriented_angle(self, other):
"oriented angle from self to other"
vec1 = self.normalized()
vec2 = other.normalized()
cross_prod = vec1.cross(vec2) # sin(angle)
dot_prod = vec1.dot(vec2) # cos(angle)
if dot_prod < -1.0:
dot_prod = -1.0
if dot_prod > 1.0:
dot_prod = 1.0
if cross_prod > 0:
angle = acos(dot_prod)
else:
angle = -acos(dot_prod)
if angle < 0:
angle = angle + 2 * pi
return angle
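    # Illustrative check (not part of the original class): the result is the
    # counter-clockwise angle normalised to [0, 2*pi), e.g.
    #     Vec2D(1, 0).oriented_angle(Vec2D(0, 1))  # ~= pi / 2
    #     Vec2D(0, 1).oriented_angle(Vec2D(1, 0))  # ~= 3 * pi / 2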
def __neg__(self):
return Vec2D(-self.pos_x, -self.pos_y)
def __add__(self, other):
return Vec2D(self.pos_x + other.pos_x, self.pos_y + other.pos_y)
def __sub__(self, other):<|fim▁hole|>
def __mul__(self, other):
return Vec2D(self.pos_x * other, self.pos_y * other)
def __str__(self):
return "({x},{y})".format(x=self.pos_x, y=self.pos_y)
def is_equal(self, other):
return (self - other).length() < Vec2D.EPSILON
@staticmethod
def orientation(vec1, vec2, vec3):
"return positive number if the points are mathematically \
positively oriented negative number for negative orientation \
and zero for colinear points"
vec12 = vec2 - vec1
vec23 = vec3 - vec2
return vec12.cross(vec23)<|fim▁end|>
|
return Vec2D(self.pos_x - other.pos_x, self.pos_y - other.pos_y)
|
<|file_name|>testroute.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from .base import BaseHandler
<|fim▁hole|><|fim▁end|>
|
class TestRoute(BaseHandler):
def get(self, file):
return self.render(str(file) + '.jade', show_h1=1)
|
<|file_name|>searching_polyhedron_vertices_with_fuzzy_sphere.cpp<|end_file_name|><|fim▁begin|>#include <CGAL/Exact_predicates_inexact_constructions_kernel.h>
#include <CGAL/Polyhedron_3.h>
#include <CGAL/IO/Polyhedron_iostream.h>
#include <CGAL/boost/graph/graph_traits_Polyhedron_3.h>
#include <CGAL/Search_traits_3.h>
#include <CGAL/Search_traits_adapter.h>
#include <CGAL/Kd_tree.h>
#include <CGAL/Fuzzy_sphere.h>
#include <fstream>
typedef CGAL::Exact_predicates_inexact_constructions_kernel Kernel;
typedef Kernel::Point_3 Point_3;
typedef CGAL::Polyhedron_3<Kernel> Mesh;
typedef boost::graph_traits<Mesh>::vertex_descriptor vertex_descriptor;
typedef boost::property_map<Mesh,CGAL::vertex_point_t>::type Vertex_point_pmap;
typedef CGAL::Search_traits_3<Kernel> Traits_base;
typedef CGAL::Search_traits_adapter<vertex_descriptor, Vertex_point_pmap, Traits_base> Traits;
typedef CGAL::Kd_tree<Traits> Tree;
typedef Tree::Splitter Splitter;
int main(int argc, char* argv[])
{
Mesh mesh;
std::ifstream in((argc>1)?argv[1]:"data/tripod.off");
in >> mesh;
Vertex_point_pmap vppmap = get(CGAL::vertex_point,mesh);
// Insert number_of_data_points in the tree
Tree tree(vertices(mesh).begin(),
vertices(mesh).end(),
Splitter(),
Traits(vppmap)
);<|fim▁hole|>
// search vertices
CGAL::Fuzzy_sphere<Traits> fz(query, radius, epsilon);
//collect vertices that are inside the sphere
std::list<vertex_descriptor> result;
tree.search(std::back_inserter(result), fz);
std::cout << "There are " << result.size() << " vertices inside the fuzzy sphere\n";
return 0;
}<|fim▁end|>
|
Point_3 query(0.0, 0.0, 0.0);
double radius = 0.5;
double epsilon = 0.01;
|
<|file_name|>client.js<|end_file_name|><|fim▁begin|><|fim▁hole|>
export default axios.create({
baseURL: 'http://localhost:9000/v1/'
});<|fim▁end|>
|
import axios from 'axios';
|
<|file_name|>processor.py<|end_file_name|><|fim▁begin|>from indra.statements import *
from rdflib import URIRef, Namespace
from rdflib.namespace import RDF
import collections
import urllib
import re
import keyword
import warnings
BEL = Namespace("http://www.openbel.org/")
prefixes = """
PREFIX belvoc: <http://www.openbel.org/vocabulary/>
PREFIX belsc: <http://www.openbel.org/bel/>
PREFIX belns: <http://www.openbel.org/bel/namespace/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>"""
phospho_mods = [
'PhosphorylationSerine',
'PhosphorylationThreonine',
'PhosphorylationTyrosine',
'Phosphorylation',
]
class InvalidNameError(ValueError):
def __init__(self, name):
ValueError.__init__(self, "Not a valid name: %s" % name)
def name_from_uri(uri):
"""Make the URI term usable as a valid Python identifier, if possible.
First strips of the extra URI information by calling term_from_uri,
then checks to make sure the name is a valid Python identifier.
Currently fixes identifiers starting with numbers by prepending with
the letter 'p'. For other cases it raises an exception.
This function should be called when the string that is returned is to be
used as a PySB component name, which are required to be valid Python
identifiers.
"""
name = term_from_uri(uri)
# Handle the case where the string starts with a number
if name[0].isdigit():
name = 'p' + name
if re.match("[_A-Za-z][_a-zA-Z0-9]*$", name) \
and not keyword.iskeyword(name):
pass
else:
raise InvalidNameError(name)
return name
def gene_name_from_uri(uri):
return name_from_uri(uri).upper()
def term_from_uri(uri):
"""Basic conversion of RDF URIs to more friendly strings.
Removes prepended URI information, and replaces spaces and hyphens with
underscores.
"""
if uri is None:
return None
# Strip gene name off from URI
term = uri.rsplit('/')[-1]
# Decode URL to handle spaces, special characters
term = urllib.unquote(term)
# Replace any spaces, hyphens, or periods with underscores
term = term.replace(' ', '_')
term = term.replace('-', '_')
term = term.replace('.', '_')
term = term.encode('ascii', 'ignore')
return term
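# Illustrative example (the URI is hypothetical): the last path component is
# URL-unquoted and spaces/hyphens/periods become underscores, e.g.
#     term_from_uri('http://www.openbel.org/bel/namespace/p.HGNC.MAPK1%20mod')
#     # -> 'p_HGNC_MAPK1_mod'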
def strip_statement(uri):
uri = uri.replace(r'http://www.openbel.org/bel/', '')
uri = uri.replace(r'http://www.openbel.org/vocabulary/', '')
return uri
class BelProcessor(object):
def __init__(self, g):
self.g = g
self.statements = []
self.all_stmts = []
self.converted_stmts = []
self.degenerate_stmts = []
self.indirect_stmts = []
def get_evidence(self, statement):
evidence = None
citation = None
annotations = []
# Query for evidence text and citation
q_evidence = prefixes + """
SELECT ?evidenceText ?citation
WHERE {
<%s> belvoc:hasEvidence ?evidence .
?evidence belvoc:hasEvidenceText ?evidenceText .
?evidence belvoc:hasCitation ?citation .
}
""" % statement.format()
res_evidence = self.g.query(q_evidence)
for stmt in res_evidence:
try:
evidence = stmt[0].format()
citation = stmt[1].format()
except KeyError:
warnings.warn('Problem converting evidence/citation string')
# Query for all annotations of the statement
q_annotations = prefixes + """
SELECT ?annotation
WHERE {
<%s> belvoc:hasEvidence ?evidence .
?evidence belvoc:hasAnnotation ?annotation .
}
""" % statement.format()
res_annotations = self.g.query(q_annotations)
for stmt in res_annotations:
annotations.append(stmt[0].format())
return (citation, evidence, annotations)
def get_modifications(self):
q_phospho = prefixes + """
SELECT ?enzName ?actType ?substrateName ?mod ?pos
?stmt
WHERE {
?stmt a belvoc:Statement .
?stmt belvoc:hasRelationship belvoc:DirectlyIncreases .
?stmt belvoc:hasSubject ?subject .
?stmt belvoc:hasObject ?object .
?subject a belvoc:AbundanceActivity .
?subject belvoc:hasActivityType ?actType .
?subject belvoc:hasChild ?enzyme .
?enzyme a belvoc:ProteinAbundance .
?enzyme belvoc:hasConcept ?enzName .
?object a belvoc:ModifiedProteinAbundance .
?object belvoc:hasModificationType ?mod .
?object belvoc:hasChild ?substrate .
?substrate belvoc:hasConcept ?substrateName .
OPTIONAL { ?object belvoc:hasModificationPosition ?pos . }
}
"""
# Now make the PySB for the phosphorylation
res_phospho = self.g.query(q_phospho)
for stmt in res_phospho:
(citation, evidence, annotations) = self.get_evidence(stmt[5])
# Parse out the elements of the query
enz_name = gene_name_from_uri(stmt[0])
enz = Agent(enz_name)
act_type = name_from_uri(stmt[1])
sub_name = gene_name_from_uri(stmt[2])
sub = Agent(sub_name)
mod = term_from_uri(stmt[3])
mod_pos = term_from_uri(stmt[4])
stmt_str = strip_statement(stmt[5])
# Mark this as a converted statement
self.converted_stmts.append(stmt_str)
if act_type == 'Kinase' and mod in phospho_mods:
self.statements.append(
Phosphorylation(enz, sub, mod, mod_pos, stmt_str,<|fim▁hole|> self.statements.append(
Hydroxylation(enz, sub, mod, mod_pos, stmt_str,
citation, evidence, annotations))
elif mod == 'Sumoylation':
self.statements.append(
Sumoylation(enz, sub, mod, mod_pos, stmt_str,
citation, evidence, annotations))
elif mod == 'Acetylation':
self.statements.append(
Acetylation(enz, sub, mod, mod_pos, stmt_str,
citation, evidence, annotations))
elif mod == 'Ubiquitination':
self.statements.append(
Ubiquitination(enz, sub, mod, mod_pos, stmt_str,
citation, evidence, annotations))
else:
print "Warning: Unknown modification type!"
print("Activity: %s, Mod: %s, Mod_Pos: %s" %
(act_type, mod, mod_pos))
else:
print "Warning: Unknown modification type!"
print("Activity: %s, Mod: %s, Mod_Pos: %s" %
(act_type, mod, mod_pos))
def get_dephosphorylations(self):
q_phospho = prefixes + """
SELECT ?phosName ?substrateName ?mod ?pos ?stmt
WHERE {
?stmt a belvoc:Statement .
?stmt belvoc:hasRelationship belvoc:DirectlyDecreases .
?stmt belvoc:hasSubject ?subject .
?stmt belvoc:hasObject ?object .
?subject belvoc:hasActivityType belvoc:Phosphatase .
?subject belvoc:hasChild ?phosphatase .
?phosphatase a belvoc:ProteinAbundance .
?phosphatase belvoc:hasConcept ?phosName .
?object a belvoc:ModifiedProteinAbundance .
?object belvoc:hasModificationType ?mod .
?object belvoc:hasChild ?substrate .
?substrate belvoc:hasConcept ?substrateName .
OPTIONAL { ?object belvoc:hasModificationPosition ?pos . }
}
"""
# Now make the PySB for the phosphorylation
res_phospho = self.g.query(q_phospho)
for stmt in res_phospho:
(citation, evidence, annotations) = self.get_evidence(stmt[4])
# Parse out the elements of the query
phos_name = gene_name_from_uri(stmt[0])
phos = Agent(phos_name)
sub_name = gene_name_from_uri(stmt[1])
sub = Agent(sub_name)
mod = term_from_uri(stmt[2])
mod_pos = term_from_uri(stmt[3])
stmt_str = strip_statement(stmt[4])
# Mark this as a converted statement
self.converted_stmts.append(stmt_str)
self.statements.append(
Dephosphorylation(phos, sub, mod, mod_pos,
stmt_str, citation,
evidence, annotations))
def get_composite_activating_mods(self):
# To eliminate multiple matches, we use pos1 < pos2 but this will
# only work if the pos is given, otherwise multiple matches of
# the same mod combination may appear in the result
q_mods = prefixes + """
SELECT ?speciesName ?actType ?mod1 ?pos1 ?mod2 ?pos2 ?rel ?stmt
WHERE {
?stmt a belvoc:Statement .
?stmt belvoc:hasRelationship ?rel .
?stmt belvoc:hasSubject ?subject .
?stmt belvoc:hasObject ?object .
?object belvoc:hasActivityType ?actType .
?object belvoc:hasChild ?species .
?species a belvoc:ProteinAbundance .
?species belvoc:hasConcept ?speciesName .
?subject a belvoc:CompositeAbundance .
?subject belvoc:hasChild ?subject1 .
?subject1 a belvoc:ModifiedProteinAbundance .
?subject1 belvoc:hasModificationType ?mod1 .
?subject1 belvoc:hasChild ?species .
?subject belvoc:hasChild ?subject2 .
?subject2 a belvoc:ModifiedProteinAbundance .
?subject2 belvoc:hasModificationType ?mod2 .
?subject2 belvoc:hasChild ?species .
OPTIONAL { ?subject1 belvoc:hasModificationPosition ?pos1 . }
OPTIONAL { ?subject2 belvoc:hasModificationPosition ?pos2 . }
FILTER ((?rel = belvoc:DirectlyIncreases ||
?rel = belvoc:DirectlyDecreases) &&
?pos1 < ?pos2)
}
"""
# Now make the PySB for the phosphorylation
res_mods = self.g.query(q_mods)
for stmt in res_mods:
(citation, evidence, annotations) = self.get_evidence(stmt[7])
# Parse out the elements of the query
species_name = gene_name_from_uri(stmt[0])
species = Agent(species_name)
act_type = term_from_uri(stmt[1])
mod1 = term_from_uri(stmt[2])
mod_pos1 = term_from_uri(stmt[3])
mod2 = term_from_uri(stmt[4])
mod_pos2 = term_from_uri(stmt[5])
rel = term_from_uri(stmt[6])
if rel == 'DirectlyDecreases':
rel = 'decreases'
else:
rel = 'increases'
stmt_str = strip_statement(stmt[7])
# Mark this as a converted statement
self.converted_stmts.append(stmt_str)
self.statements.append(
ActivityModification(species, (mod1, mod2),
(mod_pos1, mod_pos2),
rel, act_type, stmt_str,
citation, evidence, annotations))
def get_activating_mods(self):
q_mods = prefixes + """
SELECT ?speciesName ?actType ?mod ?pos ?rel ?stmt
WHERE {
?stmt a belvoc:Statement .
?stmt belvoc:hasRelationship ?rel .
?stmt belvoc:hasSubject ?subject .
?stmt belvoc:hasObject ?object .
?object belvoc:hasActivityType ?actType .
?object belvoc:hasChild ?species .
?species a belvoc:ProteinAbundance .
?species belvoc:hasConcept ?speciesName .
?subject a belvoc:ModifiedProteinAbundance .
?subject belvoc:hasModificationType ?mod .
?subject belvoc:hasChild ?species .
OPTIONAL { ?subject belvoc:hasModificationPosition ?pos . }
FILTER (?rel = belvoc:DirectlyIncreases ||
?rel = belvoc:DirectlyDecreases)
}
"""
# Now make the PySB for the phosphorylation
res_mods = self.g.query(q_mods)
for stmt in res_mods:
(citation, evidence, annotations) = self.get_evidence(stmt[5])
# Parse out the elements of the query
species_name = gene_name_from_uri(stmt[0])
species = Agent(species_name)
act_type = term_from_uri(stmt[1])
mod = term_from_uri(stmt[2])
mod_pos = term_from_uri(stmt[3])
rel = term_from_uri(stmt[4])
if rel == 'DirectlyDecreases':
rel = 'decreases'
else:
rel = 'increases'
stmt_str = strip_statement(stmt[5])
# Mark this as a converted statement
self.converted_stmts.append(stmt_str)
self.statements.append(
ActivityModification(species, (mod,), (mod_pos,), rel,
act_type, stmt_str,
citation, evidence, annotations))
def get_complexes(self):
# Find all complexes described in the corpus
q_cmplx = prefixes + """
SELECT ?complexTerm ?childName
WHERE {
?complexTerm a belvoc:Term .
?complexTerm a belvoc:ComplexAbundance .
?complexTerm belvoc:hasChild ?child .
?child belvoc:hasConcept ?childName .
}
"""
# Run the query
res_cmplx = self.g.query(q_cmplx)
# Store the members of each complex in a dict of lists, keyed by the
# term for the complex
cmplx_dict = collections.defaultdict(list)
for stmt in res_cmplx:
cmplx_name = term_from_uri(stmt[0])
child_name = gene_name_from_uri(stmt[1])
child = Agent(child_name)
cmplx_dict[cmplx_name].append(child)
# Now iterate over the stored complex information and create binding
# statements
for cmplx_name, cmplx_list in cmplx_dict.iteritems():
if len(cmplx_list) < 2:
msg = 'Complex %s has less than 2 members! Skipping.' % \
cmplx_name
warnings.warn(msg)
else:
self.statements.append(Complex(cmplx_list))
def get_activating_subs(self):
"""
p_HGNC_NRAS_sub_Q_61_K_DirectlyIncreases_gtp_p_HGNC_NRAS
p_HGNC_KRAS_sub_G_12_R_DirectlyIncreases_gtp_p_PFH_RAS_Family
p_HGNC_BRAF_sub_V_600_E_DirectlyIncreases_kin_p_HGNC_BRAF
"""
q_mods = prefixes + """
SELECT ?enzyme_name ?sub_label ?act_type ?stmt
WHERE {
?stmt a belvoc:Statement .
?stmt belvoc:hasRelationship belvoc:DirectlyIncreases .
?stmt belvoc:hasSubject ?subject .
?stmt belvoc:hasObject ?object .
?subject a belvoc:ProteinAbundance .
?subject belvoc:hasConcept ?enzyme_name .
?subject belvoc:hasChild ?sub_expr .
?sub_expr rdfs:label ?sub_label .
?object a belvoc:AbundanceActivity .
?object belvoc:hasActivityType ?act_type .
?object belvoc:hasChild ?enzyme .
?enzyme a belvoc:ProteinAbundance .
?enzyme belvoc:hasConcept ?enzyme_name .
}
"""
# Now make the PySB for the phosphorylation
res_mods = self.g.query(q_mods)
for stmt in res_mods:
(citation, evidence, annotations) = self.get_evidence(stmt[3])
# Parse out the elements of the query
enz_name = gene_name_from_uri(stmt[0])
enz = Agent(enz_name)
sub_expr = term_from_uri(stmt[1])
act_type = term_from_uri(stmt[2])
# Parse the WT and substituted residues from the node label.
# Strangely, the RDF for substituted residue doesn't break the
# terms of the BEL expression down into their meaning, as happens
# for modified protein abundances. Instead, the substitution
# just comes back as a string, e.g., "sub(V,600,E)". This code
# parses the arguments back out using a regular expression.
match = re.match('sub\(([A-Z]),([0-9]*),([A-Z])\)', sub_expr)
if match:
matches = match.groups()
wt_residue = matches[0]
position = matches[1]
sub_residue = matches[2]
else:
print("Warning: Could not parse substitution expression %s" %
sub_expr)
continue
stmt_str = strip_statement(stmt[3])
# Mark this as a converted statement
self.converted_stmts.append(stmt_str)
self.statements.append(
ActivatingSubstitution(enz, wt_residue, position,
sub_residue, act_type,
stmt_str,
citation, evidence, annotations))
def get_activity_activity(self):
# Query for all statements where the activity of one protein
# directlyIncreases the activity of another protein, without reference
# to a modification.
q_stmts = prefixes + """
SELECT ?subjName ?subjActType ?rel ?objName ?objActType
?stmt
WHERE {
?stmt a belvoc:Statement .
?stmt belvoc:hasSubject ?subj .
?stmt belvoc:hasObject ?obj .
?stmt belvoc:hasRelationship ?rel .
?subj belvoc:hasActivityType ?subjActType .
?subj belvoc:hasChild ?subjProt .
?subjProt belvoc:hasConcept ?subjName .
?obj belvoc:hasActivityType ?objActType .
?obj belvoc:hasChild ?objProt .
?objProt belvoc:hasConcept ?objName .
FILTER (?rel = belvoc:DirectlyIncreases ||
?rel = belvoc:DirectlyDecreases)
}
"""
res_stmts = self.g.query(q_stmts)
for stmt in res_stmts:
(citation, evidence, annotations) = self.get_evidence(stmt[5])
subj_name = gene_name_from_uri(stmt[0])
subj = Agent(subj_name)
subj_activity = name_from_uri(stmt[1])
rel = term_from_uri(stmt[2])
if rel == 'DirectlyDecreases':
rel = 'decreases'
else:
rel = 'increases'
obj_name = gene_name_from_uri(stmt[3])
obj = Agent(obj_name)
obj_activity = name_from_uri(stmt[4])
stmt_str = strip_statement(stmt[5])
# Mark this as a converted statement
self.converted_stmts.append(stmt_str)
# Distinguish the case when the activator is a RasGTPase
# (since this may involve unique and stereotyped mechanisms)
if subj_activity == 'GtpBound':
self.statements.append(
RasGtpActivityActivity(subj, subj_activity,
rel, obj, obj_activity,
stmt_str,
citation, evidence, annotations))
# If the object is a Ras-like GTPase, and the subject *increases*
# its GtpBound activity, then the subject is a RasGEF
elif obj_activity == 'GtpBound' and \
rel == 'DirectlyIncreases':
self.statements.append(
RasGef(subj, subj_activity, obj,
stmt_str, citation, evidence, annotations))
# If the object is a Ras-like GTPase, and the subject *decreases*
# its GtpBound activity, then the subject is a RasGAP
elif obj_activity == 'GtpBound' and \
rel == 'DirectlyDecreases':
self.statements.append(
RasGap(subj, subj_activity, obj,
stmt_str, citation, evidence, annotations))
# Otherwise, create a generic Activity->Activity statement
else:
self.statements.append(
ActivityActivity(subj, subj_activity,
rel, obj, obj_activity,
stmt_str,
citation, evidence, annotations))
"""
#print "--------------------------------"
print stmt_str
print("This statement says that:")
print("%s activity increases activity of %s" %
(subj_name, obj_name))
print "It doesn't specify the site."
act_mods = []
for bps in self.statements:
if type(bps) == ActivatingModification and \
bps.monomer_name == obj_name:
act_mods.append(bps)
# If we know about an activation modification...
if act_mods:
print "However, I happen to know about the following"
print "activating modifications for %s:" % obj_name
for act_mod in act_mods:
print " %s at %s" % (act_mod.mod, act_mod.mod_pos)
"""
def get_all_direct_statements(self):
"""Get all directlyIncreases/Decreases statements in the corpus.
Stores the results of the query in self.all_stmts.
"""
print "Getting all direct statements...\n"
q_stmts = prefixes + """
SELECT ?stmt
WHERE {
?stmt a belvoc:Statement .
?stmt belvoc:hasSubject ?subj .
?stmt belvoc:hasObject ?obj .
{
{ ?subj a belvoc:AbundanceActivity . }
UNION
{ ?subj a belvoc:ComplexAbundance . }
UNION
{ ?subj a belvoc:ProteinAbundance . }
UNION
{ ?subj a belvoc:ModifiedProteinAbundance . }
}
{
{ ?obj a belvoc:AbundanceActivity . }
UNION
{ ?obj a belvoc:ComplexAbundance . }
UNION
{ ?obj a belvoc:ProteinAbundance . }
UNION
{ ?obj a belvoc:ModifiedProteinAbundance . }
}
{
{ ?stmt belvoc:hasRelationship belvoc:DirectlyIncreases . }
UNION
{ ?stmt belvoc:hasRelationship belvoc:DirectlyDecreases . }
}
}
"""
q_stmts = prefixes + """
SELECT ?stmt
WHERE {
?stmt a belvoc:Statement .
{
{ ?stmt belvoc:hasRelationship belvoc:DirectlyIncreases . }
UNION
{ ?stmt belvoc:hasRelationship belvoc:DirectlyDecreases . }
}
}
"""
res_stmts = self.g.query(q_stmts)
self.all_stmts = [strip_statement(stmt[0]) for stmt in res_stmts]
def get_indirect_statements(self):
q_stmts = prefixes + """
SELECT ?stmt
WHERE {
?stmt a belvoc:Statement .
{
{ ?stmt belvoc:hasRelationship belvoc:Increases . }
UNION
{ ?stmt belvoc:hasRelationship belvoc:Decreases . }
}
}
"""
res_stmts = self.g.query(q_stmts)
self.indirect_stmts = [strip_statement(stmt[0]) for stmt in res_stmts]
def get_degenerate_statements(self):
print "Checking for 'degenerate' statements...\n"
# Get rules of type protein X -> activity Y
q_stmts = prefixes + """
SELECT ?stmt
WHERE {
?stmt a belvoc:Statement .
?stmt belvoc:hasSubject ?subj .
?stmt belvoc:hasObject ?obj .
{
{ ?stmt belvoc:hasRelationship belvoc:DirectlyIncreases . }
UNION
{ ?stmt belvoc:hasRelationship belvoc:DirectlyDecreases . }
}
{
{ ?subj a belvoc:ProteinAbundance . }
UNION
{ ?subj a belvoc:ModifiedProteinAbundance . }
}
?subj belvoc:hasConcept ?xName .
{
{
?obj a belvoc:ProteinAbundance .
?obj belvoc:hasConcept ?yName .
}
UNION
{
?obj a belvoc:ModifiedProteinAbundance .
?obj belvoc:hasChild ?proteinY .
?proteinY belvoc:hasConcept ?yName .
}
UNION
{
?obj a belvoc:AbundanceActivity .
?obj belvoc:hasChild ?objChild .
?objChild a belvoc:ProteinAbundance .
?objChild belvoc:hasConcept ?yName .
}
}
FILTER (?xName != ?yName)
}
"""
res_stmts = self.g.query(q_stmts)
print "Protein -> Protein/Activity statements:"
print "---------------------------------------"
for stmt in res_stmts:
stmt_str = strip_statement(stmt[0])
print stmt_str
self.degenerate_stmts.append(stmt_str)
def print_statement_coverage(self):
"""Display how many of the direct statements have been converted,
and how many are considered 'degenerate' and not converted."""
if not self.all_stmts:
self.get_all_direct_statements()
if not self.degenerate_stmts:
self.get_degenerate_statements()
if not self.indirect_stmts:
self.get_indirect_statements()
print
print("Total indirect statements: %d" % len(self.indirect_stmts))
print("Total direct statements: %d" % len(self.all_stmts))
print("Converted statements: %d" % len(self.converted_stmts))
print("Degenerate statements: %d" % len(self.degenerate_stmts))
print(">> Total unhandled statements: %d" %
(len(self.all_stmts) - len(self.converted_stmts) -
len(self.degenerate_stmts)))
print
print "--- Unhandled statements ---------"
for stmt in self.all_stmts:
if not (stmt in self.converted_stmts or
stmt in self.degenerate_stmts):
print stmt
def print_statements(self):
for i, stmt in enumerate(self.statements):
print "%s: %s" % (i, stmt)<|fim▁end|>
|
citation, evidence, annotations))
elif act_type == 'Catalytic':
if mod == 'Hydroxylation':
|
<|file_name|>base_filters.py<|end_file_name|><|fim▁begin|>import re
import os
import pytz
from PIL import Image
from dateutil.parser import parse
from datetime import datetime
from decimal import Decimal
from django.template import Library
from django.conf import settings
from django.template.defaultfilters import stringfilter
from django.utils import formats
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape, strip_tags, urlize
from django.contrib.auth.models import AnonymousUser
from django.core.files.storage import default_storage
register = Library()
@register.filter(name="localize_date")
def localize_date(value, to_tz=None):
from timezones.utils import adjust_datetime_to_timezone
try:
if to_tz is None:
to_tz = settings.UI_TIME_ZONE
from_tz = settings.TIME_ZONE
return adjust_datetime_to_timezone(value, from_tz=from_tz, to_tz=to_tz)
except AttributeError:
return ''
localize_date.is_safe = True
@register.filter_function
def date_short(value, arg=None):
"""Formats a date according to the given format."""
from django.utils.dateformat import format
from tendenci.apps.site_settings.utils import get_setting
if not value:
return u''
if arg is None:
s_date_format = get_setting('site', 'global', 'dateformat')
if s_date_format:
arg = s_date_format
else:
arg = settings.SHORT_DATETIME_FORMAT
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
date_short.is_safe = False
@register.filter_function
def date_long(value, arg=None):
"""Formats a date according to the given format."""
from django.utils.dateformat import format
from tendenci.apps.site_settings.utils import get_setting
if not value:
return u''
if arg is None:
s_date_format = get_setting('site', 'global', 'dateformatlong')
if s_date_format:
arg = s_date_format
else:
arg = settings.DATETIME_FORMAT
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
date_long.is_safe = False
@register.filter_function
def date(value, arg=None):
"""Formats a date according to the given format."""
from django.utils.dateformat import format
if not value:
return u''
if arg is None:
arg = settings.DATETIME_FORMAT
else:
if arg == 'long':
return date_long(value)
if arg == 'short':
return date_short(value)
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
date.is_safe = False
@register.filter_function
def order_by(queryset, args):
args = [x.strip() for x in args.split(',')]
return queryset.order_by(*args)
@register.filter_function
def str_to_date(string, args=None):
"""Takes a string and converts it to a datetime object"""
date = parse(string)
if date:
return date
return ''
@register.filter_function
def exif_to_date(s, fmt='%Y:%m:%d %H:%M:%S'):
"""
The format of datetime in exif is as follows:
%Y:%m:%d %H:%M:%S
Convert the string with this format to a datetime object.
"""
if not s:
return None
try:
return datetime.strptime(s, fmt)
except ValueError:
return None
@register.filter_function
def in_group(user, group):
if group:
if isinstance(user, AnonymousUser):
return False
return group in [dict['pk'] for dict in user.group_set.values('pk')]
else:
return False
@register.filter
def domain(link):
from urlparse import urlparse
link = urlparse(link)
return link.hostname
@register.filter
def strip_template_tags(string):
p = re.compile('{[#{%][^#}%]+[%}#]}')
return re.sub(p, '', string)
@register.filter
@stringfilter
def stripentities(value):
"""Strips all [X]HTML tags."""
from django.utils.html import strip_entities
return strip_entities(value)
stripentities.is_safe = True
@register.filter
def format_currency(value):
"""format currency"""
from tendenci.apps.base.utils import tcurrency
return tcurrency(value)
format_currency.is_safe = True
<|fim▁hole|> return obj.object
else:
return obj
@register.filter
def scope(object):
return dir(object)
@register.filter
def obj_type(object):
"""
Return object type
"""
return type(object)
@register.filter
def is_iterable(object):
"""
Return boolean
Is the object iterable or not
"""
try:
iter(object)
return True
except TypeError:
return False
@register.filter
@stringfilter
def basename(path):
from os.path import basename
return basename(path)
@register.filter
def date_diff(value, date_to_compare=None):
"""Compare two dates and return the difference in days"""
import datetime
if not isinstance(value, datetime.datetime):
return 0
if not isinstance(date_to_compare, datetime.datetime):
date_to_compare = datetime.datetime.now()
return (date_to_compare - value).days
@register.filter
def first_chars(string, arg):
""" returns the first x characters from a string """
string = str(string)
if arg:
if not arg.isdigit():
return string
return string[:int(arg)]
else:
return string
return string
@register.filter
def rss_date(value, arg=None):
"""Formats a date according to the given format."""
from django.utils import formats
from django.utils.dateformat import format
from datetime import datetime
if not value:
return u''
else:
value = datetime(*value[:-3])
if arg is None:
arg = settings.DATE_FORMAT
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
rss_date.is_safe = False
@register.filter()
def obfuscate_email(email, linktext=None, autoescape=None):
"""
Given a string representing an email address,
returns a mailto link with rot13 JavaScript obfuscation.
Accepts an optional argument to use as the link text;
otherwise uses the email address itself.
"""
if autoescape:
esc = conditional_escape
else:
esc = lambda x: x
email = re.sub('@', '\\\\100', re.sub('\.', '\\\\056', \
esc(email))).encode('rot13')
if linktext:
linktext = esc(linktext).encode('rot13')
else:
linktext = email
rotten_link = """<script type="text/javascript">document.write \
("<n uers=\\\"znvygb:%s\\\">%s<\\057n>".replace(/[a-zA-Z]/g, \
function(c){return String.fromCharCode((c<="Z"?90:122)>=\
(c=c.charCodeAt(0)+13)?c:c-26);}));</script>""" % (email, linktext)
return mark_safe(rotten_link)
obfuscate_email.needs_autoescape = True
@register.filter_function
def split_str(s, args):
"""
Split a string using the python string split method
"""
if args:
if isinstance(s, str):
splitter = args[0]
return s.split(splitter)
return s
return s
@register.filter_function
def str_basename(s):
"""
Get the basename using the python basename method
"""
return basename(s)
@register.filter
@stringfilter
def twitterize(value, autoescape=None):
value = strip_tags(value)
# Link URLs
value = urlize(value, nofollow=False, autoescape=autoescape)
# Link twitter usernames for the first person
value = re.sub(r'(^[^:]+)', r'<a href="http://twitter.com/\1">\1</a>', value)
# Link twitter usernames prefixed with @
value = re.sub(r'(\s+|\A)@([a-zA-Z0-9\-_]*)\b', r'\1<a href="http://twitter.com/\2">@\2</a>', value)
# Link hash tags
value = re.sub(r'(\s+|\A)#([a-zA-Z0-9\-_]*)\b', r'\1<a href="http://search.twitter.com/search?q=%23\2">#\2</a>', value)
return mark_safe(value)
twitterize.is_safe = True
twitterize.needs_autoescape = True
@register.filter
@stringfilter
def twitterdate(value):
from datetime import datetime, timedelta
time = value.replace(" +0000", "")
dt = datetime.strptime(time, "%a, %d %b %Y %H:%M:%S")
return dt + timedelta(hours=-6)
@register.filter
def thumbnail(file, size='200x200'):
# defining the size
x, y = [int(x) for x in size.split('x')]
# defining the filename and the miniature filename
filehead, filetail = os.path.split(file.name)
basename, format = os.path.splitext(filetail)
miniature = basename + '_' + size + format
filename = file.name
miniature_filename = os.path.join(filehead, miniature)
filehead, filetail = os.path.split(file.url)
miniature_url = filehead + '/' + miniature
thumbnail_exist = False
if default_storage.exists(miniature_filename):
mt_filename = default_storage.modified_time(filename)
mt_miniature_filename = default_storage.modified_time(
miniature_filename)
if mt_filename > mt_miniature_filename:
# remove the miniature
default_storage.delete(miniature_filename)
else:
thumbnail_exist = True
# if the image wasn't already resized, resize it
if not thumbnail_exist:
if not default_storage.exists(filename):
return u''
image = Image.open(default_storage.open(filename))
image.thumbnail([x, y], Image.ANTIALIAS)
f = default_storage.open(miniature_filename, 'w')
image.save(f, image.format, quality=90, optimize=1)
f.close()
return miniature_url
@register.filter_function
def datedelta(dt, range_):
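# Illustrative template usage (assumed, not from the original source):
# {{ some_date|datedelta:"+days=30" }} adds 30 days, "-days=30" subtracts them.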
from datetime import timedelta
range_type = 'add'
# parse the range
if '+' in range_:
range_ = range_[1:len(range_)]
if '-' in range_:
range_type = 'subtract'
range_ = range_[1:len(range_)]
k, v = range_.split('=')
set_range = {
str(k): int(v)
}
# set the date
if range_type == 'add':
dt = dt + timedelta(**set_range)
if range_type == 'subtract':
dt = dt - timedelta(**set_range)
return dt
@register.filter
def split(str, splitter):
return str.split(splitter)
@register.filter
def tag_split(str):
str = "".join(str)
str = str.replace(", ", ",")
return str.split(",")
@register.filter
def make_range(value):
try:
value = int(value)
if value > 0:
return range(int(value))
return []
except:
return []
@register.filter
def underscore_space(value):
return value.replace("_", " ")
@register.filter
def format_string(value, arg):
return arg % value
@register.filter
def md5_gs(value, arg=None):
import hashlib
from datetime import datetime, timedelta
hashdt = ''
if arg and int(arg):
timestamp = datetime.now() + timedelta(hours=int(arg))
hashdt = hashlib.md5(timestamp.strftime("%Y;%m;%d;%H;%M").replace(';0', ';')).hexdigest()
return ''.join([value, hashdt])
@register.filter
def multiply(value, arg):
return Decimal(str(value)) * Decimal(str(arg))
@register.filter
def add_decimal(value, arg):
return Decimal(str(value)) + Decimal(str(arg))
@register.filter
def phonenumber(value):
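# Illustrative results (assumed, based on the logic below):
# "2125551234" -> "(212) 555-1234"; "12125551234x12" -> "(212) 555-1234 x12"
# (a leading country code of 1 is folded into the plain US format).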
if value:
# split number from extension or any text
x = re.split(r'([a-zA-Z]+)', value)
# clean number
y = ''.join(i for i in x[0] if i.isdigit())
if len(y) > 10: # has country code
code = y[:len(y)-10]
number = y[len(y)-10:]
if code == '1':
number = "(%s) %s-%s" %(number[:3], number[3:6], number[6:])
else:
number = "+%s %s %s %s" %(code, number[:3], number[3:6], number[6:])
else: # no country code
number = "(%s) %s-%s" %(y[:3], y[3:6], y[6:])
# attach additional text extension
ext = ''
for i in xrange(1, len(x)):
ext = ''.join((ext, x[i]))
if ext:
return ' '.join((number, ext))
else:
return number
@register.filter
def timezone_label(value):
try:
now = datetime.now(pytz.timezone(value))
tzinfo = now.strftime("%z")
return "(GMT%s) %s" %(tzinfo, value)
except:
return ""
@register.filter
def field_to_string(value):
if isinstance(value, str) or isinstance(value, unicode):
return value
if isinstance(value, list):
if len(value) == 0:
return ""
if len(value) == 1:
return str(value[0])
if len(value) == 2:
return "%s and %s" % (value[0], value[1])
return ", ".join(value)
return str(value)<|fim▁end|>
|
@register.filter
def get_object(obj):
"""return obj.object if this obj has the attribute of object"""
if hasattr(obj, 'object'):
|
<|file_name|>simpleapi.rs<|end_file_name|><|fim▁begin|>// Simple API example, ported from http://lua-users.org/wiki/SimpleLuaApiExample
// This is a simple introductory example of how to interface to Lua from Rust.
// The Rust program loads a Lua script file, sets some Lua variables, runs the
// Lua script, and reads back the return value.
#![allow(uppercase_variables)]
extern crate lua;
use std::{io, os};
use std::iter::range_inclusive;
fn main() {
let mut L = lua::State::new();
L.openlibs(); // Load Lua libraries
// Load the file containing the script we are going to run
let path = Path::new("simpleapi.lua");
match L.loadfile(Some(&path)) {
Ok(_) => (),
Err(_) => {
// If something went wrong, error message is at the top of the stack
let _ = writeln!(&mut io::stderr(),
"Couldn't load file: {}", L.describe(-1));
os::set_exit_status(1);
return;
}
}
/*
* Ok, now here we go: We pass data to the lua script on the stack.
* That is, we first have to prepare Lua's virtual stack the way we
* want the script to receive it, then ask Lua to run it.
*/
L.newtable(); // We will pass a table
/*
* To put values into the table, we first push the index, then the
* value, and then call rawset() with the index of the table in the
* stack. Let's see why it's -3: In Lua, the value -1 always refers to
* the top of the stack. When you create the table with newtable(),
* the table gets pushed into the top of the stack. When you push the
* index and then the cell value, the stack looks like:
*
* - [stack bottom] -- table, index, value [top]
*
* So the -1 will refer to the cell value, thus -3 is used to refer to
* the table itself. Note that rawset() pops the last two elements
* of the stack, so that after it has been called, the table is at the
* top of the stack.<|fim▁hole|> L.rawset(-3); // Stores the pair in the table
}
// By what name is the script going to reference our table?
L.setglobal("foo");
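// The Lua script can now read the table through the global `foo`
// (e.g. foo[1], foo[2], ...).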
// Ask Lua to run our little script
match L.pcall(0, lua::MULTRET, 0) {
Ok(()) => (),
Err(_) => {
let _ = writeln!(&mut io::stderr(),
"Failed to run script: {}", L.describe(-1));
os::set_exit_status(1);
return;
}
}
// Get the returned value at the top of the stack (index -1)
let sum = L.tonumber(-1);
println!("Script returned: {}", sum);
L.pop(1); // Take the returned value out of the stack
// L's destructor will close the state for us
}<|fim▁end|>
|
*/
for i in range_inclusive(1, 5) {
L.pushinteger(i); // Push the table index
L.pushinteger(i*2); // Push the cell value
|
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
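# SIGCHLD handler: keep calling waitpid(-1, WNOHANG) until every terminated
# child has been reaped, so finished workers never linger as zombies.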
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block and return EWOULDBLOCK error
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:<|fim▁hole|> if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow to reuse the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()<|fim▁end|>
|
return
|
<|file_name|>analysis.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from os import path
from collections import defaultdict
import math
root = path.dirname(path.dirname(path.dirname(__file__)))
result_dir = path.join(root, 'results')
def get_file_name(test):
test = '%s_result' % test
return path.join(result_dir, test)
def mean(l):
return float(sum(l))/len(l) if len(l) > 0 else float('nan')
def std_dev(l):
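# population standard deviation (divides by len(l), not len(l) - 1)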
m = mean(l)
return math.sqrt(sum((x - m) ** 2 for x in l) / len(l))
def run_timing_overhead_ana():
test_name = 'timing_overhead'
file_name = get_file_name(test_name)
datas = []
with open(file_name) as f:
for l in f:
datas.append(int(l))
datas = [i for i in datas[:10000]]
print "%s mean: %f" % (test_name, mean(datas))
print "%s std dev: %f" % (test_name, std_dev(datas))
def run_loop_overhead_ana():
test_name = 'loop_overhead'
file_name = get_file_name(test_name)
datas = []
with open(file_name) as f:
for l in f:
datas.append(float(l.split(' ')[0]))
datas = [i for i in datas[:10000]]
print "%s mean: %f" % (test_name, mean(datas))
print "%s std dev: %f" % (test_name, std_dev(datas))
def run_proc_call_overhead_ana():
test_name = 'proc_call_overhead'
file_name = get_file_name(test_name)
datas = []
with open(file_name) as f:
for l in f:
if l.startswith('-'):
datas.append([])
continue
datas[-1].append(int(l.split(' ')[0]) * 1.0 / 10)
print "%s result:" % test_name
for i, data in enumerate(datas):
m = mean(data)
std = std_dev(data)
print "%f\t%f" % (m, std)
#print "%s %d mean: %f" % (test_name, i, mean(data))
#print "%s %d std dev: %f" % (test_name, i, std_dev(data))
def run_process_context_switch_ana():
test_name = 'process_context_switch'
file_name = get_file_name(test_name)
datas = []
with open(file_name) as f:
for l in f:
try:
datas.append(int(l.split(' ')[1]))
except:
pass
datas = [i for i in datas[:100]]
print "%s mean: %f" % (test_name, mean(datas))
print "%s std dev: %f" % (test_name, std_dev(datas))
def run_thread_context_switch_ana():
test_name = 'thread_context_switch'
file_name = get_file_name(test_name)
datas = []
with open(file_name) as f:
for l in f:
datas.append(int(l.split(' ')[1]))
datas = [i for i in datas[:100]]
print "%s mean: %f" % (test_name, mean(datas))
print "%s std dev: %f" % (test_name, std_dev(datas))
def run_mem_acc_ana():
test_name = 'mem_acc'
filename = get_file_name(test_name)
datas = defaultdict(lambda: defaultdict(list))
with open(filename) as f:
for l in f:
ll = l.split(' ')
step = int(ll[7])
offset = int(ll[1])<|fim▁hole|> cycle = float(ll[3])
datas[step][offset].append(cycle)
results = {}
offsets = set()
for step, v in sorted(datas.items()):
result = []
for offset, cycles in sorted(v.items()):
offsets.add(offset)
m = mean(cycles)
result.append(m)
results[step] = (result)
print "mem access time result"
fl = "step/offset\t%s" % "\t".join(str(i) for i in sorted(offsets))
print fl
for step, means in sorted(results.items()):
line = "\t".join(str(i) for i in means)
line = "%s\t%s" % (str(step), line)
print line
if __name__ == '__main__':
run_timing_overhead_ana()
run_loop_overhead_ana()
run_proc_call_overhead_ana()
run_process_context_switch_ana()
run_thread_context_switch_ana()
run_mem_acc_ana()<|fim▁end|>
| |
<|file_name|>TSEditorHandler.cpp<|end_file_name|><|fim▁begin|>/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* \file TSEditorHandler.cpp
* \author Pradeep Kadoor
* \copyright Copyright (c) 2011, Robert Bosch Engineering and Business Solutions. All rights reserved.
*/
#include "stdafx.h"
#include ".\tseditorhandler.h"
typedef HRESULT (*SHOWTSEDITORWINDOW)(void* pParentWnd);
typedef HRESULT (*SETTSDILINTERFACEPTR)(void* ptrDILIntrf);
typedef HRESULT (*POSTMESSAGETOTSWND)(UINT msg, WPARAM wParam, LPARAM lParam);
typedef HRESULT (*TSEDITORWINDOWSHOWN)();
typedef HRESULT (*TSEDITORLOADTESTSETUPFILE)(CString omFilePath);
typedef HWND (*TSEDITORHWND)();
//typedef HRESULT (*TSEDITORGETCONFIGDATA)(BYTE*& pDesBuffer, UINT& nBuffSize);
typedef HRESULT (*TSEDITORGETCONFIGDATA)(xmlNodePtr* pxmlNodePtr);
typedef HRESULT (*TSEDITORSETCONFIGDATA)(BYTE* pSrcBuffer, UINT nBuffSize);
typedef HRESULT (*TSEDITORSETXMLCONFIGDATA)(xmlDocPtr pDoc);
SHOWTSEDITORWINDOW pfShowTSEditorwindow;
SETTSDILINTERFACEPTR pfSetTSDILInterfacePtr;
POSTMESSAGETOTSWND pfPostMessageToTSWnd;
TSEDITORWINDOWSHOWN pfTSEditorWindowShown;
TSEDITORLOADTESTSETUPFILE pfTSEditorLoadTestSetupFile;
TSEDITORHWND pfTSEditorHwnd;
TSEDITORGETCONFIGDATA pfTSEditorGetConfigdata;
TSEDITORSETCONFIGDATA pfTSEditorSetConfigdata;
TSEDITORSETXMLCONFIGDATA pfTSEditorSetXMLConfigdata;
TSEditorHandler::TSEditorHandler(void)
{
m_hTSEditorHandle = NULL;
}
TSEditorHandler::~TSEditorHandler(void)
{
if ( m_hTSEditorHandle != NULL )
{
FreeLibrary(m_hTSEditorHandle);
}
}
void TSEditorHandler::vLoadTSEditor_DLL()
{
if ( m_hTSEditorHandle != NULL )
{
FreeLibrary(m_hTSEditorHandle);
m_hTSEditorHandle = NULL;
}
m_hTSEditorHandle = LoadLibrary(def_STR_TESTSETUPEDITORDLL);
vloadFuncPtrAddress();
}
void TSEditorHandler::vInitializeFuncPtrs()
{
pfShowTSEditorwindow = NULL;
pfSetTSDILInterfacePtr = NULL;
pfPostMessageToTSWnd = NULL;
pfTSEditorWindowShown = NULL;
pfTSEditorLoadTestSetupFile = NULL;
pfTSEditorHwnd = NULL;
pfTSEditorGetConfigdata = NULL;
pfTSEditorSetConfigdata = NULL;
pfTSEditorSetXMLConfigdata = NULL;
}
void TSEditorHandler::vloadFuncPtrAddress()
{
pfShowTSEditorwindow = (SHOWTSEDITORWINDOW)GetProcAddress(m_hTSEditorHandle, "TS_vShowTSEditorWindow");
pfSetTSDILInterfacePtr = (SETTSDILINTERFACEPTR)GetProcAddress(m_hTSEditorHandle, "TS_vSetDILInterfacePtr");
pfPostMessageToTSWnd = (POSTMESSAGETOTSWND)GetProcAddress(m_hTSEditorHandle, "TS_vPostMessageToTSWnd");
pfTSEditorWindowShown = (TSEDITORWINDOWSHOWN)GetProcAddress(m_hTSEditorHandle, "TS_hTSEditorWindowShown");
pfTSEditorLoadTestSetupFile = (TSEDITORLOADTESTSETUPFILE)GetProcAddress(m_hTSEditorHandle, "TS_hLoadTestSetupFile");
pfTSEditorHwnd = (TSEDITORHWND)GetProcAddress(m_hTSEditorHandle, "hGetHwnd");
pfTSEditorGetConfigdata = (TSEDITORGETCONFIGDATA)GetProcAddress(m_hTSEditorHandle, "TSE_hGetConfigurationData");
pfTSEditorSetConfigdata = (TSEDITORSETCONFIGDATA)GetProcAddress(m_hTSEditorHandle, "TSE_hSetConfigurationData");
pfTSEditorSetXMLConfigdata = (TSEDITORSETXMLCONFIGDATA)GetProcAddress(m_hTSEditorHandle, "TSE_hSetXmlConfigurationData");
}
void TSEditorHandler::vShowTSEditorWindow(void* pParentWnd)
{
if(pfShowTSEditorwindow != NULL)
{
pfShowTSEditorwindow(pParentWnd);
}
}
void TSEditorHandler::vLoadTestSetupFile(CString omFilePath)
{
if(pfTSEditorLoadTestSetupFile != NULL)
{
pfTSEditorLoadTestSetupFile(omFilePath);
}
}
HWND TSEditorHandler::vGetSafeHWnd()
{
HWND hWnd;
if(pfTSEditorHwnd != NULL)
{
hWnd = pfTSEditorHwnd();
return hWnd;
}
else
{
return NULL;
}
}
void TSEditorHandler::vGetConfigurationData(BYTE*& pDesBuffer, UINT& unBuffSize)
{
if(pfTSEditorLoadTestSetupFile != NULL)
{
// pfTSEditorGetConfigdata(pDesBuffer, unBuffSize);
}
}
void TSEditorHandler::vGetConfigurationData(xmlNodePtr& pxmlNodePtr)
{
if(pfTSEditorGetConfigdata != NULL)
{
pfTSEditorGetConfigdata(&pxmlNodePtr);
}
}
void TSEditorHandler::vSetConfigurationData(BYTE*& pSrcBuffer, UINT& unBuffSize)
{
if(pfTSEditorSetConfigdata!= NULL)
{
pfTSEditorSetConfigdata(pSrcBuffer, unBuffSize);<|fim▁hole|> if(pfTSEditorSetXMLConfigdata!= NULL)
{
pfTSEditorSetXMLConfigdata(pXmlDoc);
}
}<|fim▁end|>
|
}
}
void TSEditorHandler::vSetConfigurationData(xmlDocPtr pXmlDoc)
{
|
<|file_name|>vrstageparameters.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::VRStageParametersBinding;
use crate::dom::bindings::codegen::Bindings::VRStageParametersBinding::VRStageParametersMethods;
use crate::dom::bindings::num::Finite;
use crate::dom::bindings::reflector::{reflect_dom_object, DomObject, Reflector};
use crate::dom::bindings::root::DomRoot;
use crate::dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
use js::jsapi::{Heap, JSContext, JSObject};
use js::typedarray::{CreateWith, Float32Array};
use std::ptr;
use std::ptr::NonNull;
use webvr_traits::WebVRStageParameters;
#[dom_struct]
pub struct VRStageParameters {
reflector_: Reflector,
#[ignore_malloc_size_of = "Defined in rust-webvr"]
parameters: DomRefCell<WebVRStageParameters>,
transform: Heap<*mut JSObject>,
}
unsafe_no_jsmanaged_fields!(WebVRStageParameters);
impl VRStageParameters {
fn new_inherited(parameters: WebVRStageParameters) -> VRStageParameters {
VRStageParameters {
reflector_: Reflector::new(),
parameters: DomRefCell::new(parameters),
transform: Heap::default(),
}
}
#[allow(unsafe_code)]
pub fn new(
parameters: WebVRStageParameters,
global: &GlobalScope,
) -> DomRoot<VRStageParameters> {
let cx = global.get_cx();
rooted!(in (cx) let mut array = ptr::null_mut::<JSObject>());
unsafe {
let _ = Float32Array::create(
cx,
CreateWith::Slice(¶meters.sitting_to_standing_transform),
array.handle_mut(),
);
}
let stage_parameters = reflect_dom_object(
Box::new(VRStageParameters::new_inherited(parameters)),
global,
VRStageParametersBinding::Wrap,
);
stage_parameters.transform.set(array.get());
stage_parameters
}
#[allow(unsafe_code)]
pub fn update(&self, parameters: &WebVRStageParameters) {
unsafe {
let cx = self.global().get_cx();
typedarray!(in(cx) let array: Float32Array = self.transform.get());
if let Ok(mut array) = array {
array.update(¶meters.sitting_to_standing_transform);
}
}
*self.parameters.borrow_mut() = parameters.clone();
}
}
impl VRStageParametersMethods for VRStageParameters {
#[allow(unsafe_code)]
// https://w3c.github.io/webvr/#dom-vrstageparameters-sittingtostandingtransform
unsafe fn SittingToStandingTransform(&self, _cx: *mut JSContext) -> NonNull<JSObject> {
NonNull::new_unchecked(self.transform.get())
}
// https://w3c.github.io/webvr/#dom-vrstageparameters-sizex
fn SizeX(&self) -> Finite<f32> {
Finite::wrap(self.parameters.borrow().size_x)
}
// https://w3c.github.io/webvr/#dom-vrstageparameters-sizez<|fim▁hole|><|fim▁end|>
|
fn SizeZ(&self) -> Finite<f32> {
Finite::wrap(self.parameters.borrow().size_z)
}
}
|
<|file_name|>writefile.rs<|end_file_name|><|fim▁begin|>use std::io::File;
<|fim▁hole|>}<|fim▁end|>
|
fn main(){
let mut file = File::create(&Path::new("test.txt"));
file.write(bytes!("pewpewpew\n"));
|
<|file_name|>encoding.go<|end_file_name|><|fim▁begin|>// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package encoding
import "gonum.org/v1/gonum/graph"
// Builder is a graph that can have user-defined nodes and edges added.
type Builder interface {
graph.Graph
graph.Builder
}
// MultiBuilder is a graph that can have user-defined nodes and edges added.
type MultiBuilder interface {
graph.Multigraph
graph.MultigraphBuilder
}
// AttributeSetter is implemented by types that can set an encoded graph
// attribute.
type AttributeSetter interface {
SetAttribute(Attribute) error
}
// Attributer defines graph.Node or graph.Edge values that can
// specify graph attributes.
type Attributer interface {
Attributes() []Attribute
}
// Attribute is an encoded key value attribute pair used in graph encoding.
type Attribute struct {
Key, Value string
}
// Attributes is a helper type providing simple attribute handling.
type Attributes []Attribute
// Attributes returns all of the receiver's attributes.
func (a *Attributes) Attributes() []Attribute {
return *a
}
// SetAttribute sets attr in the receiver. Calling SetAttribute with an
// Attribute with a Key that is in the collection replaces the existing
// value and calling with an empty Value removes the attribute from the
// collection if it exists. SetAttribute always returns nil.
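//
// A minimal usage sketch (illustrative only, not part of the original file):
//
//	var attrs Attributes
//	_ = attrs.SetAttribute(Attribute{Key: "color", Value: "red"})  // adds the pair
//	_ = attrs.SetAttribute(Attribute{Key: "color", Value: "blue"}) // replaces the value
//	_ = attrs.SetAttribute(Attribute{Key: "color", Value: ""})     // removes the pair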
func (a *Attributes) SetAttribute(attr Attribute) error {
if attr.Key == "" {
return nil
}
for i, v := range *a {
if v.Key == attr.Key {
if attr.Value == "" {
(*a)[i] = (*a)[len(*a)-1]
*a = (*a)[:len(*a)-1]
return nil<|fim▁hole|> return nil
}
}
if attr.Value != "" {
*a = append(*a, attr)
}
return nil
}<|fim▁end|>
|
}
(*a)[i].Value = attr.Value
|
<|file_name|>chunks.rs<|end_file_name|><|fim▁begin|>use std::sync::{Arc, Mutex};
use simple_parallel;
use state::LSystem;
use super::{LProcessor, SimpleProcessor};
/// Parallel processor dividing a state into chunks to be individually iterated
/// within a pool of threads.
pub struct ChunksProcessor {
/// The number of symbols per full chunk.
chunk_size: usize,
/// The thread pool.
pool: simple_parallel::Pool,
}
impl ChunksProcessor {
/// Try and create a new 'ChunksProcessor' instance with the given parameters.
/// Typical values:
/// - max_tasks : number of CPU logical cores
/// - chunks_size : between 100_000 and 1_000_000 symbols per chunk
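///
/// Illustrative construction (assumed usage, mirroring the test below):
///
/// ```ignore
/// let processor = ChunksProcessor::new(4, 500_000).expect("valid parameters");
/// ```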
pub fn new(max_tasks: usize, chunks_size: usize) -> Result<ChunksProcessor, String> {
if max_tasks == 0 {
Err(format!("ChunksProcessor::new : invalid maximum tasks number ({})",
max_tasks))
} else if chunks_size == 0 {
Err(format!("ChunksProcessor::new : invalid chunks size ({})",
chunks_size))
} else {
Ok(ChunksProcessor {
chunk_size: chunks_size,
pool: simple_parallel::Pool::new(max_tasks),
})
}
}
}
impl<S> LProcessor<S> for ChunksProcessor
where S: Clone + Eq + Send + Sync
{
// TODO : better error handling...
fn iterate<'a>(&mut self, lsystem: &LSystem<'a, S>) -> Result<LSystem<'a, S>, String> {
// Set-up
let mut vec: Vec<Vec<S>> = Vec::new();
let state_len = lsystem.state().len();
if state_len == 0 {
return Err(format!("cannot iterate an empty state"));
}
let rem = state_len % self.chunk_size;
let chunks_number = state_len / self.chunk_size +
match rem {
0 => 0,
_ => 1,
};
for _ in 0..chunks_number {
vec.push(Vec::new());
}
let sub_states = Arc::new(Mutex::new(vec));
// Chunks processing
let rules = lsystem.rules().clone();
let errors = Mutex::new(String::new());
let chunks_iter = lsystem.state().chunks(self.chunk_size);
self.pool
.for_(chunks_iter.enumerate(), |(n, chunk)| {
let result: Vec<S> = match SimpleProcessor::iterate_slice(chunk, &rules) {
Ok(v) => v,
Err(why) => {
let mut error_lock = errors.lock().unwrap();
*error_lock = format!("{}\n{}", *error_lock, why);
Vec::new()
}
};
let mut chunk_data = sub_states.lock().unwrap();
chunk_data[n] = result;
});
// Error handling
let error_lock = errors.lock().unwrap();
if !error_lock.is_empty() {
return Err(format!("ChunksProcessor : iteration error(s):\n{}", *error_lock));
}
// Final assembling
let mut new_state_size = 0usize;
let mut new_state: Vec<S> = Vec::new();
let data = sub_states.lock().unwrap();
for n in 0..chunks_number {
let chunk_iterated = &data[n];
new_state_size = match new_state_size.checked_add(chunk_iterated.len()) {
Some(v) => v,
None => {
return Err(format!("ChunksProcessor::iterate : usize overflow, state too big \
for Vec"))
}
};
new_state.extend(chunk_iterated.iter().cloned());
}
Ok(LSystem::<S>::new(new_state, rules, Some(lsystem.iteration() + 1)))
}
}
#[cfg(test)]
mod test {
use rules::HashMapRules;
use state::{LSystem, new_rules_value};
use interpret::TurtleCommand;
use process::{LProcessor, ChunksProcessor};
#[test]
fn chunks_processing() {
let mut rules = HashMapRules::new(); // algae rules
rules.set_str('A', "AB", TurtleCommand::None);
rules.set_str('B', "A", TurtleCommand::None);
let expected_sizes = [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597,
2584, 4181, 6765, 10946, 17711, 28657, 46368];
let mut lsystem = LSystem::new_with_char("A", new_rules_value(rules));
let mut processor = ChunksProcessor::new(4, 10_000).ok().unwrap();<|fim▁hole|> for n in 0..expected_sizes.len() {
assert_eq!(lsystem.iteration(), n as u64);
assert_eq!(lsystem.state().len(), expected_sizes[n]);
lsystem = processor.iterate(&lsystem).ok().unwrap();
}
}
}<|fim▁end|>
| |
<|file_name|>union_find.rs<|end_file_name|><|fim▁begin|>pub struct UnionFind {
parents: Vec<usize>,
ranks: Vec<u64>
}
impl UnionFind {
pub fn new(size: usize) -> UnionFind {
UnionFind {
parents: (0..size).map(|i| i).collect(),
ranks: (0..size).map(|_| 0).collect()
}
}
pub fn find(&mut self, x: usize) -> usize {
let parent = self.parents[x];
if parent != x {
self.parents[x] = self.find(parent);
}
self.parents[x]
}
<|fim▁hole|>
if xr == yr {
return;
}
if self.ranks[xr] < self.ranks[yr] {
self.parents[xr] = yr;
} else if self.ranks[xr] > self.ranks[yr] {
self.parents[yr] = xr;
} else {
self.parents[yr] = xr;
self.ranks[xr] += 1;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_works() {
let mut uf = UnionFind::new(10);
uf.union(3, 5);
uf.union(7, 8);
uf.union(7, 9);
uf.union(5, 9);
assert!(uf.find(3) == uf.find(8));
assert!(uf.find(3) != uf.find(6));
assert!(uf.find(2) != uf.find(6));
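// after the unions above, 3, 5, 7, 8 and 9 share a single root,
// while 2 and 6 remain in their own singleton sets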
}
}<|fim▁end|>
|
pub fn union(&mut self, x: usize, y: usize) {
let xr = self.find(x);
let yr = self.find(y);
|
<|file_name|>cli.py<|end_file_name|><|fim▁begin|>"""Top-level import for all CLI-related functionality in apitools.
Note that importing this file will ultimately have side-effects, and
may require imports not available in all environments (such as App
Engine). In particular, picking up some readline-related imports can
cause pain.
"""
# pylint:disable=wildcard-import
<|fim▁hole|><|fim▁end|>
|
from googlecloudapis.apitools.base.py.app2 import *
from googlecloudapis.apitools.base.py.base_cli import *
|
<|file_name|>classes.py<|end_file_name|><|fim▁begin|>############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
import operator
# for easier re-use (because Python has no built-in enum type)
class Targets:
ALL_TARGETS = map(lambda x: 2 ** x , range(7))
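# builds the bit-flag values 1, 2, 4, ..., 64 so individual targets can be OR-ed together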
(DESKTOP_474_GCC,
DESKTOP_480_DEFAULT,
SIMULATOR,
EMBEDDED_LINUX,
DESKTOP_521_DEFAULT,
DESKTOP_531_DEFAULT,
DESKTOP_541_GCC) = ALL_TARGETS
@staticmethod
def desktopTargetClasses():
desktopTargets = (sum(Targets.ALL_TARGETS) & ~Targets.SIMULATOR & ~Targets.EMBEDDED_LINUX)
if platform.system() == 'Darwin':
desktopTargets &= ~Targets.DESKTOP_541_GCC
return desktopTargets
@staticmethod
def qt4Classes():
return (Targets.DESKTOP_474_GCC | Targets.DESKTOP_480_DEFAULT
| Targets.SIMULATOR | Targets.EMBEDDED_LINUX)
@staticmethod
def getStringForTarget(target):
if target == Targets.DESKTOP_474_GCC:
return "Desktop 474 GCC"
elif target == Targets.DESKTOP_480_DEFAULT:
if platform.system() in ('Windows', 'Microsoft'):
return "Desktop 480 MSVC2010"
else:
return "Desktop 480 GCC"<|fim▁hole|> return "Qt Simulator"
elif target == Targets.EMBEDDED_LINUX:
return "Embedded Linux"
elif target == Targets.DESKTOP_521_DEFAULT:
return "Desktop 521 default"
elif target == Targets.DESKTOP_531_DEFAULT:
return "Desktop 531 default"
elif target == Targets.DESKTOP_541_GCC:
return "Desktop 541 GCC"
else:
return None
@staticmethod
def getTargetsAsStrings(targets):
if not isinstance(targets, (tuple,list)):
test.fatal("Wrong usage... This function handles only tuples or lists.")
return None
result = map(Targets.getStringForTarget, targets)
if None in result:
test.fatal("You've passed at least one unknown target!")
return result
@staticmethod
def intToArray(targets):
return filter(lambda x: x & targets, Targets.ALL_TARGETS)
@staticmethod
def arrayToInt(targetArr):
return reduce(operator.or_, targetArr, 0)
@staticmethod
def getDefaultKit():
return Targets.DESKTOP_521_DEFAULT
# this class holds some constants for easier usage inside the Projects view
class ProjectSettings:
BUILD = 1
RUN = 2
# this class defines some constants for the views of the creator's MainWindow
class ViewConstants:
WELCOME, EDIT, DESIGN, DEBUG, PROJECTS, HELP = range(6)
FIRST_AVAILABLE = 0
# always adjust the following to the highest value of the available ViewConstants when adding new
LAST_AVAILABLE = HELP
# this function returns a regex of the tooltip of the FancyTabBar elements
# this is needed because the keyboard shortcut is OS specific
# if the provided argument does not match any of the ViewConstants it returns None
@staticmethod
def getToolTipForViewTab(viewTab):
if viewTab == ViewConstants.WELCOME:
toolTip = ur'Switch to <b>Welcome</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
elif viewTab == ViewConstants.EDIT:
toolTip = ur'Switch to <b>Edit</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
elif viewTab == ViewConstants.DESIGN:
toolTip = ur'Switch to <b>Design</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
elif viewTab == ViewConstants.DEBUG:
toolTip = ur'Switch to <b>Debug</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
elif viewTab == ViewConstants.PROJECTS:
toolTip = ur'Switch to <b>Projects</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
elif viewTab == ViewConstants.HELP:
toolTip = ur'Switch to <b>Help</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
else:
return None
return toolTip % (viewTab + 1)
class SubprocessType:
QT_WIDGET=0
QT_QUICK_APPLICATION=1
QT_QUICK_UI=2
USER_DEFINED=3
@staticmethod
def getWindowType(subprocessType, qtQuickVersion="1.1"):
if subprocessType == SubprocessType.QT_WIDGET:
return "QMainWindow"
if subprocessType == SubprocessType.QT_QUICK_APPLICATION:
qqv = "2"
if qtQuickVersion[0] == "1":
qqv = "1"
return "QtQuick%sApplicationViewer" % qqv
if subprocessType == SubprocessType.QT_QUICK_UI:
if qtQuickVersion == "1.1":
return "QDeclarativeViewer"
else:
return "QQuickView"
if subprocessType == SubprocessType.USER_DEFINED:
return "user-defined"
test.fatal("Could not determine the WindowType for SubprocessType %s" % subprocessType)
return None
class QtInformation:
QT_VERSION = 0
QT_BINPATH = 1
QT_LIBPATH = 2
class LibType:
SHARED = 0
STATIC = 1
QT_PLUGIN = 2
@staticmethod
def getStringForLib(libType):
if libType == LibType.SHARED:
return "Shared Library"
if libType == LibType.STATIC:
return "Statically Linked Library"
if libType == LibType.QT_PLUGIN:
return "Qt Plugin"
return None
class Qt5Path:
DOCS = 0
EXAMPLES = 1
@staticmethod
def getPaths(pathSpec):
if pathSpec == Qt5Path.DOCS:
path52 = "/doc"
path53 = "/Docs/Qt-5.3"
path54 = "/Docs/Qt-5.4"
elif pathSpec == Qt5Path.EXAMPLES:
path52 = "/examples"
path53 = "/Examples/Qt-5.3"
path54 = "/Examples/Qt-5.4"
else:
test.fatal("Unknown pathSpec given: %s" % str(pathSpec))
return []
if platform.system() in ('Microsoft', 'Windows'):
return ["C:/Qt/Qt5.2.1/5.2.1/msvc2010" + path52,
"C:/Qt/Qt5.3.1" + path53, "C:/Qt/Qt5.4.1" + path54]
elif platform.system() == 'Linux':
if __is64BitOS__():
return map(os.path.expanduser, ["~/Qt5.2.1/5.2.1/gcc_64" + path52,
"~/Qt5.3.1" + path53, "~/Qt5.4.1" + path54])
return map(os.path.expanduser, ["~/Qt5.2.1/5.2.1/gcc" + path52,
"~/Qt5.3.1" + path53, "~/Qt5.4.1" + path54])
else:
return map(os.path.expanduser, ["~/Qt5.2.1/5.2.1/clang_64" + path52,
"~/Qt5.3.1" + path53])<|fim▁end|>
|
elif target == Targets.SIMULATOR:
|
<|file_name|>freshen.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Freshening is the process of replacing unknown variables with fresh types. The idea is that
//! the type, after freshening, contains no inference variables but instead contains either a
//! value for each variable or fresh "arbitrary" types wherever a variable would have been.
//!
//! Freshening is used primarily to get a good type for inserting into a cache. The result
//! summarizes what the type inferencer knows "so far". The primary place it is used right now is
//! in the trait matching algorithm, which needs to be able to cache whether an `impl` self type
//! matches some other type X -- *without* affecting `X`. That means that if the type `X` is in
//! fact an unbound type variable, we want the match to be regarded as ambiguous, because depending
//! on what type that type variable is ultimately assigned, the match may or may not succeed.
//!
//! Note that you should be careful not to allow the output of freshening to leak to the user in
//! error messages or in any other form. Freshening is only really useful as an internal detail.
//!
//! __An important detail concerning regions.__ The freshener also replaces *all* regions with
//! 'static. The reason behind this is that, in general, we do not take region relationships into
//! account when making type-overloaded decisions. This is important because of the design of the
//! region inferencer, which is not based on unification but rather on accumulating and then
//! solving a set of constraints. In contrast, the type inferencer assigns a value to each type
//! variable only once, and it does so as soon as it can, so it is reasonable to ask what the type
//! inferencer knows "so far".
use middle::ty::{self, Ty};
use middle::ty_fold;
use middle::ty_fold::TypeFoldable;
use middle::ty_fold::TypeFolder;
use std::collections::hash_map::{self, Entry};
use super::InferCtxt;
use super::unify_key::ToType;
pub struct TypeFreshener<'a, 'tcx:'a> {
infcx: &'a InferCtxt<'a, 'tcx>,
freshen_count: u32,
freshen_map: hash_map::HashMap<ty::InferTy, Ty<'tcx>>,
}
impl<'a, 'tcx> TypeFreshener<'a, 'tcx> {
pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> TypeFreshener<'a, 'tcx> {
TypeFreshener {
infcx: infcx,
freshen_count: 0,
freshen_map: hash_map::HashMap::new(),
}
}
fn freshen<F>(&mut self,
opt_ty: Option<Ty<'tcx>>,
key: ty::InferTy,
freshener: F)
-> Ty<'tcx> where
F: FnOnce(u32) -> ty::InferTy,
{
match opt_ty {
Some(ty) => { return ty.fold_with(self); }
None => { }
}
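// No known value yet: hand out a numbered fresh type, cached per inference variable so repeated occurrences freshen to the same type.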
match self.freshen_map.entry(key) {
Entry::Occupied(entry) => *entry.get(),
Entry::Vacant(entry) => {
let index = self.freshen_count;
self.freshen_count += 1;
let t = ty::mk_infer(self.infcx.tcx, freshener(index));
entry.insert(t);
t
}
}
}
}
impl<'a, 'tcx> TypeFolder<'tcx> for TypeFreshener<'a, 'tcx> {
fn tcx<'b>(&'b self) -> &'b ty::ctxt<'tcx> {
self.infcx.tcx
}
fn fold_region(&mut self, r: ty::Region) -> ty::Region {
match r {
ty::ReEarlyBound(..) |
ty::ReLateBound(..) => {
// leave bound regions alone
r
}
ty::ReStatic |
ty::ReFree(_) |
ty::ReScope(_) |
ty::ReInfer(_) |
ty::ReEmpty => {
// replace all free regions with 'static
ty::ReStatic
}
}
}
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
if !ty::type_needs_infer(t) && !ty::type_has_erasable_regions(t) {
return t;
}
let tcx = self.infcx.tcx;
match t.sty {
ty::TyInfer(ty::TyVar(v)) => {
self.freshen(<|fim▁hole|> ty::FreshTy)
}
ty::TyInfer(ty::IntVar(v)) => {
self.freshen(
self.infcx.int_unification_table.borrow_mut()
.probe(v)
.map(|v| v.to_type(tcx)),
ty::IntVar(v),
ty::FreshIntTy)
}
ty::TyInfer(ty::FloatVar(v)) => {
self.freshen(
self.infcx.float_unification_table.borrow_mut()
.probe(v)
.map(|v| v.to_type(tcx)),
ty::FloatVar(v),
ty::FreshFloatTy)
}
ty::TyInfer(ty::FreshTy(c)) |
ty::TyInfer(ty::FreshIntTy(c)) |
ty::TyInfer(ty::FreshFloatTy(c)) => {
if c >= self.freshen_count {
tcx.sess.bug(
&format!("Encountered a freshened type with id {} \
but our counter is only at {}",
c,
self.freshen_count));
}
t
}
ty::TyBool |
ty::TyChar |
ty::TyInt(..) |
ty::TyUint(..) |
ty::TyFloat(..) |
ty::TyEnum(..) |
ty::TyBox(..) |
ty::TyStr |
ty::TyError |
ty::TyArray(..) |
ty::TySlice(..) |
ty::TyRawPtr(..) |
ty::TyRef(..) |
ty::TyBareFn(..) |
ty::TyTrait(..) |
ty::TyStruct(..) |
ty::TyClosure(..) |
ty::TyTuple(..) |
ty::TyProjection(..) |
ty::TyParam(..) => {
ty_fold::super_fold_ty(self, t)
}
}
}
}<|fim▁end|>
|
self.infcx.type_variables.borrow().probe(v),
ty::TyVar(v),
|
<|file_name|>byte-string-literals.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-tab
static FOO: &'static [u8] = b"\f"; //~ ERROR unknown byte escape
pub fn main() {
b"\f"; //~ ERROR unknown byte escape<|fim▁hole|> b"a //~ ERROR unterminated double quote byte string
}<|fim▁end|>
|
b"\x0Z"; //~ ERROR illegal character in numeric character escape: Z
b"é"; //~ ERROR byte constant must be ASCII
|
<|file_name|>FirebirdInterface.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
from downloadCommon import DownloadCommon, getSeqName
from DdlCommonInterface import DdlCommonInterface
import re
class FbDownloader(DownloadCommon):
def __init__(self):
self.strDbms = 'firebird'
def connect(self, info):
try:
import kinterbasdb
except:
print "Missing Firebird support through kinterbasdb"
return
self.strDbms = 'firebird'
self.version = info['version']
self.conn = kinterbasdb.connect(
dsn='localhost:%s' % info['dbname'],
user = info['user'],
password = info['pass'])
self.cursor = self.conn.cursor()
def useConnection(self, con, version):
self.conn = con
self.version = version
self.cursor = self.conn.cursor()
def getTables(self, tableList):
""" Returns the list of tables as a array of strings """
strQuery = "SELECT RDB$RELATION_NAME FROM RDB$RELATIONS WHERE RDB$SYSTEM_FLAG=0 AND RDB$VIEW_SOURCE IS NULL;"
self.cursor.execute(strQuery)
return self._confirmReturns([x[0].strip() for x in self.cursor.fetchall() ], tableList)
def getTableColumns(self, strTable):
""" Returns column in this format
(nColIndex, strColumnName, strColType, CHARACTER_MAXIMUM_LENGTH, NUMERIC_PRECISION, bNotNull, strDefault, auto_increment)
"""
strSql = """
SELECT RF.RDB$FIELD_POSITION, RF.RDB$FIELD_NAME, RDB$FIELD_TYPE, F.RDB$FIELD_LENGTH,
RDB$FIELD_PRECISION, RDB$FIELD_SCALE, RF.RDB$NULL_FLAG, RF.RDB$DEFAULT_SOURCE, F.RDB$FIELD_SUB_TYPE
FROM RDB$RELATION_FIELDS RF, RDB$FIELDS F
WHERE RF.RDB$RELATION_NAME = ?
AND RF.RDB$FIELD_SOURCE = F.RDB$FIELD_NAME
ORDER BY RF.RDB$FIELD_POSITION;"""
self.cursor.execute(strSql, [strTable])
rows = self.cursor.fetchall()
ret = []
# TODO auto_increment
bAutoIncrement = False
for row in rows:
attnum, name, nType, size, numsize, scale, attnull, default, sub_type = row
if scale and scale < 0:
scale = -scale
if not size and numsize:  # fall back to the numeric precision when no explicit length is given
size = numsize
strType = self.convertTypeId(nType)
if sub_type == 1:
strType = 'numeric'
elif sub_type == 2:
strType = 'decimal'
if numsize > 0:
size = numsize
numsize = None
if strType == 'integer' and size == 4:
size = None
elif strType == 'date' and size == 4:
size = None
elif strType == 'float' and size == 4:
size = None
if default:
# Remove the 'DEFAULT ' part of the SQL
default = default.replace('DEFAULT ', '')
if self.hasAutoincrement(strTable, name):
bAutoIncrement = True
else:
bAutoIncrement = False
ret.append((name.strip(), strType, size, scale, attnull, default, bAutoIncrement))
return ret
def convertTypeId(self, nType):
types = {
261: 'blob',
14 : 'char',
40 : 'cstring',
11 : 'd_float',
27 : 'double',
10 : 'float',
16 : 'int64',
8 : 'integer',
9 : 'quad',
7 : 'smallint',<|fim▁hole|> }
strType = ''
if nType in types:
strType = types[nType]
if nType not in [14, 40, 37]:
size = None
else:
print "Uknown type %d" % (nType)
return strType
def hasAutoincrement(self, strTableName, strColName):
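# Firebird emulates auto-increment with a generator, so look for one named after the table and column (identifier truncated to 31 chars).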
strSql = "SELECT RDB$GENERATOR_NAME FROM RDB$GENERATORS WHERE UPPER(RDB$GENERATOR_NAME)=UPPER(?);"
self.cursor.execute(strSql, [getSeqName(strTableName, strColName)[0:31]])
rows = self.cursor.fetchall()
if rows:
return True
return False
def getTableComment(self, strTableName):
""" Returns the comment as a string """
strSql = "SELECT RDB$DESCRIPTION FROM RDB$RELATIONS WHERE RDB$RELATION_NAME=?;"
self.cursor.execute(strSql, [strTableName])
rows = self.cursor.fetchall()
if rows:
return rows[0][0]
return None
def getColumnComment(self, strTableName, strColumnName):
""" Returns the comment as a string """
strSql = """SELECT RDB$DESCRIPTION
FROM RDB$RELATION_FIELDS
WHERE RDB$RELATION_NAME = ? AND RDB$FIELD_NAME = ?"""
self.cursor.execute(strSql, [strTableName, strColumnName])
rows = self.cursor.fetchall()
if rows:
return rows[0][0]
return None
def getTableIndexes(self, strTableName):
""" Returns
(strIndexName, [strColumns,], bIsUnique, bIsPrimary, bIsClustered)
or []
Warning: the Primary key constraint cheats by assuming the index name starts with pk_
"""
strSql = """SELECT RDB$INDEX_NAME, RDB$UNIQUE_FLAG
FROM RDB$INDICES
WHERE RDB$RELATION_NAME = '%s'
""" % (strTableName)
self.cursor.execute(strSql)
rows = self.cursor.fetchall()
ret = []
if not rows:
return ret
for row in rows:
(strIndexName, bIsUnique) = row
colList = self._fetchTableColumnsForIndex(strIndexName)
if strIndexName.lower().startswith('pk_'):
bIsPrimary = True
else:
bIsPrimary = False
strIndexName = strIndexName.strip()
ret.append((strIndexName, colList, bIsUnique, bIsPrimary, None))
return ret
def _fetchTableColumnsForIndex(self, strIndexName):
strSql = """SELECT RDB$FIELD_NAME
FROM RDB$INDEX_SEGMENTS
WHERE RDB$INDEX_NAME = ?
ORDER BY RDB$FIELD_POSITION
"""
self.cursor.execute(strSql, [strIndexName.strip()])
rows = self.cursor.fetchall()
return [row[0].strip() for row in rows]
def getTableRelations(self, strTableName):
""" Returns
(strConstraintName, colName, fk_table, fk_columns)
or []
"""
strSql = """SELECT RDB$CONSTRAINT_NAME
FROM RDB$RELATION_CONSTRAINTS
WHERE RDB$RELATION_NAME = '%s'
""" % (strTableName)
self.cursor.execute(strSql)
rows = self.cursor.fetchall()
ret = []
if not rows:
return ret
return ret
def _fetchTableColumnsNamesByNums(self, strTableName, nums):
strSql = """
SELECT pa.attname
FROM pg_attribute pa, pg_class pc
WHERE pa.attrelid = pc.oid
AND pa.attisdropped = 'f'
AND pc.relname = %s
AND pc.relkind = 'r'
AND pa.attnum in (%s)
ORDER BY pa.attnum
""" % ( '%s', ','.join(['%s' for num in nums]) )
self.cursor.execute(strSql, [strTableName] + nums)
rows = self.cursor.fetchall()
return [row[0] for row in rows]
def _decodeLength(self, type, atttypmod):
# gleamed from http://www.postgresql-websource.com/psql713/source-format_type.htm
VARHDRSZ = 4
if type == 'varchar':
return (atttypmod - VARHDRSZ, None)
if type == 'numeric':
atttypmod -= VARHDRSZ
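# the adjusted atttypmod packs the precision into its high 16 bits and the scale into its low 16 bits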
return ( (atttypmod >> 16) & 0xffff, atttypmod & 0xffff)
if type == 'varbit' or type == 'bit':
return (atttypmod, None)
return (None, None)
def getViews(self, viewList):
strQuery = "SELECT RDB$VIEW_NAME FROM RDB$VIEW_RELATIONS"
#TODO add viewList constraint
self.cursor.execute(strQuery)
return self._confirmReturns([x[0].strip() for x in self.cursor.fetchall() ], viewList)
def getViewDefinition(self, strViewName):
strQuery = "SELECT RDB$RELATION_NAME, RDB$VIEW_SOURCE FROM RDB$RELATIONS WHERE RDB$RELATION_NAME = UPPER(?)"
self.cursor.execute(strQuery, [strViewName])
rows = self.cursor.fetchall()
if rows:
ret = rows[0][1].strip()
return ret
return ''
def getFunctions(self, functionList):
#strQuery = "SELECT RDB$FUNCTION_NAME FROM RDB$FUNCTIONS WHERE RDB$SYSTEM_FLAG = 0"
#TODO add functionList constraint
strQuery = "SELECT RDB$PROCEDURE_NAME FROM RDB$PROCEDURES WHERE RDB$SYSTEM_FLAG = 0"
self.cursor.execute(strQuery)
rows = self.cursor.fetchall()
return self._confirmReturns([x[0].strip() for x in rows], functionList)
def getFunctionDefinition(self, strSpecifiName):
""" Returns (routineName, parameters, return, language, definition) """
strQuery = "SELECT RDB$PROCEDURE_NAME, RDB$PROCEDURE_SOURCE FROM RDB$PROCEDURES WHERE RDB$SYSTEM_FLAG = 0 AND RDB$PROCEDURE_NAME = upper(?)"
self.cursor.execute(strQuery, [strSpecifiName])
rows = self.cursor.fetchall()
strProcName, strDefinition = rows[0]
strDefinition = strDefinition.strip()
strProcName = strProcName.strip()
strQuery = """SELECT PP.RDB$PARAMETER_NAME, PP.RDB$FIELD_SOURCE, PP.RDB$PARAMETER_TYPE, F.RDB$FIELD_TYPE, F.RDB$FIELD_LENGTH, F.RDB$FIELD_PRECISION, RDB$FIELD_SCALE
FROM RDB$PROCEDURE_PARAMETERS PP, RDB$FIELDS F
WHERE PP.RDB$PROCEDURE_NAME = upper(?)
AND PP.RDB$FIELD_SOURCE = F.RDB$FIELD_NAME
ORDER BY PP.RDB$PARAMETER_NUMBER"""
self.cursor.execute(strQuery, [strSpecifiName])
rows = self.cursor.fetchall()
args = []
rets = []
for row in rows:
strParamName, strSrc, nParamType, nType, nLen, nPrecision, nScale = row
strParamName = strParamName.strip().lower()
strSrc = strSrc.strip()
strType = self.convertTypeId(nType)
if nParamType == 0:
args.append(strParamName + ' ' + strType)
else:
if strParamName.lower() == 'ret':
rets.append(strType)
else:
rets.append(strParamName + ' ' + strType)
return (strProcName.lower(), args, ','.join(rets), '', strDefinition)
class DdlFirebird(DdlCommonInterface):
def __init__(self):
DdlCommonInterface.__init__(self, 'firebird')
self.params['max_id_len'] = { 'default' : 256 }
self.params['table_desc'] = ["UPDATE RDB$RELATIONS SET RDB$DESCRIPTION = %(desc)s\n\tWHERE RDB$RELATION_NAME = upper('%(table)s')"]
self.params['column_desc'] = ["UPDATE RDB$RELATION_FIELDS SET RDB$DESCRIPTION = %(desc)s\n\tWHERE RDB$RELATION_NAME = upper('%(table)s') AND RDB$FIELD_NAME = upper('%(column)s')"]
self.params['drop_constraints_on_col_rename'] = True
self.params['drop_table_has_cascade'] = False
self.params['alter_default'] = ['ALTER TABLE %(table_name)s ALTER %(column_name)s TYPE %(column_type)s']
self.params['rename_column'] = ['ALTER TABLE %(table_name)s ALTER %(old_col_name)s TO %(new_col_name)s']
self.params['alter_default'] = ['ALTER TABLE %(table_name)s ALTER COLUMN %(column_name)s SET DEFAULT %(new_default)s']
self.params['keywords'] = """
ACTION ACTIVE ADD ADMIN AFTER ALL ALTER AND ANY AS ASC ASCENDING AT AUTO AUTODDL AVG BASED BASENAME BASE_NAME
BEFORE BEGIN BETWEEN BLOB BLOBEDIT BUFFER BY CACHE CASCADE CAST CHAR CHARACTER CHARACTER_LENGTH CHAR_LENGTH
CHECK CHECK_POINT_LEN CHECK_POINT_LENGTH COLLATE COLLATION COLUMN COMMIT COMMITTED COMPILETIME COMPUTED CLOSE
CONDITIONAL CONNECT CONSTRAINT CONTAINING CONTINUE COUNT CREATE CSTRING CURRENT CURRENT_DATE CURRENT_TIME
CURRENT_TIMESTAMP CURSOR DATABASE DATE DAY DB_KEY DEBUG DEC DECIMAL DECLARE DEFAULT
DELETE DESC DESCENDING DESCRIBE DESCRIPTOR DISCONNECT DISPLAY DISTINCT DO DOMAIN DOUBLE DROP ECHO EDIT ELSE
END ENTRY_POINT ESCAPE EVENT EXCEPTION EXECUTE EXISTS EXIT EXTERN EXTERNAL EXTRACT FETCH FILE FILTER FLOAT
FOR FOREIGN FOUND FREE_IT FROM FULL FUNCTION GDSCODE GENERATOR GEN_ID GLOBAL GOTO GRANT GROUP GROUP_COMMIT_WAIT
GROUP_COMMIT_ WAIT_TIME HAVING HELP HOUR IF IMMEDIATE IN INACTIVE INDEX INDICATOR INIT INNER INPUT INPUT_TYPE
INSERT INT INTEGER INTO IS ISOLATION ISQL JOIN KEY LC_MESSAGES LC_TYPE LEFT LENGTH LEV LEVEL LIKE LOGFILE
LOG_BUFFER_SIZE LOG_BUF_SIZE LONG MANUAL MAX MAXIMUM MAXIMUM_SEGMENT MAX_SEGMENT MERGE MESSAGE MIN MINIMUM
MINUTE MODULE_NAME MONTH NAMES NATIONAL NATURAL NCHAR NO NOAUTO NOT NULL NUMERIC NUM_LOG_BUFS NUM_LOG_BUFFERS
OCTET_LENGTH OF ON ONLY OPEN OPTION OR ORDER OUTER OUTPUT OUTPUT_TYPE OVERFLOW PAGE PAGELENGTH PAGES PAGE_SIZE
PARAMETER PASSWORD PLAN POSITION POST_EVENT PRECISION PREPARE PROCEDURE PROTECTED PRIMARY PRIVILEGES PUBLIC QUIT
RAW_PARTITIONS RDB$DB_KEY READ REAL RECORD_VERSION REFERENCES RELEASE RESERV RESERVING RESTRICT RETAIN RETURN
RETURNING_VALUES RETURNS REVOKE RIGHT ROLE ROLLBACK RUNTIME SCHEMA SECOND SEGMENT SELECT SET SHADOW SHARED SHELL
SHOW SINGULAR SIZE SMALLINT SNAPSHOT SOME SORT SQLCODE SQLERROR SQLWARNING STABILITY STARTING STARTS STATEMENT
STATIC STATISTICS SUB_TYPE SUM SUSPEND TABLE TERMINATOR THEN TIME TIMESTAMP TO TRANSACTION TRANSLATE TRANSLATION
TRIGGER TRIM TYPE UNCOMMITTED UNION UNIQUE UPDATE UPPER USER USING VALUE VALUES VARCHAR VARIABLE VARYING VERSION
VIEW WAIT WEEKDAY WHEN WHENEVER WHERE WHILE WITH WORK WRITE YEAR YEARDAY""".split()
# Note you need to remove the constraints like:
# alter table table1 drop constraint pk_table1;
# before dropping the table (what a pain)
def addFunction(self, strNewFunctionName, argumentList, strReturn, strContents, attribs, diffs):
argumentList = [ '%s' % arg for arg in argumentList ]
info = {
'functionname' : self.quoteName(strNewFunctionName),
'arguments' : ', '.join(argumentList),
'returns' : strReturn,
'contents' : strContents.replace("'", "''"),
'language' : '',
}
if 'language' in attribs:
info['language'] = ' LANGUAGE %s' % (attribs['language'])
diffs.append(('Add function', # OR REPLACE
"CREATE PROCEDURE %(functionname)s(%(arguments)s) RETURNS (ret %(returns)s) AS \n%(contents)s;" % info )
)
def dropFunction(self, strOldFunctionName, argumentList, diffs):
info = {
'functionname' : self.quoteName(strOldFunctionName),
}
diffs.append(('Drop function',
'DROP PROCEDURE %(functionname)s' % info )
)<|fim▁end|>
|
12 : 'date',
13 : 'time',
35 : 'timestamp',
37 : 'varchar',
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>#[allow(clippy::unreadable_literal)]
#[allow(clippy::too_many_arguments)]
#[allow(clippy::match_same_arms)]
#[allow(clippy::type_complexity)]
mod auto;
pub use auto::*;
mod utils;
mod aggregator;<|fim▁hole|>
pub mod prelude {
pub use glib::prelude::*;
pub use gst::prelude::*;
pub use super::aggregator::AggregatorExtManual;
pub use super::aggregator_pad::AggregatorPadExtManual;
pub use super::auto::traits::*;
}
pub mod subclass;
mod ffi;
pub const AGGREGATOR_FLOW_NEED_DATA: gst::FlowError = gst::FlowError::CustomError;<|fim▁end|>
|
mod aggregator_pad;
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|>/* @flow */
/* global Navigator, navigator */
import config from 'config';
import * as React from 'react';
import { Helmet } from 'react-helmet';
import { connect } from 'react-redux';
import { withRouter } from 'react-router-dom';
import NestedStatus from 'react-nested-status';
import { compose } from 'redux';
// We have to import these styles first to have them listed first in the final
// CSS file. See: https://github.com/mozilla/addons-frontend/issues/3565
// The order is important: font files need to be first, with the subset after
// the full font file.
import 'fonts/inter.scss';
import 'fonts/inter-subset.scss';
import 'normalize.css/normalize.css';
import './styles.scss';
/* eslint-disable import/first */
import Routes from 'amo/components/Routes';
import ScrollToTop from 'amo/components/ScrollToTop';
import NotAuthorizedPage from 'amo/pages/ErrorPages/NotAuthorizedPage';
import NotFoundPage from 'amo/pages/ErrorPages/NotFoundPage';
import ServerErrorPage from 'amo/pages/ErrorPages/ServerErrorPage';
import { getClientAppAndLangFromPath, isValidClientApp } from 'amo/utils';
import { addChangeListeners } from 'amo/addonManager';
import {
setClientApp as setClientAppAction,
setUserAgent as setUserAgentAction,
} from 'amo/reducers/api';
import { setInstallState } from 'amo/reducers/installations';
import { CLIENT_APP_ANDROID } from 'amo/constants';
import ErrorPage from 'amo/components/ErrorPage';
import translate from 'amo/i18n/translate';
import log from 'amo/logger';
import type { AppState } from 'amo/store';
import type { DispatchFunc } from 'amo/types/redux';
import type { InstalledAddon } from 'amo/reducers/installations';
import type { I18nType } from 'amo/types/i18n';
import type { ReactRouterLocationType } from 'amo/types/router';
/* eslint-enable import/first */
interface MozNavigator extends Navigator {
mozAddonManager?: Object;
}
type PropsFromState = {|
clientApp: string,
lang: string,
userAgent: string | null,
|};
type DefaultProps = {|
_addChangeListeners: (callback: Function, mozAddonManager: Object) => any,
_navigator: typeof navigator | null,
mozAddonManager: $PropertyType<MozNavigator, 'mozAddonManager'>,
userAgent: string | null,
|};
type Props = {|
...PropsFromState,
...DefaultProps,
handleGlobalEvent: () => void,
i18n: I18nType,
location: ReactRouterLocationType,
setClientApp: (clientApp: string) => void,
setUserAgent: (userAgent: string) => void,
|};
export function getErrorPage(status: number | null): () => React.Node {
switch (status) {
case 401:
return NotAuthorizedPage;
case 404:
return NotFoundPage;
case 500:
default:
return ServerErrorPage;
}
}
export class AppBase extends React.Component<Props> {
scheduledLogout: TimeoutID;
static defaultProps: DefaultProps = {
_addChangeListeners: addChangeListeners,
_navigator: typeof navigator !== 'undefined' ? navigator : null,
mozAddonManager: config.get('server')
? {}
: (navigator: MozNavigator).mozAddonManager,
userAgent: null,
};
componentDidMount() {
const {
_addChangeListeners,
_navigator,
handleGlobalEvent,
mozAddonManager,
setUserAgent,
userAgent,
} = this.props;
// Use addonManager.addChangeListener to setup and filter events.
_addChangeListeners(handleGlobalEvent, mozAddonManager);
// If userAgent isn't set in state it could be that we couldn't get one
// from the request headers on our first (server) request. If that's the
// case we try to load them from navigator.
if (!userAgent && _navigator && _navigator.userAgent) {
log.info(
'userAgent not in state on App load; using navigator.userAgent.',
);
setUserAgent(_navigator.userAgent);
}
}
componentDidUpdate() {
const { clientApp, location, setClientApp } = this.props;
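// Keep the stored clientApp in sync with the client app encoded in the current URL.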
const { clientApp: clientAppFromURL } = getClientAppAndLangFromPath(
location.pathname,
);
if (isValidClientApp(clientAppFromURL) && clientAppFromURL !== clientApp) {
setClientApp(clientAppFromURL);
}
}
render(): React.Node {
const { clientApp, i18n, lang } = this.props;
const i18nValues = {
locale: lang,
};
let defaultTitle = i18n.sprintf(
i18n.gettext('Add-ons for Firefox (%(locale)s)'),
i18nValues,
);
let titleTemplate = i18n.sprintf(
i18n.gettext('%(title)s – Add-ons for Firefox (%(locale)s)'),
// We inject `%s` as a named argument to avoid localizer mistakes. Helmet
// will replace `%s` by the title supplied in other pages.
{ ...i18nValues, title: '%s' },
);
if (clientApp === CLIENT_APP_ANDROID) {
defaultTitle = i18n.sprintf(
i18n.gettext('Add-ons for Firefox Android (%(locale)s)'),
i18nValues,
);
titleTemplate = i18n.sprintf(
i18n.gettext('%(title)s – Add-ons for Firefox Android (%(locale)s)'),
// We inject `%s` as a named argument to avoid localizer mistakes.
// Helmet will replace `%s` by the title supplied in other pages.
{ ...i18nValues, title: '%s' },
);
}
return (
<NestedStatus code={200}>
<ScrollToTop>
<Helmet defaultTitle={defaultTitle} titleTemplate={titleTemplate} />
<ErrorPage getErrorComponent={getErrorPage}>
<Routes />
</ErrorPage>
</ScrollToTop>
</NestedStatus>
);
}
}
export const mapStateToProps = (state: AppState): PropsFromState => ({
clientApp: state.api.clientApp,
lang: state.api.lang,
userAgent: state.api.userAgent,
});
export function mapDispatchToProps(dispatch: DispatchFunc): {|
handleGlobalEvent: (payload: InstalledAddon) => void,
setClientApp: (clientApp: string) => void,
setUserAgent: (userAgent: string) => void,
|} {
return {
handleGlobalEvent(payload: InstalledAddon) {
dispatch(setInstallState(payload));
},
setClientApp(clientApp: string) {
dispatch(setClientAppAction(clientApp));
},
setUserAgent(userAgent: string) {
dispatch(setUserAgentAction(userAgent));<|fim▁hole|>
const App: React.ComponentType<Props> = compose(
withRouter,
connect(mapStateToProps, mapDispatchToProps),
translate(),
)(AppBase);
export default App;<|fim▁end|>
|
},
};
}
|
<|file_name|>miniterm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Very simple serial terminal
# (C)2002-2009 Chris Liechti <[email protected]>
# Input characters are sent directly (only LF -> CR/LF/CRLF translation is
# done), received characters are displayed as is (or escaped through Python's
# repr, useful for debug purposes).
import sys, os, serial, threading, time
EXITCHARCTER = '\x1d' # GS/CTRL+]
MENUCHARACTER = '\x14' # Menu: CTRL+T
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+%c' % (ord('@') + ascii_code)
else:
return repr(character)
# help text, starts with blank line! it's a function so that the current values
# for the shortcut keys are used and not the values at program start
def get_help_text():
return """
--- pySerial (%(version)s) - miniterm - help
---
--- %(exit)-8s Exit program
--- %(menu)-8s Menu escape key, followed by:
--- Menu keys:
--- %(itself)-8s Send the menu character itself to remote
--- %(exchar)-8s Send the exit character to remote
--- %(info)-8s Show info
--- %(upload)-8s Upload file (prompt will be shown)
--- Toggles:
--- %(rts)s RTS %(echo)s local echo
--- %(dtr)s DTR %(break)s BREAK
--- %(lfm)s line feed %(repr)s Cycle repr mode
---
--- Port settings (%(menu)s followed by the following):
--- 7 8 set data bits
--- n e o s m change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""" % {
'version': getattr(serial, 'VERSION', 'unknown'),
'exit': key_description(EXITCHARCTER),
'menu': key_description(MENUCHARACTER),
'rts': key_description('\x12'),
'repr': key_description('\x01'),
'dtr': key_description('\x04'),
'lfm': key_description('\x0c'),
'break': key_description('\x02'),
'echo': key_description('\x05'),
'info': key_description('\x09'),
'upload': key_description('\x15'),
'itself': key_description(MENUCHARACTER),
'exchar': key_description(EXITCHARCTER),
}
# first choose a platform dependant way to read single characters from the console
global console
if os.name == 'nt':
import msvcrt
class Console:
def __init__(self):
pass
def setup(self):
pass # Do nothing for 'nt'
def cleanup(self):
pass # Do nothing for 'nt'
def getkey(self):
while 1:
z = msvcrt.getch()
if z == '\0' or z == '\xe0': # function keys
msvcrt.getch()
else:
if z == '\r':
return '\n'
return z
console = Console()
elif os.name == 'posix':
import termios, sys, os
class Console:
def __init__(self):
self.fd = sys.stdin.fileno()
def setup(self):
self.old = termios.tcgetattr(self.fd)
new = termios.tcgetattr(self.fd)
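# switch the terminal to a raw-like mode: no line buffering, no echo, no signal keys; read() returns after every byte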
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = os.read(self.fd, 1)
return c
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
console = Console()
def cleanup_console():
console.cleanup()
console.setup()
sys.exitfunc = cleanup_console #terminal modes have to be restored on exit...
else:
raise NotImplementedError("Sorry no implementation for your platform (%s) available." % sys.platform)
CONVERT_CRLF = 2
CONVERT_CR = 1
CONVERT_LF = 0
NEWLINE_CONVERISON_MAP = ('\n', '\r', '\r\n')
LF_MODES = ('LF', 'CR', 'CR/LF')
REPR_MODES = ('raw', 'some control', 'all control', 'hex')
class Miniterm:
def __init__(self, port, baudrate, parity, rtscts, xonxoff, echo=False, convert_outgoing=CONVERT_CRLF, repr_mode=0):
try:
self.serial = serial.serial_for_url(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1, stopbits=2)
except AttributeError:
# happens when the installed pyserial is older than 2.5. use the
# Serial class directly then.
self.serial = serial.Serial(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1, stopbits=2)
self.echo = echo
self.repr_mode = repr_mode
self.convert_outgoing = convert_outgoing
self.newline = NEWLINE_CONVERISON_MAP[self.convert_outgoing]
self.dtr_state = True
self.rts_state = True
self.break_state = False
def start(self):
self.alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader)
self.receiver_thread.setDaemon(1)
self.receiver_thread.start()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer)
self.transmitter_thread.setDaemon(1)
self.transmitter_thread.start()
def stop(self):
self.alive = False
def join(self, transmit_only=False):
if not transmit_only:
self.receiver_thread.join()
# self.transmitter_thread.join()
def dump_port_settings(self):
sys.stderr.write("\n--- Settings: %s %s,%s,%s,%s\n" % (
self.serial.portstr,
self.serial.baudrate,
self.serial.bytesize,
self.serial.parity,
self.serial.stopbits,
))
sys.stderr.write('--- RTS %s\n' % (self.rts_state and 'active' or 'inactive'))
sys.stderr.write('--- DTR %s\n' % (self.dtr_state and 'active' or 'inactive'))
sys.stderr.write('--- BREAK %s\n' % (self.break_state and 'active' or 'inactive'))
sys.stderr.write('--- software flow control %s\n' % (self.serial.xonxoff and 'active' or 'inactive'))
sys.stderr.write('--- hardware flow control %s\n' % (self.serial.rtscts and 'active' or 'inactive'))
sys.stderr.write('--- data escaping: %s\n' % (REPR_MODES[self.repr_mode],))
sys.stderr.write('--- linefeed: %s\n' % (LF_MODES[self.convert_outgoing],))
try:
sys.stderr.write('--- CTS: %s DSR: %s RI: %s CD: %s\n' % (
(self.serial.getCTS() and 'active' or 'inactive'),
(self.serial.getDSR() and 'active' or 'inactive'),
(self.serial.getRI() and 'active' or 'inactive'),
(self.serial.getCD() and 'active' or 'inactive'),
))
except serial.SerialException:
# on RFC 2217 ports it can happen that no modem state notification was
# yet received. ignore this error.
pass
def reader(self):
"""loop and copy serial->console"""
while self.alive:
try:
data = self.serial.read(1)
# data = self.read()
# check for exit from device
if data == EXITCHARCTER:
self.stop()
break
if self.repr_mode == 0:
# direct output, just have to care about newline setting
if data == '\r' and self.convert_outgoing == CONVERT_CR:
sys.stdout.write('\n')
else:
sys.stdout.write(data)
elif self.repr_mode == 1:
# escape non-printable, let pass newlines
if self.convert_outgoing == CONVERT_CRLF and data in '\r\n':
if data == '\n':
sys.stdout.write('\n')
elif data == '\r':
pass
elif data == '\n' and self.convert_outgoing == CONVERT_LF:
sys.stdout.write('\n')
elif data == '\r' and self.convert_outgoing == CONVERT_CR:
sys.stdout.write('\n')
else:
sys.stdout.write(repr(data)[1:-1])
elif self.repr_mode == 2:
# escape all non-printable, including newline
sys.stdout.write(repr(data)[1:-1])
elif self.repr_mode == 3:
# escape everything (hexdump)
for character in data:
sys.stdout.write("%s " % character.encode('hex'))
sys.stdout.flush()
except serial.SerialException, e:
time.sleep(0.001)
continue
except TypeError as e:
self.alive = False
# would be nice if the console reader could be interrupted at this
# point...
raise
def writer(self):
"""loop and copy console->serial until EXITCHARCTER character is
found. when MENUCHARACTER is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = console.getkey()
except KeyboardInterrupt:
c = '\x03'
if menu_active:
if c == MENUCHARACTER or c == EXITCHARCTER: # Menu character again/exit char -> send itself
self.serial.write(c) # send character
if self.echo:
sys.stdout.write(c)
elif c == '\x15': # CTRL+U -> upload file
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
console.cleanup()
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
file = open(filename, 'r')
sys.stderr.write('--- Sending file %s ---\n' % filename)
while True:
line = file.readline().rstrip('\r\n')
if not line:
break
self.serial.write(line)
self.serial.write('\r\n')
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File %s sent ---\n' % filename)
except IOError, e:
sys.stderr.write('--- ERROR opening file %s: %s ---\n' % (filename, e))
console.setup()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.rts_state = not self.rts_state
self.serial.setRTS(self.rts_state)
sys.stderr.write('--- RTS %s ---\n' % (self.rts_state and 'active' or 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.dtr_state = not self.dtr_state
self.serial.setDTR(self.dtr_state)
sys.stderr.write('--- DTR %s ---\n' % (self.dtr_state and 'active' or 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.break_state = not self.break_state
self.serial.setBreak(self.break_state)
sys.stderr.write('--- BREAK %s ---\n' % (self.break_state and 'active' or 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo %s ---\n' % (self.echo and 'active' or 'inactive'))
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
elif c == '\x01': # CTRL+A -> cycle escape mode
self.repr_mode += 1
if self.repr_mode > 3:
self.repr_mode = 0
sys.stderr.write('--- escape data: %s ---\n' % (
REPR_MODES[self.repr_mode],
))
elif c == '\x0c': # CTRL+L -> cycle linefeed mode
self.convert_outgoing += 1
if self.convert_outgoing > 2:
self.convert_outgoing = 0
self.newline = NEWLINE_CONVERISON_MAP[self.convert_outgoing]
sys.stderr.write('--- line feed %s ---\n' % (
LF_MODES[self.convert_outgoing],
))
#~ elif c in 'pP': # P -> change port XXX reader thread would exit
elif c in 'bB': # B -> change baudrate
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
console.cleanup()
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError, e:
sys.stderr.write('--- ERROR setting baudrate: %s ---\n' % (e,))
self.serial.baudrate = backup
else:
self.dump_port_settings()
console.setup()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
elif c == '7': # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character %s --\n' % key_description(c))
menu_active = False
elif c == MENUCHARACTER: # next char will be for menu
menu_active = True
elif c == EXITCHARCTER:
self.stop()
break # exit app
elif c == '\r':
pass
elif c == '\n':
self.serial.write(self.newline) # send newline character(s)
if self.echo:
sys.stdout.write(c) # local echo is a real newline in any case
sys.stdout.flush()
else:
self.serial.write(c) # send character
if self.echo:
sys.stdout.write(c)
sys.stdout.flush()
except:
self.alive = False
raise
def main():
import optparse
parser = optparse.OptionParser(
usage = "%prog [options] [port [baudrate]]",
description = "Miniterm - A simple terminal program for the serial port."
)
parser.add_option("-p", "--port",
dest = "port",
help = "port, a number (default 0) or a device name (deprecated option)",
default = None
)
parser.add_option("-b", "--baud",
dest = "baudrate",
action = "store",
type = 'int',
help = "set baud rate, default %default",
default = 9600
)
parser.add_option("--parity",
dest = "parity",
action = "store",
help = "set parity, one of [N, E, O, S, M], default=N",
default = 'N'
)
parser.add_option("-e", "--echo",
dest = "echo",
action = "store_true",
help = "enable local echo (default off)",
default = False
)
parser.add_option("--rtscts",
dest = "rtscts",
action = "store_true",
help = "enable RTS/CTS flow control (default off)",
default = False
)
parser.add_option("--xonxoff",
dest = "xonxoff",
action = "store_true",
help = "enable software flow control (default off)",
default = False
)
parser.add_option("--cr",
dest = "cr",
action = "store_true",
help = "do not send CR+LF, send CR only",
default = False
)
parser.add_option("--lf",
dest = "lf",
action = "store_true",
help = "do not send CR+LF, send LF only",
default = False
)
parser.add_option("-D", "--debug",
dest = "repr_mode",
action = "count",
help = """debug received data (escape non-printable chars)
--debug can be given multiple times:
0: just print what is received
1: escape non-printable characters, do newlines as usual
2: escape non-printable characters, newlines too
3: hex dump everything""",<|fim▁hole|> default = 0
)
parser.add_option("--rts",
dest = "rts_state",
action = "store",
type = 'int',
help = "set initial RTS line state (possible values: 0, 1)",
default = None
)
parser.add_option("--dtr",
dest = "dtr_state",
action = "store",
type = 'int',
help = "set initial DTR line state (possible values: 0, 1)",
default = None
)
parser.add_option("-q", "--quiet",
dest = "quiet",
action = "store_true",
help = "suppress non error messages",
default = False
)
parser.add_option("--exit-char",
dest = "exit_char",
action = "store",
type = 'int',
help = "ASCII code of special character that is used to exit the application...",
default = 0x1d
)
parser.add_option("--menu-char",
dest = "menu_char",
action = "store",
type = 'int',
help = "ASCII code of special character that is used to control miniterm (menu)",
default = 0x14
)
(options, args) = parser.parse_args()
options.parity = options.parity.upper()
if options.parity not in 'NEOSM':
parser.error("invalid parity")
if options.cr and options.lf:
parser.error("only one of --cr or --lf can be specified")
if options.menu_char == options.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
global EXITCHARCTER, MENUCHARACTER
EXITCHARCTER = chr(options.exit_char)
MENUCHARACTER = chr(options.menu_char)
port = options.port
baudrate = options.baudrate
if args:
if options.port is not None:
parser.error("no arguments are allowed, options only when --port is given")
port = args.pop(0)
if args:
try:
baudrate = int(args[0])
except ValueError:
parser.error("baud rate must be a number, not %r" % args[0])
args.pop(0)
if args:
parser.error("too many arguments")
else:
if port is None: port = 0
convert_outgoing = CONVERT_CRLF
if options.cr:
convert_outgoing = CONVERT_CR
elif options.lf:
convert_outgoing = CONVERT_LF
try:
miniterm = Miniterm(
port,
baudrate,
options.parity,
rtscts=options.rtscts,
xonxoff=options.xonxoff,
echo=options.echo,
convert_outgoing=convert_outgoing,
repr_mode=options.repr_mode,
)
except serial.SerialException, e:
sys.stderr.write("could not open port %r: %s\n" % (port, e))
sys.exit(1)
if not options.quiet:
sys.stderr.write('--- Miniterm on %s: %d,%s,%s,%s ---\n' % (
miniterm.serial.portstr,
miniterm.serial.baudrate,
miniterm.serial.bytesize,
miniterm.serial.parity,
miniterm.serial.stopbits,
))
sys.stderr.write('--- Quit: %s | Menu: %s | Help: %s followed by %s ---\n' % (
key_description(EXITCHARCTER),
key_description(MENUCHARACTER),
key_description(MENUCHARACTER),
key_description('\x08'),
))
if options.dtr_state is not None:
if not options.quiet:
sys.stderr.write('--- forcing DTR %s\n' % (options.dtr_state and 'active' or 'inactive'))
miniterm.serial.setDTR(options.dtr_state)
miniterm.dtr_state = options.dtr_state
if options.rts_state is not None:
if not options.quiet:
sys.stderr.write('--- forcing RTS %s\n' % (options.rts_state and 'active' or 'inactive'))
miniterm.serial.setRTS(options.rts_state)
miniterm.rts_state = options.rts_state
miniterm.start()
miniterm.join(True)
if not options.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
if __name__ == '__main__':
main()<|fim▁end|>
| |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>from ConfigParser import DEFAULTSECT
from cmd import Cmd
import logging
import sys
import subprocess
import argparse
import datetime
from fibbing import FibbingManager
import fibbingnode
from fibbingnode.misc.utils import dump_threads
import signal
log = fibbingnode.log
CFG = fibbingnode.CFG
class FibbingCLI(Cmd):
Cmd.prompt = '> '
def __init__(self, mngr, *args, **kwargs):
self.fibbing = mngr
Cmd.__init__(self, *args, **kwargs)
def do_add_node(self, line=''):
"""Add a new fibbing node"""
self.fibbing.add_node()
def do_show_lsdb(self, line=''):
log.info(self.fibbing.root.lsdb)
def do_draw_network(self, line):
"""Draw the network as pdf in the given file"""
self.fibbing.root.lsdb.graph.draw(line)
def do_print_graph(self, line=''):
log.info('Current network graph: %s',<|fim▁hole|> """Print information about the fibbing network"""
self.fibbing.print_net()
def do_print_routes(self, line=''):
"""Print information about the fibbing routes"""
self.fibbing.print_routes()
def do_exit(self, line=''):
"""Exit the prompt"""
return True
def do_cfg(self, line=''):
part = line.split(' ')
val = part.pop()
key = part.pop()
sect = part.pop() if part else DEFAULTSECT
CFG.set(sect, key, val)
def do_call(self, line):
"""Execute a command on a node"""
items = line.split(' ')
try:
node = self.fibbing[items[0]]
node.call(*items[1:])
except KeyError:
log.error('Unknown node %s', items[0])
def do_add_route(self, line=''):
"""Setup a fibbing route
add_route network via1 metric1 via2 metric2 ..."""
items = line.split(' ')
if len(items) < 3:
log.error('route takes at least 3 arguments: '
'network via_address metric')
else:
points = []
i = 2
while i < len(items):
points.append((items[i-1], items[i]))
i += 2
log.critical('Add route request at %s',
datetime.datetime.now().strftime('%H.%M.%S.%f'))
self.fibbing.install_route(items[0], points, True)
def do_rm_route(self, line):
"""Remove a route or parts of a route"""
items = line.split(' ')
if len(items) == 1:
ans = raw_input('Remove the WHOLE fibbing route for %s ? (y/N)'
% line)
if ans == 'y':
self.fibbing.remove_route(line)
else:
self.fibbing.remove_route_part(items[0], *items[1:])
def default(self, line):
"""Pass the command to the shell"""
args = line.split(' ')
if args[0] in self.fibbing.nodes:
self.do_call(' '.join(args))
else:
try:
log.info(subprocess.check_output(line, shell=True))
except Exception as e:
log.info('Command %s failed', line)
log.info(e.message)
def eval(self, line):
"""Interpret the given line ..."""
self.eval(line)
def do_ospfd(self, line):
"""Connect to the ospfd daemon of the given node"""
try:
self.fibbing[line].call('telnet', 'localhost', '2604')
except KeyError:
log.error('Unknown node %s', line)
def do_vtysh(self, line):
"""Execute a vtysh command on a node"""
items = line.split(' ')
try:
node = self.fibbing[items[0]]
result = node.vtysh(*items[1:], configure=False)
log.info(result)
except KeyError:
log.error('Unknown node %s', items[0])
def do_configure(self, line):
"""Execute a vtysh configure command on a node"""
items = line.split(' ')
try:
node = self.fibbing[items[0]]
result = node.vtysh(*items[1:], configure=True)
result = result.strip(' \n\t')
if result:
log.info(result)
except KeyError:
log.error('Unknown node %s', items[0])
def do_traceroute(self, line, max_ttl=10):
"""
Perform a simple traceroute between the source and an IP
:param max_ttl: the maximal ttl to use
"""
items = line.split(' ')
try:
node = self.fibbing[items[0]]
node.call('traceroute', '-q', '1', '-I',
'-m', str(max_ttl), '-w', '.1', items[1])
except KeyError:
log.error('Unknown node %s', items[0])
except ValueError:
log.error('This command takes 2 arguments: '
'source node and destination IP')
def do_dump(self, line=''):
dump_threads()
def handle_args():
parser = argparse.ArgumentParser(description='Starts a fibbing node.')
parser.add_argument('ports', metavar='IF', type=str, nargs='*',
help='A physical interface to use')
parser.add_argument('--debug', action='store_true', default=False,
help='Debug (default: disabled)')
parser.add_argument('--nocli', action='store_true', default=False,
help='Disable the CLI')
parser.add_argument('--cfg', help='Use specified config file',
default=None)
args = parser.parse_args()
instance_count = CFG.getint(DEFAULTSECT, 'controller_instance_number')
# Update default config
if args.cfg:
CFG.read(args.cfg)
fibbingnode.BIN = CFG.get(DEFAULTSECT, 'quagga_path')
# Check if we need to force debug mode
if args.debug:
CFG.set(DEFAULTSECT, 'debug', '1')
if CFG.getboolean(DEFAULTSECT, 'debug'):
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
# Check for any specified physical port to use both in config file
# or in args
ports = set(p for p in CFG.sections()
if not (p == 'fake' or p == 'physical' or p == DEFAULTSECT))
ports.update(args.ports)
if not ports:
log.warning('The fibbing node will not be connected '
'to any physical ports!')
else:
log.info('Using the physical ports: %s', ports)
return ports, instance_count, not args.nocli
def main(_CLI=FibbingCLI):
phys_ports, name, cli = handle_args()
if not cli:
fibbingnode.log_to_file('%s.log' % name)
mngr = FibbingManager(name)
def sig_handler(sig, frame):
mngr.cleanup()
fibbingnode.EXIT.set()
sys.exit()
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
try:
mngr.start(phys_ports=phys_ports)
if cli:
cli = _CLI(mngr=mngr)
cli.cmdloop()
fibbingnode.EXIT.set()
except Exception as e:
log.exception(e)
fibbingnode.EXIT.set()
finally:
fibbingnode.EXIT.wait()
mngr.cleanup()
if __name__ == '__main__':
main()<|fim▁end|>
|
self.fibbing.root.lsdb.graph.edges(data=True))
def do_print_net(self, line=''):
|
<|file_name|>test_gpu_demos.py<|end_file_name|><|fim▁begin|>import os
import subprocess
import sys
import pytest
sys.path.append("tests/python")
import testing as tm
import test_demos as td # noqa
@pytest.mark.skipif(**tm.no_cupy())
def test_data_iterator():
script = os.path.join(td.PYTHON_DEMO_DIR, 'quantile_data_iterator.py')
cmd = ['python', script]
subprocess.check_call(cmd)
def test_update_process_demo():
script = os.path.join(td.PYTHON_DEMO_DIR, 'update_process.py')
cmd = ['python', script]
subprocess.check_call(cmd)
def test_categorical_demo():
script = os.path.join(td.PYTHON_DEMO_DIR, 'categorical.py')
cmd = ['python', script]<|fim▁hole|> subprocess.check_call(cmd)
@pytest.mark.skipif(**tm.no_dask())
@pytest.mark.skipif(**tm.no_dask_cuda())
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.mgpu
def test_dask_training():
script = os.path.join(tm.PROJECT_ROOT, 'demo', 'dask', 'gpu_training.py')
cmd = ['python', script, '--ddqdm=1']
subprocess.check_call(cmd)
cmd = ['python', script, '--ddqdm=0']
subprocess.check_call(cmd)<|fim▁end|>
| |
<|file_name|>modulefinder1.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# https://docs.python.org/3/library/modulefinder.html
from modulefinder import ModuleFinder
finder = ModuleFinder()
finder.run_script('graph1.py')
print('Loaded modules:')
for name, mod in finder.modules.items():
print('%s: ' % name, end='')<|fim▁hole|> print(','.join(list(mod.globalnames.keys())[:3]))
print('-'*50)
print('Modules not imported:')
print('\n'.join(finder.badmodules.keys()))<|fim▁end|>
| |
<|file_name|>resources.component.ts<|end_file_name|><|fim▁begin|>import { Component, OnInit } from '@angular/core';
import { IF_result } from '../shared/index';
import { ResourceService } from '../shared/api/resource.service';
/**
* Ambient declaration for the global jQuery object
* @type {any}
*/
declare var $:any;
/**
* interface - resource list query state
*/
interface IF_resources {
isLoader : boolean;
isLast : boolean;
page : number;
limit : number;
total: number;
cityid : string;
adjusting : string;
created : string;
category : string;
keyword : string;
result : any[];
}
/**
* This class represents the lazy loaded ResourcesComponent.
*/
@Component({
moduleId: module.id,
selector: 'sd-resources',
templateUrl: 'resources.component.html',
styleUrls: ['resources.component.css'],
// providers: [ MobiscrollDirective ],
// directives: [ MobiscrollDirective ]
})
export class ResourcesComponent implements OnInit {
/**
* Properties
*/
errorMessage: string;
Categorys:any[] = [{id: 0,name: "全部",sortname: ""}];
Citys:any[] = [{id:0,name:'全部地区'}];
IsWeiXin: boolean = false;
Resources:IF_resources = {<|fim▁hole|> limit : 6,
total: 0,
cityid : "",
adjusting : "0",
created : "0",
category : "",
keyword : "",
result : []
};
/**
* Constructor - creates an instance of the service
* @param {ResourceService} public resourceService [description]
*/
constructor(
public resourceService: ResourceService
) {}
/**
* Initialization
*/
ngOnInit() {
$('#Resources').css({
'min-height' : $(window).height()
});
this.getResourceData();
this.getResourceList();
}
/**
* Fetch product categories and regions
*/
getResourceData() {
this.resourceService.getResourceData()
.subscribe(
result => {
// console.log(result);
if (result.success == "0") {
for (var key of Object.keys(result.data.cities)) {
this.Citys.push({
id: key,
name: result.data.cities[key]
});
}
for (var key of Object.keys(result.data.categories)) {
this.Categorys.push(result.data.categories[key]);
}
}
},
error => this.errorMessage = <any>error
);
}
/**
* Fetch the resource list
*/
getResourceList(isGetMore: boolean = false) {
if(!isGetMore){
this.Resources.result = [];
this.Resources.page = 1;
}
this.Resources.isLoader = true;
console.log(this.Resources)
this.resourceService.getResourceList(this.Resources)
.subscribe(
result => {
// console.log(result);
if (result.success == "0") {
this.Resources.total = Math.ceil(result.data.count / this.Resources.limit) ;
if(!isGetMore){
for (let value of result.data.Respurces) {
value.isMarquee = (this.realLength(value.description)*19) > (window.innerWidth-145);
}
this.Resources.result = result.data.Respurces;
}else{
for (let value of result.data.Respurces) {
value.isMarquee = (this.realLength(value.description)*19) > (window.innerWidth-145);
this.Resources.result.push(value);
}
}
this.Resources.isLoader = false;
this.Resources.isLast = (this.Resources.page >= this.Resources.total);
if (this.Resources.result.length > 0) {
this.renderScroll();
}
} else {
alert(result.message);
}
// console.log(this.Resources)
},
);
}
/**
* Real (display) length of a string
* @param {any} str [description]
* @return {number} [description]
*/
realLength(str:any): number {
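// Count double-byte (e.g. CJK) characters as 1 and single-byte characters as 0.5, then round up so the result roughly tracks display width.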
var L=0.0;
for(var i in str){
L+=(str.charCodeAt(i)>255)?1.0:0.5;
}
return Math.ceil(L);
}
/**
* Search box clear-text event
* @param {any} e [description]
*/
seachTextClear(e:any) {
e.stopPropagation();
this.Resources.category = "";
}
/**
* Search box focus event
* @param {any} e [description]
*/
seachTextFocus(e:any) {
$(e.target).parent().animate({width:"192px"},'fast');
}
/**
* Search box blur event
* @param {any} e [description]
*/
seachTextBlur(e:any) {
$(e.target).parent().animate({width:"100px"},'fast');
setTimeout(()=> {
if ($.trim(this.Resources.category) == "") {
this.Resources.category = "";
}
this.getResourceList();
},300);
}
/**
* Sort & filter-condition change event
* @param {boolean = false} type [description]
*/
changeList(type:boolean = false) {
if (type) {
this.Resources.created = "0";
this.Resources.adjusting = "0";
$('#sel-default').mobiscroll('clear');
$('#sel-date').mobiscroll('clear');
}
this.getResourceList();
}
/**
* Fetch more resources
*/
getMoreList() {
this.Resources.page+=1;
this.getResourceList(true);
}
renderScroll() {
setTimeout(()=> {
let intervalScroll: any[] = [];
$('.main_product').each(function(i:any,item:any) {
if ($(item).find('p').length > 1) {
intervalScroll[i] = setInterval(function() {
var $firstP = $(item).find('p:first'),
height = $firstP.height();
$firstP.animate({
height:0
},1000,'swing',function() {
$firstP.height(height);
$(item).append($firstP.clone());
$firstP.remove();
})
}, 3000);
}
});
let scrollMarquee: any[] = [];
$('.marquee span:not(.active)').each(function(i:any,item:any){
$(item).addClass('active');
var spanwidth = $(item).get(0).offsetWidth,
pWidth = $(item).parent().width(),
left = -18;
scrollMarquee[i] = setInterval(function(){
if (left <= spanwidth) {
left+=2;
$(item).css('left',-left);
} else {
$(item).css('left',pWidth);
left= -pWidth;
}
},50)
});
},300);
}
}<|fim▁end|>
|
isLoader : true,
isLast : false,
page : 1,
|
<|file_name|>ex6.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
Use Netmiko to execute 'show arp' on pynet-rtr1, pynet-rtr2, and juniper-srx.
'''
from netmiko import ConnectHandler
from getpass import getpass
from routers import pynet_rtr1, pynet_rtr2, pynet_jnpr_srx1
<|fim▁hole|> '''
ip_address = raw_input("Please enter IP: ")
password = getpass()
pynet_rtr1['ip'] = ip_address
pynet_rtr2['ip'] = ip_address
pynet_jnpr_srx1['ip'] = ip_address
pynet_rtr1['password'] = password
pynet_rtr2['password'] = password
pynet_jnpr_srx1['password'] = password
#for each router send show arp command and print result
for router in (pynet_rtr1, pynet_rtr2, pynet_jnpr_srx1):
ssh_conn = ConnectHandler(verbose=False, **router)
output = ssh_conn.send_command('show arp')
print ">>> {}: \n".format(ssh_conn.ip)
print output
print ">>>\n"
if __name__ == '__main__':
main()<|fim▁end|>
|
def main():
'''
Use Netmiko to execute 'show arp' on pynet-rtr1, pynet-rtr2, and juniper-srx.
|
<|file_name|>ban_self_closing_custom_element_tagnames.js<|end_file_name|><|fim▁begin|>// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be<|fim▁hole|>
const {isLitHtmlTemplateCall} = require('./utils.js');
module.exports = {
meta: {
type: 'problem',
docs: {
description: 'Check for self closing custom element tag names in Lit templates.',
category: 'Possible Errors',
},
fixable: 'code',
schema: [] // no options
},
create: function(context) {
return {
TaggedTemplateExpression(node) {
const isLitHtmlCall = isLitHtmlTemplateCall(node);
if (!isLitHtmlCall) {
return;
}
const text = node.quasi.quasis.map(templatePart => templatePart.value.raw).join('@TEMPLATE_EXPRESSION()');
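// Interpolated expressions are replaced by a placeholder so the regex below can spot a custom-element tag written as self-closing.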
if (text.match(/<@TEMPLATE_EXPRESSION\(\)([^>]*?)\/>/)) {
context.report({
node,
message: 'Custom elements should not be self closing.',
});
}
},
};
}
};<|fim▁end|>
|
// found in the LICENSE file.
'use strict';
|
<|file_name|>test_signals.py<|end_file_name|><|fim▁begin|>from django.test import TestCase, Client
from sendgrid import utils, signals
import json
class SignalTestCase(TestCase):
def setUp(self):
self.client = Client()
self.email_data = {'subject': 'Test Subject',
'body': 'Hi, I am a test body',
'from_email': '[email protected]',
'to': ('[email protected]', )}
def test_received_email(self):
""" Test signals triggered by sendgrid callback.
"""
data = []
def email_event_handler(sender, signal):
data.append((sender, signal, ))
signals.email_event.connect(email_event_handler)
# check if we received signals
self.assertEqual(len(data), 0)
message = utils.SendgridEmailMessage(**self.email_data)
message.send()
# check if we received the signal triggered by the email creation
self.assertEqual(len(data), 1)
response = self.client.post('/sendgrid_callback/',
data=json.dumps([{
'email': '[email protected]',
'uuid': message.uuid,
'event': 'processed',
'timestamp': '123456789',
}, ]),
content_type='application/json')
# verify that we received a signal
self.assertEqual(len(data), 2)
self.assertEqual(data[1][0].event, 'processed')
self.assertEqual(data[1][0].uuid, message.uuid)
self.assertEqual(response.status_code, 200)
def test_dupe_signals(self):
""" Test handling of duplicate signals.
"""
data = []
def email_event_handler(sender, signal):
data.append((sender, signal, ))
signals.email_event.connect(email_event_handler)
# check if we received signals
self.assertEqual(len(data), 0)
message = utils.SendgridEmailMessage(**self.email_data)
message.send()
# check if we received the signal triggered by the email creation
self.assertEqual(len(data), 1)
response = self.client.post('/sendgrid_callback/',
data=json.dumps([{
'email': '[email protected]',
'uuid': message.uuid,
'event': 'delivered',
'timestamp': '123456789',
}, ]),
content_type='application/json')<|fim▁hole|> self.assertEqual(data[1][0].event, 'delivered')
self.assertEqual(data[1][0].uuid, message.uuid)
self.assertEqual(response.status_code, 200)
response = self.client.post('/sendgrid_callback/',
data=json.dumps([{
'email': '[email protected]',
'uuid': message.uuid,
'event': 'delivered',
'timestamp': '123456790',
}, ]),
content_type='application/json')
# verify that we received a signal
self.assertEqual(len(data), 2)
self.assertEqual(response.status_code, 200)<|fim▁end|>
|
# verify that we received a signal
self.assertEqual(len(data), 2)
|
<|file_name|>registry.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##<|fim▁hole|>## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Registry definition for fixture datasets."""
from flask.ext.registry import RegistryProxy
from invenio.ext.registry import ModuleAutoDiscoveryRegistry
from invenio.utils.datastructures import LazyDict
fixtures_proxy = RegistryProxy(
'fixtures', ModuleAutoDiscoveryRegistry, 'fixtures')
def fixtures_loader():
"""Load fixtures datasets."""
out = {}
for fixture in fixtures_proxy:
for data in getattr(fixture, '__all__', dir(fixture)):
if data[-4:] != 'Data' or data in out:
continue
out[data] = getattr(fixture, data)
return out
fixtures = LazyDict(fixtures_loader)<|fim▁end|>
|
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
<|file_name|>debugSmoke.ts<|end_file_name|><|fim▁begin|>/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { Viewlet } from '../workbench/viewlet';
import { Commands } from '../workbench/workbench';
import { Code, findElement } from '../../vscode/code';
import { Editors } from '../editor/editors';
import { Editor } from '../editor/editor';
import { IElement } from '../../vscode/driver';
const VIEWLET = 'div[id="workbench.view.debug"]';
const DEBUG_VIEW = `${VIEWLET} .debug-view-content`;
const CONFIGURE = `div[id="workbench.parts.sidebar"] .actions-container .configure`;
const STOP = `.debug-toolbar .debug-action.stop`;
const STEP_OVER = `.debug-toolbar .debug-action.step-over`;
const STEP_IN = `.debug-toolbar .debug-action.step-into`;
const STEP_OUT = `.debug-toolbar .debug-action.step-out`;
const CONTINUE = `.debug-toolbar .debug-action.continue`;
const GLYPH_AREA = '.margin-view-overlays>:nth-child';
const BREAKPOINT_GLYPH = '.debug-breakpoint';
const PAUSE = `.debug-toolbar .debug-action.pause`;
const DEBUG_STATUS_BAR = `.statusbar.debugging`;
const NOT_DEBUG_STATUS_BAR = `.statusbar:not(debugging)`;
const TOOLBAR_HIDDEN = `.debug-toolbar[aria-hidden="true"]`;
const STACK_FRAME = `${VIEWLET} .monaco-list-row .stack-frame`;
const SPECIFIC_STACK_FRAME = filename => `${STACK_FRAME} .file[title*="${filename}"]`;
const VARIABLE = `${VIEWLET} .debug-variables .monaco-list-row .expression`;
const CONSOLE_OUTPUT = `.repl .output.expression .value`;
const CONSOLE_INPUT_OUTPUT = `.repl .input-output-pair .output.expression .value`;
const REPL_FOCUSED = '.repl-input-wrapper .monaco-editor textarea';
export interface IStackFrame {
name: string;
lineNumber: number;
}
function toStackFrame(element: IElement): IStackFrame {
const name = findElement(element, e => /\bfile-name\b/.test(e.className))!;
const line = findElement(element, e => /\bline-number\b/.test(e.className))!;
const lineNumber = line.textContent ? parseInt(line.textContent.split(':').shift() || '0') : 0;
return {
name: name.textContent || '',
lineNumber
};
}
export class Debug extends Viewlet {
constructor(code: Code, private commands: Commands, private editors: Editors, private editor: Editor) {
super(code);
}
async openDebugViewlet(): Promise<any> {
if (process.platform === 'darwin') {
await this.code.dispatchKeybinding('cmd+shift+d');<|fim▁hole|> } else {
await this.code.dispatchKeybinding('ctrl+shift+d');
}
await this.code.waitForElement(DEBUG_VIEW);
}
async configure(): Promise<any> {
await this.code.waitAndClick(CONFIGURE);
await this.editors.waitForEditorFocus('launch.json');
}
async setBreakpointOnLine(lineNumber: number): Promise<any> {
await this.code.waitForElement(`${GLYPH_AREA}(${lineNumber})`);
await this.code.waitAndClick(`${GLYPH_AREA}(${lineNumber})`, 5, 5);
await this.code.waitForElement(BREAKPOINT_GLYPH);
}
async startDebugging(): Promise<number> {
await this.code.dispatchKeybinding('f5');
await this.code.waitForElement(PAUSE);
await this.code.waitForElement(DEBUG_STATUS_BAR);
const portPrefix = 'Port: ';
const output = await this.waitForOutput(output => output.some(line => line.indexOf(portPrefix) >= 0));
const lastOutput = output.filter(line => line.indexOf(portPrefix) >= 0)[0];
return lastOutput ? parseInt(lastOutput.substr(portPrefix.length)) : 3000;
}
async stepOver(): Promise<any> {
await this.code.waitAndClick(STEP_OVER);
}
async stepIn(): Promise<any> {
await this.code.waitAndClick(STEP_IN);
}
async stepOut(): Promise<any> {
await this.code.waitAndClick(STEP_OUT);
}
async continue(): Promise<any> {
await this.code.waitAndClick(CONTINUE);
await this.waitForStackFrameLength(0);
}
async stopDebugging(): Promise<any> {
await this.code.waitAndClick(STOP);
await this.code.waitForElement(TOOLBAR_HIDDEN);
await this.code.waitForElement(NOT_DEBUG_STATUS_BAR);
}
async waitForStackFrame(func: (stackFrame: IStackFrame) => boolean, message: string): Promise<IStackFrame> {
const elements = await this.code.waitForElements(STACK_FRAME, true, elements => elements.some(e => func(toStackFrame(e))));
return elements.map(toStackFrame).filter(s => func(s))[0];
}
async waitForStackFrameLength(length: number): Promise<any> {
await this.code.waitForElements(STACK_FRAME, false, result => result.length === length);
}
async focusStackFrame(name: string, message: string): Promise<any> {
await this.code.waitAndClick(SPECIFIC_STACK_FRAME(name), 0, 0);
await this.editors.waitForTab(name);
}
async waitForReplCommand(text: string, accept: (result: string) => boolean): Promise<void> {
await this.commands.runCommand('Debug: Focus on Debug Console View');
await this.code.waitForActiveElement(REPL_FOCUSED);
await this.code.waitForSetValue(REPL_FOCUSED, text);
// Wait for the keys to be picked up by the editor model such that repl evalutes what just got typed
await this.editor.waitForEditorContents('debug:replinput', s => s.indexOf(text) >= 0);
await this.code.dispatchKeybinding('enter');
await this.code.waitForElement(CONSOLE_INPUT_OUTPUT);
await this.waitForOutput(output => accept(output[output.length - 1] || ''));
}
async waitForVariableCount(count: number): Promise<void> {
await this.code.waitForElements(VARIABLE, false, els => els.length === count);
}
private async waitForOutput(fn: (output: string[]) => boolean): Promise<string[]> {
const elements = await this.code.waitForElements(CONSOLE_OUTPUT, false, elements => fn(elements.map(e => e.textContent)));
return elements.map(e => e.textContent);
}
}<|fim▁end|>
| |
<|file_name|>0008_auto_20150809_1341.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('let_me_app', '0007_auto_20150723_2238'),
]
operations = [<|fim▁hole|> field=models.DateTimeField(verbose_name='date started', db_index=True),
preserve_default=True,
),
]<|fim▁end|>
|
migrations.AlterField(
model_name='event',
name='start_at',
|
<|file_name|>OI.java<|end_file_name|><|fim▁begin|>package team.gif;
import edu.wpi.first.wpilibj.Joystick;
import edu.wpi.first.wpilibj.buttons.Button;
import edu.wpi.first.wpilibj.buttons.DigitalIOButton;
import edu.wpi.first.wpilibj.buttons.JoystickButton;
import team.gif.commands.*;
public class OI {
public static final Joystick leftStick = new Joystick(1);
    public static final Joystick rightStick = new Joystick(2);
    public static final Joystick auxStick = new Joystick(3);
private final Button leftTrigger = new JoystickButton(leftStick, 1);
private final Button right2 = new JoystickButton(rightStick, 2);
private final Button right3 = new JoystickButton(rightStick, 3);
private final Button right6 = new JoystickButton(rightStick, 6);
private final Button right7 = new JoystickButton(rightStick, 7);
    public static final Button auxTrigger = new JoystickButton(auxStick, 1);
public OI() {
leftTrigger.whileHeld(new ShifterHigh());
right2.whileHeld(new CollectorReceive());
right2.whenPressed(new EarsOpen());
right3.whileHeld(new CollectorPass());
right3.whenPressed(new EarsOpen());
right3.whenReleased(new CollectorStandby());
right3.whenReleased(new EarsClosed());
right6.whileHeld(new BumperUp());
right7.whileHeld(new CollectorRaise());<|fim▁hole|><|fim▁end|>
|
}
}
|
<|file_name|>fake_plumlib.py<|end_file_name|><|fim▁begin|># Copyright 2013 PLUMgrid, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Edgar Magana, [email protected], PLUMgrid, Inc.
from neutron.extensions import providernet as provider
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class Plumlib():
"""
Class PLUMgrid Fake Library. This library is a by-pass implementation
for the PLUMgrid Library. This class is being used by the unit test
integration in Neutron.
"""
def __init__(self):
LOG.info(_('Python PLUMgrid Fake Library Started '))
pass
def director_conn(self, director_plumgrid, director_port, timeout,
director_admin, director_password):
LOG.info(_('Fake Director: %s'),
director_plumgrid + ':' + director_port)
pass
def create_network(self, tenant_id, net_db, network):<|fim▁hole|> net_db["network"] = {}
for key in (provider.NETWORK_TYPE,
provider.PHYSICAL_NETWORK,
provider.SEGMENTATION_ID):
net_db["network"][key] = network["network"][key]
return net_db
def update_network(self, tenant_id, net_id):
pass
def delete_network(self, net_db, net_id):
pass
def create_subnet(self, sub_db, net_db, ipnet):
pass
def update_subnet(self, orig_sub_db, new_sub_db, ipnet):
pass
def delete_subnet(self, tenant_id, net_db, net_id):
pass
def create_port(self, port_db, router_db):
pass
def update_port(self, port_db, router_db):
pass
def delete_port(self, port_db, router_db):
pass
def create_router(self, tenant_id, router_db):
pass
def update_router(self, router_db, router_id):
pass
def delete_router(self, tenant_id, router_id):
pass
def add_router_interface(self, tenant_id, router_id, port_db, ipnet):
pass
def remove_router_interface(self, tenant_id, net_id, router_id):
pass
def create_floatingip(self, floating_ip):
pass
def update_floatingip(self, floating_ip_orig, floating_ip, id):
pass
def delete_floatingip(self, floating_ip_orig, id):
pass
def disassociate_floatingips(self, fip, port_id):
return dict((key, fip[key]) for key in ("id", "floating_network_id",
"floating_ip_address"))<|fim▁end|>
| |
<|file_name|>git.rs<|end_file_name|><|fim▁begin|>use std::collections::HashMap;
use std::env;
use crate::config;
use crate::errors::*;
use crate::ConfigScope;
pub struct Repo {
repo: git2::Repository,
}
impl Repo {
pub fn new() -> Result<Self> {
let repo = env::current_dir()
.chain_err(|| "")
.and_then(|current_dir| git2::Repository::discover(current_dir).chain_err(|| ""))?;
Ok(Repo { repo })
}
pub fn config(&self) -> Result<Config> {
self.repo
.config()
.map(|config| Config { config })
.chain_err(|| "")
}
pub fn auto_include(&self, filename: &str) -> Result<()> {
let include_path = format!("../{}", filename);
let workdir = match self.repo.workdir() {
Some(dir) => dir,
_ => {
return Ok(());
}
};
let mut path_buf = workdir.to_path_buf();
path_buf.push(filename);
if !path_buf.exists() {
return Ok(());
}
let include_paths = self.include_paths()?;
if include_paths.contains(&include_path) {
return Ok(());
}
let mut config = self.local_config()?;
config
.set_multivar("include.path", "^$", &include_path)
.and(Ok(()))
.chain_err(|| "")
}
fn include_paths(&self) -> Result<Vec<String>> {
let config = self.local_config()?;
let include_paths: Vec<String> = config
.entries(Some("include.path"))
.chain_err(|| "")?
.into_iter()
.map(|entry| {
entry
.chain_err(|| "")
.and_then(|entry| entry.value().map(String::from).ok_or_else(|| "".into()))
})
.collect::<Result<_>>()?;
Ok(include_paths)
}
fn local_config(&self) -> Result<git2::Config> {
let config = self.repo.config().chain_err(|| "")?;
config.open_level(git2::ConfigLevel::Local).chain_err(|| "")
}
}
pub struct Config {
config: git2::Config,
}
impl Config {
pub fn new(scope: ConfigScope) -> Result<Self> {
let config = match scope {
ConfigScope::Local => git2::Config::open_default(),
ConfigScope::Global => git2::Config::open_default().and_then(|mut r| r.open_global()),
};
config.map(|config| Config { config }).chain_err(|| "")
}
}
impl config::Config for Config {
fn get(&self, name: &str) -> Result<String> {
self.config
.get_string(name)
.chain_err(|| format!("error getting git config for '{}'", name))
}
fn get_all(&self, glob: &str) -> Result<HashMap<String, String>> {
let mut result = HashMap::new();
let entries = self
.config
.entries(Some(glob))
.chain_err(|| "error getting git config entries")?;
for entry in &entries {
let entry = entry.chain_err(|| "error getting git config entry")?;
if let (Some(name), Some(value)) = (entry.name(), entry.value()) {<|fim▁hole|> }
fn add(&mut self, name: &str, value: &str) -> Result<()> {
self.config
.set_multivar(name, "^$", value)
.chain_err(|| format!("error adding git config '{}': '{}'", name, value))
}
fn set(&mut self, name: &str, value: &str) -> Result<()> {
self.config
.set_str(name, value)
.chain_err(|| format!("error setting git config '{}': '{}'", name, value))
}
fn clear(&mut self, name: &str) -> Result<()> {
self.config
.remove(name)
.chain_err(|| format!("error removing git config '{}'", name))
}
}<|fim▁end|>
|
result.insert(name.into(), value.into());
}
}
Ok(result)
|
<|file_name|>bulk.py<|end_file_name|><|fim▁begin|># Copyright (c) 2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tarfile
from urllib import quote, unquote
from xml.sax import saxutils
from swift.common.swob import Request, HTTPBadGateway, \
HTTPCreated, HTTPBadRequest, HTTPNotFound, HTTPUnauthorized, HTTPOk, \
HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPNotAcceptable, \
wsgify
from swift.common.utils import json, TRUE_VALUES
from swift.common.constraints import check_utf8, MAX_FILE_SIZE
from swift.common.http import HTTP_BAD_REQUEST, HTTP_UNAUTHORIZED, \
HTTP_NOT_FOUND
from swift.common.constraints import MAX_OBJECT_NAME_LENGTH, \
MAX_CONTAINER_NAME_LENGTH
MAX_PATH_LENGTH = MAX_OBJECT_NAME_LENGTH + MAX_CONTAINER_NAME_LENGTH + 2
class CreateContainerError(Exception):
def __init__(self, msg, status_int, status):
self.status_int = status_int
self.status = status
Exception.__init__(self, msg)
ACCEPTABLE_FORMATS = ['text/plain', 'application/json', 'application/xml',
'text/xml']
def get_response_body(data_format, data_dict, error_list):
"""
Returns a properly formatted response body according to format.
:params data_format: resulting format<|fim▁hole|> :params data_dict: generated data about results.
:params error_list: list of quoted filenames that failed
"""
if data_format == 'text/plain':
output = ''
for key in sorted(data_dict.keys()):
output += '%s: %s\n' % (key, data_dict[key])
output += 'Errors:\n'
output += '\n'.join(
['%s, %s' % (name, status)
for name, status in error_list])
return output
if data_format == 'application/json':
data_dict['Errors'] = error_list
return json.dumps(data_dict)
if data_format.endswith('/xml'):
output = '<?xml version="1.0" encoding="UTF-8"?>\n<delete>\n'
for key in sorted(data_dict.keys()):
xml_key = key.replace(' ', '_').lower()
output += '<%s>%s</%s>\n' % (xml_key, data_dict[key], xml_key)
output += '<errors>\n'
output += '\n'.join(
['<object>'
'<name>%s</name><status>%s</status>'
'</object>' % (saxutils.escape(name), status) for
name, status in error_list])
output += '</errors>\n</delete>\n'
return output
raise HTTPNotAcceptable('Invalid output type')
class Bulk(object):
"""
Middleware that will do many operations on a single request.
Extract Archive:
Expand tar files into a swift account. Request must be a PUT with the
query parameter ?extract-archive=format specifying the format of archive
file. Accepted formats are tar, tar.gz, and tar.bz2.
For a PUT to the following url:
/v1/AUTH_Account/$UPLOAD_PATH?extract-archive=tar.gz
UPLOAD_PATH is where the files will be expanded to. UPLOAD_PATH can be a
container, a pseudo-directory within a container, or an empty string. The
destination of a file in the archive will be built as follows:
/v1/AUTH_Account/$UPLOAD_PATH/$FILE_PATH
Where FILE_PATH is the file name from the listing in the tar file.
If the UPLOAD_PATH is an empty string, containers will be auto created
accordingly and files in the tar that would not map to any container (files
in the base directory) will be ignored.
Only regular files will be uploaded. Empty directories, symlinks, etc will
not be uploaded.
If all valid files were uploaded successfully will return an HTTPCreated
response. If any files failed to be created will return an HTTPBadGateway
response. In both cases the response body will specify the number of files
successfully uploaded and a list of the files that failed. The return body
will be formatted in the way specified in the request's Accept header.
Acceptable formats are text/plain, application/json, application/xml, and
text/xml.
There are proxy logs created for each file (which becomes a subrequest) in
the tar. The subrequest's proxy log will have a swift.source set to "EA"
the log's content length will reflect the unzipped size of the file. If
double proxy-logging is used the leftmost logger will not have a
swift.source set and the content length will reflect the size of the
payload sent to the proxy (the unexpanded size of the tar.gz).
Bulk Delete:
Will delete multiple objects or containers from their account with a
single request. Responds to DELETE requests with query parameter
?bulk-delete set. The Content-Type should be set to text/plain.
The body of the DELETE request will be a newline separated list of url
encoded objects to delete. You can only delete 1000 (configurable) objects
per request. The objects specified in the DELETE request body must be URL
encoded and in the form:
/container_name/obj_name
or for a container (which must be empty at time of delete)
/container_name
If all items were successfully deleted (or did not exist), will return an
HTTPOk. If any failed to delete, will return an HTTPBadGateway. In
both cases the response body will specify the number of items
successfully deleted, not found, and a list of those that failed.
The return body will be formatted in the way specified in the request's
Accept header. Acceptable formats are text/plain, application/json,
application/xml, and text/xml.
There are proxy logs created for each object or container (which becomes a
subrequest) that is deleted. The subrequest's proxy log will have a
swift.source set to "BD" the log's content length of 0. If double
proxy-logging is used the leftmost logger will not have a
swift.source set and the content length will reflect the size of the
payload sent to the proxy (the list of objects/containers to be deleted).
"""
def __init__(self, app, conf):
self.app = app
self.max_containers = int(
conf.get('max_containers_per_extraction', 10000))
self.max_failed_extractions = int(
conf.get('max_failed_extractions', 1000))
self.max_deletes_per_request = int(
conf.get('max_deletes_per_request', 1000))
def create_container(self, req, container_path):
"""
Makes a subrequest to create a new container.
:params container_path: an unquoted path to a container to be created
:returns: None on success
:raises: CreateContainerError on creation error
"""
new_env = req.environ.copy()
new_env['PATH_INFO'] = container_path
new_env['swift.source'] = 'EA'
create_cont_req = Request.blank(container_path, environ=new_env)
resp = create_cont_req.get_response(self.app)
if resp.status_int // 100 != 2:
raise CreateContainerError(
"Create Container Failed: " + container_path,
resp.status_int, resp.status)
def get_objs_to_delete(self, req):
"""
Will populate objs_to_delete with data from request input.
:params req: a Swob request
:returns: a list of the contents of req.body when separated by newline.
:raises: HTTPException on failures
"""
line = ''
data_remaining = True
objs_to_delete = []
if req.content_length is None and \
req.headers.get('transfer-encoding', '').lower() != 'chunked':
raise HTTPBadRequest('Invalid request: no content sent.')
while data_remaining:
if len(objs_to_delete) > self.max_deletes_per_request:
raise HTTPRequestEntityTooLarge(
'Maximum Bulk Deletes: %d per request' %
self.max_deletes_per_request)
if '\n' in line:
obj_to_delete, line = line.split('\n', 1)
objs_to_delete.append(unquote(obj_to_delete))
else:
data = req.body_file.read(MAX_PATH_LENGTH)
if data:
line += data
else:
data_remaining = False
if line.strip():
objs_to_delete.append(unquote(line))
if len(line) > MAX_PATH_LENGTH * 2:
raise HTTPBadRequest('Invalid File Name')
return objs_to_delete
def handle_delete(self, req, objs_to_delete=None, user_agent='BulkDelete',
swift_source='BD'):
"""
:params req: a swob Request
:raises HTTPException: on unhandled errors
:returns: a swob Response
"""
try:
vrs, account, _junk = req.split_path(2, 3, True)
except ValueError:
return HTTPNotFound(request=req)
incoming_format = req.headers.get('Content-Type')
if incoming_format and not incoming_format.startswith('text/plain'):
# For now only accept newline separated object names
return HTTPNotAcceptable(request=req)
out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
if not out_content_type:
return HTTPNotAcceptable(request=req)
if objs_to_delete is None:
objs_to_delete = self.get_objs_to_delete(req)
failed_files = []
success_count = not_found_count = 0
failed_file_response_type = HTTPBadRequest
for obj_to_delete in objs_to_delete:
obj_to_delete = obj_to_delete.strip().lstrip('/')
if not obj_to_delete:
continue
delete_path = '/'.join(['', vrs, account, obj_to_delete])
if not check_utf8(delete_path):
failed_files.append([quote(delete_path),
HTTPPreconditionFailed().status])
continue
new_env = req.environ.copy()
new_env['PATH_INFO'] = delete_path
del(new_env['wsgi.input'])
new_env['CONTENT_LENGTH'] = 0
new_env['HTTP_USER_AGENT'] = \
'%s %s' % (req.environ.get('HTTP_USER_AGENT'), user_agent)
new_env['swift.source'] = swift_source
delete_obj_req = Request.blank(delete_path, new_env)
resp = delete_obj_req.get_response(self.app)
if resp.status_int // 100 == 2:
success_count += 1
elif resp.status_int == HTTP_NOT_FOUND:
not_found_count += 1
elif resp.status_int == HTTP_UNAUTHORIZED:
return HTTPUnauthorized(request=req)
else:
if resp.status_int // 100 == 5:
failed_file_response_type = HTTPBadGateway
failed_files.append([quote(delete_path), resp.status])
resp_body = get_response_body(
out_content_type,
{'Number Deleted': success_count,
'Number Not Found': not_found_count},
failed_files)
if (success_count or not_found_count) and not failed_files:
return HTTPOk(resp_body, content_type=out_content_type)
if failed_files:
return failed_file_response_type(
resp_body, content_type=out_content_type)
return HTTPBadRequest('Invalid bulk delete.')
def handle_extract(self, req, compress_type):
"""
:params req: a swob Request
:params compress_type: specifying the compression type of the tar.
Accepts '', 'gz, or 'bz2'
:raises HTTPException: on unhandled errors
:returns: a swob response to request
"""
success_count = 0
failed_files = []
existing_containers = set()
out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
if not out_content_type:
return HTTPNotAcceptable(request=req)
if req.content_length is None and \
req.headers.get('transfer-encoding', '').lower() != 'chunked':
return HTTPBadRequest('Invalid request: no content sent.')
try:
vrs, account, extract_base = req.split_path(2, 3, True)
except ValueError:
return HTTPNotFound(request=req)
extract_base = extract_base or ''
extract_base = extract_base.rstrip('/')
try:
tar = tarfile.open(mode='r|' + compress_type,
fileobj=req.body_file)
while True:
tar_info = tar.next()
if tar_info is None or \
len(failed_files) >= self.max_failed_extractions:
break
if tar_info.isfile():
obj_path = tar_info.name
if obj_path.startswith('./'):
obj_path = obj_path[2:]
obj_path = obj_path.lstrip('/')
if extract_base:
obj_path = extract_base + '/' + obj_path
if '/' not in obj_path:
continue # ignore base level file
destination = '/'.join(
['', vrs, account, obj_path])
container = obj_path.split('/', 1)[0]
if not check_utf8(destination):
failed_files.append(
[quote(destination[:MAX_PATH_LENGTH]),
HTTPPreconditionFailed().status])
continue
if tar_info.size > MAX_FILE_SIZE:
failed_files.append([
quote(destination[:MAX_PATH_LENGTH]),
HTTPRequestEntityTooLarge().status])
continue
if container not in existing_containers:
try:
self.create_container(
req, '/'.join(['', vrs, account, container]))
existing_containers.add(container)
except CreateContainerError, err:
if err.status_int == HTTP_UNAUTHORIZED:
return HTTPUnauthorized(request=req)
failed_files.append([
quote(destination[:MAX_PATH_LENGTH]),
err.status])
continue
except ValueError:
failed_files.append([
quote(destination[:MAX_PATH_LENGTH]),
HTTP_BAD_REQUEST])
continue
if len(existing_containers) > self.max_containers:
return HTTPBadRequest(
'More than %d base level containers in tar.' %
self.max_containers)
tar_file = tar.extractfile(tar_info)
new_env = req.environ.copy()
new_env['wsgi.input'] = tar_file
new_env['PATH_INFO'] = destination
new_env['CONTENT_LENGTH'] = tar_info.size
new_env['swift.source'] = 'EA'
new_env['HTTP_USER_AGENT'] = \
'%s BulkExpand' % req.environ.get('HTTP_USER_AGENT')
create_obj_req = Request.blank(destination, new_env)
resp = create_obj_req.get_response(self.app)
if resp.status_int // 100 == 2:
success_count += 1
else:
if resp.status_int == HTTP_UNAUTHORIZED:
return HTTPUnauthorized(request=req)
failed_files.append([
quote(destination[:MAX_PATH_LENGTH]), resp.status])
resp_body = get_response_body(
out_content_type,
{'Number Files Created': success_count},
failed_files)
if success_count and not failed_files:
return HTTPCreated(resp_body, content_type=out_content_type)
if failed_files:
return HTTPBadGateway(resp_body, content_type=out_content_type)
return HTTPBadRequest('Invalid Tar File: No Valid Files')
except tarfile.TarError, tar_error:
return HTTPBadRequest('Invalid Tar File: %s' % tar_error)
@wsgify
def __call__(self, req):
extract_type = req.params.get('extract-archive')
if extract_type is not None and req.method == 'PUT':
archive_type = {
'tar': '', 'tar.gz': 'gz',
'tar.bz2': 'bz2'}.get(extract_type.lower().strip('.'))
if archive_type is not None:
return self.handle_extract(req, archive_type)
else:
return HTTPBadRequest("Unsupported archive format")
if 'bulk-delete' in req.params and req.method == 'DELETE':
return self.handle_delete(req)
return self.app
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def bulk_filter(app):
return Bulk(app, conf)
return bulk_filter<|fim▁end|>
| |
<|file_name|>MyRecycleview.java<|end_file_name|><|fim▁begin|>package com.yezi.text.widget;
import android.content.Intent;
import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.SwitchCompat;
import android.view.View;
import android.widget.CompoundButton;
import com.yezi.text.activity.AdapterSampleActivity;
import com.yezi.text.activity.AnimatorSampleActivity;
import com.yezi.text.R;
public class MyRecycleview extends AppCompatActivity {
private boolean enabledGrid = false;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.mycycleview);
findViewById(R.id.btn_animator_sample).setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent i = new Intent(MyRecycleview.this, AnimatorSampleActivity.class);
i.putExtra("GRID", enabledGrid);
startActivity(i);
}
});
findViewById(R.id.btn_adapter_sample).setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent i = new Intent(MyRecycleview.this, AdapterSampleActivity.class);
i.putExtra("GRID", enabledGrid);
startActivity(i);
}
});
<|fim▁hole|> ((SwitchCompat) findViewById(R.id.grid)).setOnCheckedChangeListener(
new CompoundButton.OnCheckedChangeListener() {
@Override
public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) {
enabledGrid = isChecked;
}
});
}
}<|fim▁end|>
| |
<|file_name|>serve.go<|end_file_name|><|fim▁begin|>// FUSE service loop, for servers that wish to use it.
package fs // import "bazil.org/fuse/fs"
import (
"encoding/binary"
"fmt"
"hash/fnv"
"io"
"log"
"reflect"
"runtime"
"strings"
"sync"
"time"
"golang.org/x/net/context"
)
import (
"bytes"
"bazil.org/fuse"
"bazil.org/fuse/fuseutil"
)
const (
attrValidTime = 1 * time.Minute
entryValidTime = 1 * time.Minute
)
// CtxTagKey is the type used for unique context keys.
type CtxTagKey int
const (
// CtxHeaderUIDKey is the context key for header UIDs.
CtxHeaderUIDKey CtxTagKey = iota
)
// TODO: FINISH DOCS
// An FS is the interface required of a file system.
//
// Other FUSE requests can be handled by implementing methods from the
// FS* interfaces, for example FSStatfser.
type FS interface {
// Root is called to obtain the Node for the file system root.
Root() (Node, error)
}
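// A minimal filesystem might look like the following sketch (illustrative
// only; helloFS and helloDir are hypothetical names, not part of this
// package, and the example assumes the standard "os" package for the mode
// bits):
//
//	type helloFS struct{}
//
//	// Root returns the directory node that becomes the mountpoint root.
//	func (helloFS) Root() (Node, error) { return helloDir{}, nil }
//
//	type helloDir struct{}
//
//	// Attr satisfies Node by describing the root as a read-only directory.
//	func (helloDir) Attr(ctx context.Context, a *fuse.Attr) error {
//		a.Inode = 1
//		a.Mode = os.ModeDir | 0555
//		return nil
//	}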
type FSStatfser interface {
// Statfs is called to obtain file system metadata.
// It should write that data to resp.
Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error
}
type FSDestroyer interface {
// Destroy is called when the file system is shutting down.
//
// Linux only sends this request for block device backed (fuseblk)
// filesystems, to allow them to flush writes to disk before the
// unmount completes.
Destroy()
}
type FSInodeGenerator interface {
// GenerateInode is called to pick a dynamic inode number when it
// would otherwise be 0.
//
// Not all filesystems bother tracking inodes, but FUSE requires
// the inode to be set, and fewer duplicates in general makes UNIX
// tools work better.
//
// Operations where the nodes may return 0 inodes include Getattr,
// Setattr and ReadDir.
//
// If FS does not implement FSInodeGenerator, GenerateDynamicInode
// is used.
//
// Implementing this is useful to e.g. constrain the range of
// inode values used for dynamic inodes.
GenerateInode(parentInode uint64, name string) uint64
}
// A Node is the interface required of a file or directory.
// See the documentation for type FS for general information
// pertaining to all methods.
//
// A Node must be usable as a map key, that is, it cannot be a
// function, map or slice.
//
// Other FUSE requests can be handled by implementing methods from the
// Node* interfaces, for example NodeOpener.
//
// Methods returning Node should take care to return the same Node
// when the result is logically the same instance. Without this, each
// Node will get a new NodeID, causing spurious cache invalidations,
// extra lookups and aliasing anomalies. This may not matter for a
// simple, read-only filesystem.
type Node interface {
// Attr fills attr with the standard metadata for the node.
//
// Fields with reasonable defaults are prepopulated. For example,
// all times are set to a fixed moment when the program started.
//
// If Inode is left as 0, a dynamic inode number is chosen.
//
// The result may be cached for the duration set in Valid.
Attr(ctx context.Context, attr *fuse.Attr) error
}
type NodeGetattrer interface {
// Getattr obtains the standard metadata for the receiver.
// It should store that metadata in resp.
//
// If this method is not implemented, the attributes will be
// generated based on Attr(), with zero values filled in.
Getattr(ctx context.Context, req *fuse.GetattrRequest, resp *fuse.GetattrResponse) error
}
type NodeSetattrer interface {
// Setattr sets the standard metadata for the receiver.
//
// Note, this is also used to communicate changes in the size of
// the file, outside of Writes.
//
// req.Valid is a bitmask of what fields are actually being set.
// For example, the method should not change the mode of the file
// unless req.Valid.Mode() is true.
Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error
}
type NodeSymlinker interface {
// Symlink creates a new symbolic link in the receiver, which must be a directory.
//
// TODO is the above true about directories?
Symlink(ctx context.Context, req *fuse.SymlinkRequest) (Node, error)
}
// This optional request will be called only for symbolic link nodes.
type NodeReadlinker interface {
// Readlink reads a symbolic link.
Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error)
}
type NodeLinker interface {
// Link creates a new directory entry in the receiver based on an
// existing Node. Receiver must be a directory.
Link(ctx context.Context, req *fuse.LinkRequest, old Node) (Node, error)
}
type NodeRemover interface {
// Remove removes the entry with the given name from
// the receiver, which must be a directory. The entry to be removed
// may correspond to a file (unlink) or to a directory (rmdir).
Remove(ctx context.Context, req *fuse.RemoveRequest) error
}
type NodeAccesser interface {
// Access checks whether the calling context has permission for
// the given operations on the receiver. If so, Access should
// return nil. If not, Access should return EPERM.
//
// Note that this call affects the result of the access(2) system
// call but not the open(2) system call. If Access is not
// implemented, the Node behaves as if it always returns nil
// (permission granted), relying on checks in Open instead.
Access(ctx context.Context, req *fuse.AccessRequest) error
}
type NodeStringLookuper interface {
// Lookup looks up a specific entry in the receiver,
// which must be a directory. Lookup should return a Node
// corresponding to the entry. If the name does not exist in
// the directory, Lookup should return ENOENT.
//
// Lookup need not to handle the names "." and "..".
Lookup(ctx context.Context, name string) (Node, error)
}
type NodeRequestLookuper interface {
// Lookup looks up a specific entry in the receiver.
// See NodeStringLookuper for more.
Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (Node, error)
}
type NodeMkdirer interface {
Mkdir(ctx context.Context, req *fuse.MkdirRequest) (Node, error)
}
type NodeOpener interface {
// Open opens the receiver. After a successful open, a client
// process has a file descriptor referring to this Handle.
//
	// Open can also be called on non-files. For example,
// directories are Opened for ReadDir or fchdir(2).
//
// If this method is not implemented, the open will always
// succeed, and the Node itself will be used as the Handle.
//
// XXX note about access. XXX OpenFlags.
Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (Handle, error)
}
type NodeCreater interface {
// Create creates a new directory entry in the receiver, which
// must be a directory.
Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (Node, Handle, error)
}
type NodeForgetter interface {
// Forget about this node. This node will not receive further
// method calls.
//
// Forget is not necessarily seen on unmount, as all nodes are
	// implicitly forgotten as part of the unmount.
Forget()
}
type NodeRenamer interface {
Rename(ctx context.Context, req *fuse.RenameRequest, newDir Node) error
}
type NodeMknoder interface {
Mknod(ctx context.Context, req *fuse.MknodRequest) (Node, error)
}
// TODO this should be on Handle not Node
type NodeFsyncer interface {
Fsync(ctx context.Context, req *fuse.FsyncRequest) error
}
type NodeGetxattrer interface {
// Getxattr gets an extended attribute by the given name from the
// node.
//
// If there is no xattr by that name, returns fuse.ErrNoXattr.
Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error
}
type NodeListxattrer interface {
// Listxattr lists the extended attributes recorded for the node.
Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error
}
type NodeSetxattrer interface {
// Setxattr sets an extended attribute with the given name and
// value for the node.
Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error
}
type NodeRemovexattrer interface {
// Removexattr removes an extended attribute for the name.
//
// If there is no xattr by that name, returns fuse.ErrNoXattr.
Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error
}
var startTime = time.Now()
func nodeAttr(ctx context.Context, n Node, attr *fuse.Attr) error {
attr.Valid = attrValidTime
attr.Nlink = 1
attr.Atime = startTime
attr.Mtime = startTime
attr.Ctime = startTime
attr.Crtime = startTime
if err := n.Attr(ctx, attr); err != nil {
return err
}
return nil
}
// A Handle is the interface required of an opened file or directory.
// See the documentation for type FS for general information
// pertaining to all methods.
//
// Other FUSE requests can be handled by implementing methods from the
// Handle* interfaces. The most common to implement are HandleReader,
// HandleReadDirer, and HandleWriter.
//
// TODO implement methods: Getlk, Setlk, Setlkw
type Handle interface {
}
type HandleFlusher interface {
// Flush is called each time the file or directory is closed.
// Because there can be multiple file descriptors referring to a
// single opened file, Flush can be called multiple times.
Flush(ctx context.Context, req *fuse.FlushRequest) error
}
type HandleReadAller interface {
ReadAll(ctx context.Context) ([]byte, error)
}
type HandleReadDirAller interface {
ReadDirAll(ctx context.Context) ([]fuse.Dirent, error)
}
type HandleReader interface {
// Read requests to read data from the handle.
//
// There is a page cache in the kernel that normally submits only
// page-aligned reads spanning one or more pages. However, you
// should not rely on this. To see individual requests as
// submitted by the file system clients, set OpenDirectIO.
//
// Note that reads beyond the size of the file as reported by Attr
// are not even attempted (except in OpenDirectIO mode).
Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error
}
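// A hedged example of a read-only handle backed by an in-memory byte slice
// (greetingHandle is a hypothetical name; fuseutil.HandleRead does the
// offset/length bookkeeping against the request's Offset and Size):
//
//	type greetingHandle struct{ data []byte }
//
//	func (h greetingHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
//		fuseutil.HandleRead(req, resp, h.data)
//		return nil
//	}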
type HandleWriter interface {
// Write requests to write data into the handle at the given offset.
// Store the amount of data written in resp.Size.
//
// There is a writeback page cache in the kernel that normally submits
// only page-aligned writes spanning one or more pages. However,
// you should not rely on this. To see individual requests as
// submitted by the file system clients, set OpenDirectIO.
//
// Writes that grow the file are expected to update the file size
// (as seen through Attr). Note that file size changes are
// communicated also through Setattr.
Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error
}
type HandleReleaser interface {
Release(ctx context.Context, req *fuse.ReleaseRequest) error
}
type Config struct {
// Function to send debug log messages to. If nil, use fuse.Debug.
// Note that changing this or fuse.Debug may not affect existing
// calls to Serve.
//
// See fuse.Debug for the rules that log functions must follow.
Debug func(msg interface{})
// Function to put things into context for processing the request.
// The returned context must have ctx as its parent.
//
// Note that changing this may not affect existing calls to Serve.
//
// Must not retain req.
WithContext func(ctx context.Context, req fuse.Request) context.Context
}
// New returns a new FUSE server ready to serve this kernel FUSE
// connection.
//
// Config may be nil.
func New(conn *fuse.Conn, config *Config) *Server {
s := &Server{
conn: conn,
req: map[fuse.RequestID]*serveRequest{},
nodeRef: map[Node]fuse.NodeID{},
dynamicInode: GenerateDynamicInode,
}
if config != nil {
s.debug = config.Debug
s.context = config.WithContext
}
if s.debug == nil {
s.debug = fuse.Debug
}
return s
}
type Server struct {
// set in New
conn *fuse.Conn
debug func(msg interface{})
context func(ctx context.Context, req fuse.Request) context.Context
// set once at Serve time
fs FS
dynamicInode func(parent uint64, name string) uint64
// state, protected by meta
meta sync.Mutex
req map[fuse.RequestID]*serveRequest
node []*serveNode
nodeRef map[Node]fuse.NodeID
handle []*serveHandle
freeNode []fuse.NodeID
freeHandle []fuse.HandleID
nodeGen uint64
// Used to ensure worker goroutines finish before Serve returns
wg sync.WaitGroup
}
// Serve serves the FUSE connection by making calls to the methods
// of fs and the Nodes and Handles it makes available. It returns only
// when the connection has been closed or an unexpected error occurs.
func (s *Server) Serve(fs FS) error {
defer s.wg.Wait() // Wait for worker goroutines to complete before return
s.fs = fs
if dyn, ok := fs.(FSInodeGenerator); ok {
s.dynamicInode = dyn.GenerateInode
}
root, err := fs.Root()
if err != nil {
return fmt.Errorf("cannot obtain root node: %v", err)
}
// Recognize the root node if it's ever returned from Lookup,
// passed to Invalidate, etc.
s.nodeRef[root] = 1
s.node = append(s.node, nil, &serveNode{
inode: 1,
generation: s.nodeGen,
node: root,
refs: 1,
})
s.handle = append(s.handle, nil)
for {
req, err := s.conn.ReadRequest()
if err != nil {
if err == io.EOF {
break
}
return err
}
s.wg.Add(1)
go func() {
defer s.wg.Done()
s.serve(req)
}()
}
return nil
}
// Serve serves a FUSE connection with the default settings. See
// Server.Serve.
func Serve(c *fuse.Conn, fs FS) error {
server := New(c, nil)
return server.Serve(fs)
}
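// Typical use of this package (an illustrative sketch; "/mnt/example" and
// myFS are placeholders, where myFS is any value implementing FS):
//
//	c, err := fuse.Mount("/mnt/example")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer c.Close()
//
//	if err := fs.Serve(c, myFS); err != nil {
//		log.Fatal(err)
//	}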
type nothing struct{}
type serveRequest struct {
Request fuse.Request
cancel func()
}
type serveNode struct {
inode uint64
generation uint64
node Node
refs uint64
// Delay freeing the NodeID until waitgroup is done. This allows
// using the NodeID for short periods of time without holding the
// Server.meta lock.
//
// Rules:
//
// - hold Server.meta while calling wg.Add, then unlock
// - do NOT try to reacquire Server.meta
wg sync.WaitGroup
}
func (sn *serveNode) attr(ctx context.Context, attr *fuse.Attr) error {
err := nodeAttr(ctx, sn.node, attr)
if attr.Inode == 0 {
attr.Inode = sn.inode
}
return err
}
type serveHandle struct {
handle Handle
readData []byte
nodeID fuse.NodeID
}
// NodeRef is deprecated. It remains here to decrease code churn on
// FUSE library users. You may remove it from your program now;
// returning the same Node values is now recognized automatically,
// without needing NodeRef.
type NodeRef struct{}<|fim▁hole|>func (c *Server) saveNode(inode uint64, node Node) (id fuse.NodeID, gen uint64) {
c.meta.Lock()
defer c.meta.Unlock()
if id, ok := c.nodeRef[node]; ok {
sn := c.node[id]
sn.refs++
return id, sn.generation
}
sn := &serveNode{inode: inode, node: node, refs: 1}
if n := len(c.freeNode); n > 0 {
id = c.freeNode[n-1]
c.freeNode = c.freeNode[:n-1]
c.node[id] = sn
c.nodeGen++
} else {
id = fuse.NodeID(len(c.node))
c.node = append(c.node, sn)
}
sn.generation = c.nodeGen
c.nodeRef[node] = id
return id, sn.generation
}
func (c *Server) saveHandle(handle Handle, nodeID fuse.NodeID) (id fuse.HandleID) {
c.meta.Lock()
shandle := &serveHandle{handle: handle, nodeID: nodeID}
if n := len(c.freeHandle); n > 0 {
id = c.freeHandle[n-1]
c.freeHandle = c.freeHandle[:n-1]
c.handle[id] = shandle
} else {
id = fuse.HandleID(len(c.handle))
c.handle = append(c.handle, shandle)
}
c.meta.Unlock()
return
}
type nodeRefcountDropBug struct {
N uint64
Refs uint64
Node fuse.NodeID
}
func (n *nodeRefcountDropBug) String() string {
return fmt.Sprintf("bug: trying to drop %d of %d references to %v", n.N, n.Refs, n.Node)
}
func (c *Server) dropNode(id fuse.NodeID, n uint64) (forget bool) {
c.meta.Lock()
defer c.meta.Unlock()
snode := c.node[id]
if snode == nil {
// this should only happen if refcounts kernel<->us disagree
// *and* two ForgetRequests for the same node race each other;
// this indicates a bug somewhere
c.debug(nodeRefcountDropBug{N: n, Node: id})
// we may end up triggering Forget twice, but that's better
// than not even once, and that's the best we can do
return true
}
if n > snode.refs {
c.debug(nodeRefcountDropBug{N: n, Refs: snode.refs, Node: id})
n = snode.refs
}
snode.refs -= n
if snode.refs == 0 {
snode.wg.Wait()
c.node[id] = nil
delete(c.nodeRef, snode.node)
c.freeNode = append(c.freeNode, id)
return true
}
return false
}
func (c *Server) dropHandle(id fuse.HandleID) {
c.meta.Lock()
c.handle[id] = nil
c.freeHandle = append(c.freeHandle, id)
c.meta.Unlock()
}
type missingHandle struct {
Handle fuse.HandleID
MaxHandle fuse.HandleID
}
func (m missingHandle) String() string {
return fmt.Sprint("missing handle: ", m.Handle, m.MaxHandle)
}
// Returns nil for invalid handles.
func (c *Server) getHandle(id fuse.HandleID) (shandle *serveHandle) {
c.meta.Lock()
defer c.meta.Unlock()
if id < fuse.HandleID(len(c.handle)) {
shandle = c.handle[uint(id)]
}
if shandle == nil {
c.debug(missingHandle{
Handle: id,
MaxHandle: fuse.HandleID(len(c.handle)),
})
}
return
}
type request struct {
Op string
Request *fuse.Header
In interface{} `json:",omitempty"`
}
func (r request) String() string {
return fmt.Sprintf("<- %s", r.In)
}
type logResponseHeader struct {
ID fuse.RequestID
}
func (m logResponseHeader) String() string {
return fmt.Sprintf("ID=%v", m.ID)
}
type response struct {
Op string
Request logResponseHeader
Out interface{} `json:",omitempty"`
// Errno contains the errno value as a string, for example "EPERM".
Errno string `json:",omitempty"`
// Error may contain a free form error message.
Error string `json:",omitempty"`
}
func (r response) errstr() string {
s := r.Errno
if r.Error != "" {
// prefix the errno constant to the long form message
s = s + ": " + r.Error
}
return s
}
func (r response) String() string {
switch {
case r.Errno != "" && r.Out != nil:
return fmt.Sprintf("-> [%v] %v error=%s", r.Request, r.Out, r.errstr())
case r.Errno != "":
return fmt.Sprintf("-> [%v] %s error=%s", r.Request, r.Op, r.errstr())
case r.Out != nil:
// make sure (seemingly) empty values are readable
switch r.Out.(type) {
case string:
return fmt.Sprintf("-> [%v] %s %q", r.Request, r.Op, r.Out)
case []byte:
return fmt.Sprintf("-> [%v] %s [% x]", r.Request, r.Op, r.Out)
default:
return fmt.Sprintf("-> [%v] %v", r.Request, r.Out)
}
default:
return fmt.Sprintf("-> [%v] %s", r.Request, r.Op)
}
}
type notification struct {
Op string
Node fuse.NodeID
Out interface{} `json:",omitempty"`
Err string `json:",omitempty"`
}
func (n notification) String() string {
var buf bytes.Buffer
fmt.Fprintf(&buf, "=> %s %v", n.Op, n.Node)
if n.Out != nil {
// make sure (seemingly) empty values are readable
switch n.Out.(type) {
case string:
fmt.Fprintf(&buf, " %q", n.Out)
case []byte:
fmt.Fprintf(&buf, " [% x]", n.Out)
default:
fmt.Fprintf(&buf, " %s", n.Out)
}
}
if n.Err != "" {
fmt.Fprintf(&buf, " Err:%v", n.Err)
}
return buf.String()
}
type logMissingNode struct {
MaxNode fuse.NodeID
}
func opName(req fuse.Request) string {
t := reflect.Indirect(reflect.ValueOf(req)).Type()
s := t.Name()
s = strings.TrimSuffix(s, "Request")
return s
}
type logLinkRequestOldNodeNotFound struct {
Request *fuse.Header
In *fuse.LinkRequest
}
func (m *logLinkRequestOldNodeNotFound) String() string {
return fmt.Sprintf("In LinkRequest (request %v), node %d not found", m.Request.Hdr().ID, m.In.OldNode)
}
type renameNewDirNodeNotFound struct {
Request *fuse.Header
In *fuse.RenameRequest
}
func (m *renameNewDirNodeNotFound) String() string {
return fmt.Sprintf("In RenameRequest (request %v), node %d not found", m.Request.Hdr().ID, m.In.NewDir)
}
type handlerPanickedError struct {
Request interface{}
Err interface{}
}
var _ error = handlerPanickedError{}
func (h handlerPanickedError) Error() string {
return fmt.Sprintf("handler panicked: %v", h.Err)
}
var _ fuse.ErrorNumber = handlerPanickedError{}
func (h handlerPanickedError) Errno() fuse.Errno {
if err, ok := h.Err.(fuse.ErrorNumber); ok {
return err.Errno()
}
return fuse.DefaultErrno
}
// handlerTerminatedError happens when a handler terminates itself
// with runtime.Goexit. This is most commonly because of incorrect use
// of testing.TB.FailNow, typically via t.Fatal.
type handlerTerminatedError struct {
Request interface{}
}
var _ error = handlerTerminatedError{}
func (h handlerTerminatedError) Error() string {
return fmt.Sprintf("handler terminated (called runtime.Goexit)")
}
var _ fuse.ErrorNumber = handlerTerminatedError{}
func (h handlerTerminatedError) Errno() fuse.Errno {
return fuse.DefaultErrno
}
type handleNotReaderError struct {
handle Handle
}
var _ error = handleNotReaderError{}
func (e handleNotReaderError) Error() string {
return fmt.Sprintf("handle has no Read: %T", e.handle)
}
var _ fuse.ErrorNumber = handleNotReaderError{}
func (e handleNotReaderError) Errno() fuse.Errno {
return fuse.ENOTSUP
}
func initLookupResponse(s *fuse.LookupResponse) {
s.EntryValid = entryValidTime
}
func (c *Server) serve(r fuse.Request) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
parentCtx := ctx
if c.context != nil {
ctx = c.context(ctx, r)
}
req := &serveRequest{Request: r, cancel: cancel}
c.debug(request{
Op: opName(r),
Request: r.Hdr(),
In: r,
})
var node Node
var snode *serveNode
c.meta.Lock()
hdr := r.Hdr()
if id := hdr.Node; id != 0 {
if id < fuse.NodeID(len(c.node)) {
snode = c.node[uint(id)]
}
if snode == nil {
c.meta.Unlock()
c.debug(response{
Op: opName(r),
Request: logResponseHeader{ID: hdr.ID},
Error: fuse.ESTALE.ErrnoName(),
// this is the only place that sets both Error and
// Out; not sure if i want to do that; might get rid
// of len(c.node) things altogether
Out: logMissingNode{
MaxNode: fuse.NodeID(len(c.node)),
},
})
r.RespondError(fuse.ESTALE)
return
}
node = snode.node
}
if c.req[hdr.ID] != nil {
// This happens with OSXFUSE. Assume it's okay and
// that we'll never see an interrupt for this one.
// Otherwise everything wedges. TODO: Report to OSXFUSE?
//
// TODO this might have been because of missing done() calls
} else {
c.req[hdr.ID] = req
}
c.meta.Unlock()
// Call this before responding.
// After responding is too late: we might get another request
// with the same ID and be very confused.
done := func(resp interface{}) {
msg := response{
Op: opName(r),
Request: logResponseHeader{ID: hdr.ID},
}
if err, ok := resp.(error); ok {
msg.Error = err.Error()
if ferr, ok := err.(fuse.ErrorNumber); ok {
errno := ferr.Errno()
msg.Errno = errno.ErrnoName()
if errno == err {
// it's just a fuse.Errno with no extra detail;
// skip the textual message for log readability
msg.Error = ""
}
} else {
msg.Errno = fuse.DefaultErrno.ErrnoName()
}
} else {
msg.Out = resp
}
c.debug(msg)
c.meta.Lock()
delete(c.req, hdr.ID)
c.meta.Unlock()
}
var responded bool
defer func() {
if rec := recover(); rec != nil {
const size = 1 << 16
buf := make([]byte, size)
n := runtime.Stack(buf, false)
buf = buf[:n]
log.Printf("fuse: panic in handler for %v: %v\n%s", r, rec, buf)
err := handlerPanickedError{
Request: r,
Err: rec,
}
done(err)
r.RespondError(err)
return
}
if !responded {
err := handlerTerminatedError{
Request: r,
}
done(err)
r.RespondError(err)
}
}()
if err := c.handleRequest(ctx, node, snode, r, done); err != nil {
if err == context.Canceled {
select {
case <-parentCtx.Done():
// We canceled the parent context because of an
// incoming interrupt request, so return EINTR
// to trigger the right behavior in the client app.
//
// Only do this when it's the parent context that was
// canceled, not a context controlled by the program
// using this library, so we don't return EINTR too
// eagerly -- it might cause busy loops.
//
// Decent write-up on role of EINTR:
// http://250bpm.com/blog:12
err = fuse.EINTR
default:
// nothing
}
}
done(err)
r.RespondError(err)
}
// disarm runtime.Goexit protection
responded = true
}
// handleRequest will either a) call done(s) and r.Respond(s) OR b) return an error.
func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, r fuse.Request, done func(resp interface{})) error {
ctx = context.WithValue(ctx, CtxHeaderUIDKey, r.Hdr().Uid)
switch r := r.(type) {
default:
// Note: To FUSE, ENOSYS means "this server never implements this request."
// It would be inappropriate to return ENOSYS for other operations in this
// switch that might only be unavailable in some contexts, not all.
return fuse.ENOSYS
case *fuse.StatfsRequest:
s := &fuse.StatfsResponse{}
if fs, ok := c.fs.(FSStatfser); ok {
if err := fs.Statfs(ctx, r, s); err != nil {
return err
}
}
done(s)
r.Respond(s)
return nil
// Node operations.
case *fuse.GetattrRequest:
s := &fuse.GetattrResponse{}
if n, ok := node.(NodeGetattrer); ok {
if err := n.Getattr(ctx, r, s); err != nil {
return err
}
} else {
if err := snode.attr(ctx, &s.Attr); err != nil {
return err
}
}
done(s)
r.Respond(s)
return nil
case *fuse.SetattrRequest:
s := &fuse.SetattrResponse{}
if n, ok := node.(NodeSetattrer); ok {
if err := n.Setattr(ctx, r, s); err != nil {
return err
}
}
if err := snode.attr(ctx, &s.Attr); err != nil {
return err
}
done(s)
r.Respond(s)
return nil
case *fuse.SymlinkRequest:
s := &fuse.SymlinkResponse{}
initLookupResponse(&s.LookupResponse)
n, ok := node.(NodeSymlinker)
if !ok {
return fuse.EIO // XXX or EPERM like Mkdir?
}
n2, err := n.Symlink(ctx, r)
if err != nil {
return err
}
if err := c.saveLookup(ctx, &s.LookupResponse, snode, r.NewName, n2); err != nil {
return err
}
done(s)
r.Respond(s)
return nil
case *fuse.ReadlinkRequest:
n, ok := node.(NodeReadlinker)
if !ok {
return fuse.EIO /// XXX or EPERM?
}
target, err := n.Readlink(ctx, r)
if err != nil {
return err
}
done(target)
r.Respond(target)
return nil
case *fuse.LinkRequest:
n, ok := node.(NodeLinker)
if !ok {
return fuse.EIO /// XXX or EPERM?
}
c.meta.Lock()
var oldNode *serveNode
if int(r.OldNode) < len(c.node) {
oldNode = c.node[r.OldNode]
}
c.meta.Unlock()
if oldNode == nil {
c.debug(logLinkRequestOldNodeNotFound{
Request: r.Hdr(),
In: r,
})
return fuse.EIO
}
n2, err := n.Link(ctx, r, oldNode.node)
if err != nil {
return err
}
s := &fuse.LookupResponse{}
initLookupResponse(s)
if err := c.saveLookup(ctx, s, snode, r.NewName, n2); err != nil {
return err
}
done(s)
r.Respond(s)
return nil
case *fuse.RemoveRequest:
n, ok := node.(NodeRemover)
if !ok {
return fuse.EIO /// XXX or EPERM?
}
err := n.Remove(ctx, r)
if err != nil {
return err
}
done(nil)
r.Respond()
return nil
case *fuse.AccessRequest:
if n, ok := node.(NodeAccesser); ok {
if err := n.Access(ctx, r); err != nil {
return err
}
}
done(nil)
r.Respond()
return nil
case *fuse.LookupRequest:
var n2 Node
var err error
s := &fuse.LookupResponse{}
initLookupResponse(s)
if n, ok := node.(NodeStringLookuper); ok {
n2, err = n.Lookup(ctx, r.Name)
} else if n, ok := node.(NodeRequestLookuper); ok {
n2, err = n.Lookup(ctx, r, s)
} else {
return fuse.ENOENT
}
if err != nil {
return err
}
if err := c.saveLookup(ctx, s, snode, r.Name, n2); err != nil {
return err
}
done(s)
r.Respond(s)
return nil
case *fuse.MkdirRequest:
s := &fuse.MkdirResponse{}
initLookupResponse(&s.LookupResponse)
n, ok := node.(NodeMkdirer)
if !ok {
return fuse.EPERM
}
n2, err := n.Mkdir(ctx, r)
if err != nil {
return err
}
if err := c.saveLookup(ctx, &s.LookupResponse, snode, r.Name, n2); err != nil {
return err
}
done(s)
r.Respond(s)
return nil
case *fuse.OpenRequest:
s := &fuse.OpenResponse{}
var h2 Handle
if n, ok := node.(NodeOpener); ok {
hh, err := n.Open(ctx, r, s)
if err != nil {
return err
}
h2 = hh
} else {
h2 = node
}
s.Handle = c.saveHandle(h2, r.Hdr().Node)
done(s)
r.Respond(s)
return nil
case *fuse.CreateRequest:
n, ok := node.(NodeCreater)
if !ok {
// If we send back ENOSYS, FUSE will try mknod+open.
return fuse.EPERM
}
s := &fuse.CreateResponse{OpenResponse: fuse.OpenResponse{}}
initLookupResponse(&s.LookupResponse)
n2, h2, err := n.Create(ctx, r, s)
if err != nil {
return err
}
if err := c.saveLookup(ctx, &s.LookupResponse, snode, r.Name, n2); err != nil {
return err
}
s.Handle = c.saveHandle(h2, r.Hdr().Node)
done(s)
r.Respond(s)
return nil
case *fuse.GetxattrRequest:
n, ok := node.(NodeGetxattrer)
if !ok {
return fuse.ENOTSUP
}
s := &fuse.GetxattrResponse{}
err := n.Getxattr(ctx, r, s)
if err != nil {
return err
}
if r.Size != 0 && uint64(len(s.Xattr)) > uint64(r.Size) {
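			// The value does not fit in the caller's buffer; a request Size of
			// zero would have meant the caller only wants the value's length.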
return fuse.ERANGE
}
done(s)
r.Respond(s)
return nil
case *fuse.ListxattrRequest:
n, ok := node.(NodeListxattrer)
if !ok {
return fuse.ENOTSUP
}
s := &fuse.ListxattrResponse{}
err := n.Listxattr(ctx, r, s)
if err != nil {
return err
}
if r.Size != 0 && uint64(len(s.Xattr)) > uint64(r.Size) {
return fuse.ERANGE
}
done(s)
r.Respond(s)
return nil
case *fuse.SetxattrRequest:
n, ok := node.(NodeSetxattrer)
if !ok {
return fuse.ENOTSUP
}
err := n.Setxattr(ctx, r)
if err != nil {
return err
}
done(nil)
r.Respond()
return nil
case *fuse.RemovexattrRequest:
n, ok := node.(NodeRemovexattrer)
if !ok {
return fuse.ENOTSUP
}
err := n.Removexattr(ctx, r)
if err != nil {
return err
}
done(nil)
r.Respond()
return nil
case *fuse.ForgetRequest:
forget := c.dropNode(r.Hdr().Node, r.N)
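		// dropNode reports whether the node was fully dropped; only then is the
		// filesystem's Forget callback invoked.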
if forget {
n, ok := node.(NodeForgetter)
if ok {
n.Forget()
}
}
done(nil)
r.Respond()
return nil
// Handle operations.
case *fuse.ReadRequest:
shandle := c.getHandle(r.Handle)
if shandle == nil {
return fuse.ESTALE
}
handle := shandle.handle
s := &fuse.ReadResponse{Data: make([]byte, 0, r.Size)}
if r.Dir {
if h, ok := handle.(HandleReadDirAller); ok {
// detect rewinddir(3) or similar seek and refresh
// contents
if r.Offset == 0 {
shandle.readData = nil
}
if shandle.readData == nil {
dirs, err := h.ReadDirAll(ctx)
if err != nil {
return err
}
var data []byte
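					// Flatten the entries into raw dirent bytes, synthesizing a stable
					// inode for any entry that did not set one.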
for _, dir := range dirs {
if dir.Inode == 0 {
dir.Inode = c.dynamicInode(snode.inode, dir.Name)
}
data = fuse.AppendDirent(data, dir)
}
shandle.readData = data
}
fuseutil.HandleRead(r, s, shandle.readData)
done(s)
r.Respond(s)
return nil
}
} else {
if h, ok := handle.(HandleReadAller); ok {
if shandle.readData == nil {
data, err := h.ReadAll(ctx)
if err != nil {
return err
}
if data == nil {
data = []byte{}
}
shandle.readData = data
}
fuseutil.HandleRead(r, s, shandle.readData)
done(s)
r.Respond(s)
return nil
}
h, ok := handle.(HandleReader)
if !ok {
err := handleNotReaderError{handle: handle}
return err
}
if err := h.Read(ctx, r, s); err != nil {
return err
}
}
done(s)
r.Respond(s)
return nil
case *fuse.WriteRequest:
shandle := c.getHandle(r.Handle)
if shandle == nil {
return fuse.ESTALE
}
s := &fuse.WriteResponse{}
if h, ok := shandle.handle.(HandleWriter); ok {
if err := h.Write(ctx, r, s); err != nil {
return err
}
done(s)
r.Respond(s)
return nil
}
return fuse.EIO
case *fuse.FlushRequest:
shandle := c.getHandle(r.Handle)
if shandle == nil {
return fuse.ESTALE
}
handle := shandle.handle
if h, ok := handle.(HandleFlusher); ok {
if err := h.Flush(ctx, r); err != nil {
return err
}
}
done(nil)
r.Respond()
return nil
case *fuse.ReleaseRequest:
shandle := c.getHandle(r.Handle)
if shandle == nil {
return fuse.ESTALE
}
handle := shandle.handle
// No matter what, release the handle.
c.dropHandle(r.Handle)
if h, ok := handle.(HandleReleaser); ok {
if err := h.Release(ctx, r); err != nil {
return err
}
}
done(nil)
r.Respond()
return nil
case *fuse.DestroyRequest:
if fs, ok := c.fs.(FSDestroyer); ok {
fs.Destroy()
}
done(nil)
r.Respond()
return nil
case *fuse.RenameRequest:
c.meta.Lock()
var newDirNode *serveNode
if int(r.NewDir) < len(c.node) {
newDirNode = c.node[r.NewDir]
}
c.meta.Unlock()
if newDirNode == nil {
c.debug(renameNewDirNodeNotFound{
Request: r.Hdr(),
In: r,
})
return fuse.EIO
}
n, ok := node.(NodeRenamer)
if !ok {
return fuse.EIO // XXX or EPERM like Mkdir?
}
err := n.Rename(ctx, r, newDirNode.node)
if err != nil {
return err
}
done(nil)
r.Respond()
return nil
case *fuse.MknodRequest:
n, ok := node.(NodeMknoder)
if !ok {
return fuse.EIO
}
n2, err := n.Mknod(ctx, r)
if err != nil {
return err
}
s := &fuse.LookupResponse{}
initLookupResponse(s)
if err := c.saveLookup(ctx, s, snode, r.Name, n2); err != nil {
return err
}
done(s)
r.Respond(s)
return nil
case *fuse.FsyncRequest:
n, ok := node.(NodeFsyncer)
if !ok {
return fuse.EIO
}
err := n.Fsync(ctx, r)
if err != nil {
return err
}
done(nil)
r.Respond()
return nil
case *fuse.InterruptRequest:
c.meta.Lock()
ireq := c.req[r.IntrID]
if ireq != nil && ireq.cancel != nil {
ireq.cancel()
ireq.cancel = nil
}
c.meta.Unlock()
done(nil)
r.Respond()
return nil
/* case *FsyncdirRequest:
return ENOSYS
case *GetlkRequest, *SetlkRequest, *SetlkwRequest:
return ENOSYS
case *BmapRequest:
return ENOSYS
case *SetvolnameRequest, *GetxtimesRequest, *ExchangeRequest:
return ENOSYS
*/
}
panic("not reached")
}
func (c *Server) saveLookup(ctx context.Context, s *fuse.LookupResponse, snode *serveNode, elem string, n2 Node) error {
if err := nodeAttr(ctx, n2, &s.Attr); err != nil {
return err
}
if s.Attr.Inode == 0 {
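	// The node did not report its own inode, so derive a deterministic one
	// from the parent inode and the entry name.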
s.Attr.Inode = c.dynamicInode(snode.inode, elem)
}
s.Node, s.Generation = c.saveNode(s.Attr.Inode, n2)
return nil
}
type invalidateNodeDetail struct {
Off int64
Size int64
}
func (i invalidateNodeDetail) String() string {
return fmt.Sprintf("Off:%d Size:%d", i.Off, i.Size)
}
func errstr(err error) string {
if err == nil {
return ""
}
return err.Error()
}
func (s *Server) invalidateNode(node Node, off int64, size int64) error {
s.meta.Lock()
id, ok := s.nodeRef[node]
if ok {
snode := s.node[id]
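		// Keep the serve node pinned (via its waitgroup) while the invalidate
		// message is sent, so it is not torn down concurrently.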
snode.wg.Add(1)
defer snode.wg.Done()
}
s.meta.Unlock()
if !ok {
// This is what the kernel would have said, if we had been
// able to send this message; it's not cached.
return fuse.ErrNotCached
}
// Delay logging until after we can record the error too. We
// consider a /dev/fuse write to be instantaneous enough to not
// need separate before and after messages.
err := s.conn.InvalidateNode(id, off, size)
s.debug(notification{
Op: "InvalidateNode",
Node: id,
Out: invalidateNodeDetail{
Off: off,
Size: size,
},
Err: errstr(err),
})
return err
}
// InvalidateNodeAttr invalidates the kernel cache of the attributes
// of node.
//
// Returns fuse.ErrNotCached if the kernel is not currently caching
// the node.
func (s *Server) InvalidateNodeAttr(node Node) error {
return s.invalidateNode(node, 0, 0)
}
// InvalidateNodeData invalidates the kernel cache of the attributes
// and data of node.
//
// Returns fuse.ErrNotCached if the kernel is not currently caching
// the node.
func (s *Server) InvalidateNodeData(node Node) error {
return s.invalidateNode(node, 0, -1)
}
// InvalidateNodeDataRange invalidates the kernel cache of the
// attributes and a range of the data of node.
//
// Returns fuse.ErrNotCached if the kernel is not currently caching
// the node.
func (s *Server) InvalidateNodeDataRange(node Node, off int64, size int64) error {
return s.invalidateNode(node, off, size)
}
type invalidateEntryDetail struct {
Name string
}
func (i invalidateEntryDetail) String() string {
return fmt.Sprintf("%q", i.Name)
}
// InvalidateEntry invalidates the kernel cache of the directory entry
// identified by parent node and entry basename.
//
// Kernel may or may not cache directory listings. To invalidate
// those, use InvalidateNode to invalidate all of the data for a
// directory. (As of 2015-06, Linux FUSE does not cache directory
// listings.)
//
// Returns ErrNotCached if the kernel is not currently caching the
// node.
func (s *Server) InvalidateEntry(parent Node, name string) error {
s.meta.Lock()
id, ok := s.nodeRef[parent]
if ok {
snode := s.node[id]
snode.wg.Add(1)
defer snode.wg.Done()
}
s.meta.Unlock()
if !ok {
// This is what the kernel would have said, if we had been
// able to send this message; it's not cached.
return fuse.ErrNotCached
}
err := s.conn.InvalidateEntry(id, name)
s.debug(notification{
Op: "InvalidateEntry",
Node: id,
Out: invalidateEntryDetail{
Name: name,
},
Err: errstr(err),
})
return err
}
// DataHandle returns a read-only Handle that satisfies reads
// using the given data.
func DataHandle(data []byte) Handle {
return &dataHandle{data}
}
type dataHandle struct {
data []byte
}
func (d *dataHandle) ReadAll(ctx context.Context) ([]byte, error) {
return d.data, nil
}
// GenerateDynamicInode returns a dynamic inode.
//
// The parent inode and current entry name are used as the criteria
// for choosing a pseudorandom inode. This makes it likely the same
// entry will get the same inode on multiple runs.
func GenerateDynamicInode(parent uint64, name string) uint64 {
h := fnv.New64a()
var buf [8]byte
binary.LittleEndian.PutUint64(buf[:], parent)
_, _ = h.Write(buf[:])
_, _ = h.Write([]byte(name))
var inode uint64
for {
inode = h.Sum64()
if inode != 0 {
break
}
// there's a tiny probability that result is zero; change the
// input a little and try again
_, _ = h.Write([]byte{'x'})
}
return inode
}<|fim▁end|>
| |
<|file_name|>index.ts<|end_file_name|><|fim▁begin|>import styled, { css as styledCss, keyframes } from 'styled-components'
import type { TTestable } from '@/spec'
import Img from '@/Img'
import { theme } from '@/utils/themes'
import css from '@/utils/css'
const DURATION = '2.5s'
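// A single shared duration keeps the battery-fill and liquid "blob" animations in sync.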
const load = keyframes`
0% {
top: 24px;
}
70% {
top: 10px;
}
90% {
top: 0;
}
95% {
top: 0;
}
100% {
top: 24px;
}
`
const liquid1 = keyframes`
0% {
height: 0;
opacity: 0;
top: -5px;
}
22% {
height: 2.8125px;
top: 3.75px;
opacity: 1;
}
25% {
top: -2.5px;
}
35% {
height: 11.25px;
top: -5px;
}
55% {
height: 3px;
top: -1.25px;
}
60% {
height: 6px;
opacity: 1;
top: -3px;
}
96% {
height: 8.4375px;
opacity: 0;
top: 5px;
}
100% {
height: 0;
opacity: 0;
}
`
const liquid2 = keyframes`
0% {
height: 0;
opacity: 0;
top: -0.5rem;
}
17.5% {
height: 3px;
top: 2px;
opacity: 1;
}
20% {
top: -2.5px;
}
25% {
height: 15px;
top: -6px;
}
45% {
height: 3px;
top: -1px;
}
60% {
opacity: 1;
height: 15px;
top: -5px;
}
96% {
opacity: 0;
height: 8px;
top: 5px;
}
100% {
height: 0;
opacity: 0;
}
`
const loadRule = styledCss`
${load} ${DURATION} infinite;
`
const liquid1Rule = styledCss`
${liquid1} ${DURATION} infinite;
`<|fim▁hole|>`
export const Wrapper = styled.div.attrs(({ testid }: TTestable) => ({
'data-test-id': testid,
}))<TTestable>`
text-align: center;
position: relative;
height: 28px;
margin-bottom: 6px;
cursor: pointer;
`
export const Battery = styled.div`
display: inline-block;
position: relative;
width: 16px;
height: 26px;
box-shadow: 0 0 0 2px #155e76;
border-radius: 2px;
&:before {
content: '';
position: absolute;
left: 5px;
top: -4px;
height: 3px;
width: 6px;
background: #155e76;
border-radius: 2px;
}
${Wrapper}:hover & {
&:after {
content: '';
position: absolute;
top: 0;
bottom: 0;
left: 0;
right: 0;
border-right: 16px solid transparent;
border-bottom: 22px solid rgba(255, 255, 255, 0.25);
}
}
`
export const Liquid = styled.div`
position: absolute;
top: 23px;
bottom: 0;
left: 0;
right: 0;
width: 16px;
background: ${theme('baseColor.green')};
${Wrapper}:hover & {
top: 0;
animation: ${loadRule};
&:before {
left: 0;
animation: ${liquid2Rule};
content: '';
position: absolute;
top: -5px;
height: 11.25px;
width: 14.625px;
background: ${theme('baseColor.green')};
border-radius: 50%;
opacity: 0;
}
&:after {
right: 0;
animation: ${liquid1Rule};
content: '';
position: absolute;
top: -5px;
height: 11.25px;
width: 14.625px;
background: ${theme('baseColor.green')};
border-radius: 50%;
opacity: 0;
}
}
`
export const MoneySign = styled(Img)`
position: absolute;
top: 6px;
left: 3px;
${css.size(10)};
fill: #327faf;
transition: opacity 0.25s;
${Wrapper}:hover & {
fill: #ecbcb3;
top: 8px;
left: 2px;
${css.size(12)};
}
transition: all 0.2s;
`<|fim▁end|>
|
const liquid2Rule = styledCss`
${liquid2} ${DURATION} infinite;
|
<|file_name|>app.component.ts<|end_file_name|><|fim▁begin|>/*
* Angular 2 decorators and services
*/
import {
Component,
OnInit,
ViewEncapsulation
} from '@angular/core';
import { AppState } from './app.service';
/*
* App Component
* Top Level Component
*/
@Component({
selector: 'app',
encapsulation: ViewEncapsulation.None,
styleUrls: [
'./app.component.css'
],
templateUrl: 'app.component.html'
})
export class AppComponent implements OnInit {
  public angularclassLogo = 'assets/img/angularclass-avatar.png';
public name = 'Angular 2 Webpack Starter';
public url = 'https://twitter.com/AngularClass';
/*
constructor(
// public appState: AppState
) {}
*/
public ngOnInit() {<|fim▁hole|>
/*
* Please review the https://github.com/AngularClass/angular2-examples/ repo for
* more angular app examples that you may copy/paste
* (The examples may not be updated as quickly. Please open an issue on github for us to update it)
* For help or questions please contact us at @AngularClass on twitter
* or our chat on Slack at https://AngularClass.com/slack-join
*/<|fim▁end|>
|
// console.log('Initial App State', this.appState.state);
}
}
|
<|file_name|>model_deployment.py<|end_file_name|><|fim▁begin|># Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");<|fim▁hole|># https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deploy a model in AI Platform."""
import logging
import json
import time
import subprocess
from googleapiclient import discovery
from googleapiclient import errors
_WAIT_FOR_COMPLETION_SLEEP_SECONDS = 10
_PYTHON_VERSION = '3.5'
_RUN_TIME_VERSION = '1.15'
def _create_service():
"""Gets service instance to start API searches.
:return:
"""
return discovery.build('ml', 'v1')
def copy_artifacts(source_path, destination_path):
    """Copies model artifacts recursively with gsutil.
    :param source_path: source path (for example a Cloud Storage URI) to copy from.
    :param destination_path: destination path (for example a Cloud Storage URI) to copy to.
    :return: None.
    """
logging.info(
'Moving model directory from {} to {}'.format(source_path,
destination_path))
subprocess.call(
"gsutil -m cp -r {} {}".format(source_path, destination_path),
shell=True)
class AIPlatformModel(object):
def __init__(self, project_id):
self._project_id = project_id
self._service = _create_service()
    def model_exists(self, model_name):
        """Checks whether a model with the given name exists in the project.
        :param model_name: Name of the AI Platform model to look for.
        :return: True if a matching model is found, False if not (None if listing fails).
        """
models = self._service.projects().models()
try:
response = models.list(
parent='projects/{}'.format(self._project_id)).execute()
if response:
for model in response['models']:
if model['name'].rsplit('/', 1)[1] == model_name:
return True
else:
return False
except errors.HttpError as err:
logging.error('%s', json.loads(err.content)['error']['message'])
def _list_model_versions(self, model_name):
"""Lists existing model versions in the project.
Args:
model_name: Model name to list versions for.
Returns:
Dictionary of model versions.
"""
versions = self._service.projects().models().versions()
try:
return versions.list(
parent='projects/{}/models/{}'.format(self._project_id,
model_name)).execute()
except errors.HttpError as err:
logging.error('%s', json.loads(err.content)['error']['message'])
    def create_model(self, model_name, model_region='us-central1'):
        """Creates the model resource in AI Platform unless it already exists.
        :param model_name: Name of the model resource to create.
        :param model_region: Region in which the model resource is served.
        :return: None.
        """
if not self.model_exists(model_name):
body = {
'name': model_name,
'regions': model_region,
'description': 'MLflow model'
}
parent = 'projects/{}'.format(self._project_id)
try:
self._service.projects().models().create(
parent=parent, body=body).execute()
logging.info('Model "%s" has been created.', model_name)
except errors.HttpError as err:
logging.error('"%s". Skipping model creation.',
json.loads(err.content)['error']['message'])
else:
logging.warning('Model "%s" already exists.', model_name)
def deploy_model(self, bucket_name, model_name, model_version,
runtime_version=_RUN_TIME_VERSION):
"""Deploys model on AI Platform.
Args:
bucket_name: Cloud Storage Bucket name that stores saved model.
model_name: Model name to deploy.
model_version: Model version.
runtime_version: Runtime version.
Raises:
RuntimeError if deployment completes with errors.
"""
# For details on request body, refer to:
# https://cloud.google.com/ml-engine/reference/rest/v1/projects
# .models.versions/create
model_version_exists = False
model_versions_list = self._list_model_versions(model_name)
# Field: version.name Error: A name should start with a letter and
# contain only letters, numbers and underscores
model_version = 'mlflow_{}'.format(model_version)
if model_versions_list:
for version in model_versions_list['versions']:
if version['name'].rsplit('/', 1)[1] == model_version:
model_version_exists = True
if not model_version_exists:
request_body = {
'name': model_version,
'deploymentUri': '{}'.format(bucket_name),
'framework': 'TENSORFLOW',
'runtimeVersion': runtime_version,
'pythonVersion': _PYTHON_VERSION
}
parent = 'projects/{}/models/{}'.format(self._project_id,
model_name)
response = self._service.projects().models().versions().create(
parent=parent, body=request_body).execute()
op_name = response['name']
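            # versions().create() returns a long-running operation; poll it until it
            # reports done or surfaces an error.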
while True:
deploy_status = (
self._service.projects().operations().get(
name=op_name).execute())
if deploy_status.get('done'):
logging.info('Model "%s" with version "%s" deployed.',
model_name,
model_version)
break
if deploy_status.get('error'):
logging.error(deploy_status['error'])
raise RuntimeError(
'Failed to deploy model for serving: {}'.format(
deploy_status['error']))
logging.info(
'Waiting for %d seconds for "%s" with "%s" version to be '
'deployed.',
_WAIT_FOR_COMPLETION_SLEEP_SECONDS, model_name,
model_version)
time.sleep(_WAIT_FOR_COMPLETION_SLEEP_SECONDS)
else:
logging.info('Model "%s" with version "%s" already exists.',
model_name,
model_version)<|fim▁end|>
|
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
|
<|file_name|>setup.py<|end_file_name|><|fim▁begin|># vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR<|fim▁hole|># OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
''' The setup script is the center of all activity in building,
distributing, and installing modules using the Distutils. The
main purpose of the setup script is to describe your module
distribution to the Distutils, so that the various commands
that operate on your modules do the right thing.
'''
import os
from glob import glob
from setuptools import setup, find_packages
from rphm import __version__, __author__
def find_modules(pkg):
''' Find the modules that belong in this package. '''
modules = [pkg]
for dirname, dirnames, _ in os.walk(pkg):
for subdirname in dirnames:
modules.append(os.path.join(dirname, subdirname))
return modules
INSTALL_ROOT = os.getenv('VIRTUAL_ENV', '')
CONF_PATH = INSTALL_ROOT + '/persist/sys'
INSTALL_REQUIREMENTS = [
'jsonrpclib'
]
TEST_REQUIREMENTS = [
'mock'
]
setup(
name='rphm',
version=__version__,
description='EOS extension to generate SNMP traps based on counter thresholds',
long_description=open('README.md').read(),
author=__author__,
author_email='[email protected]',
url='http://eos.arista.com',
license='BSD-3',
install_requires=INSTALL_REQUIREMENTS,
tests_require=TEST_REQUIREMENTS,
packages=find_modules('rphm'),
scripts=glob('bin/*'),
data_files=[
(CONF_PATH, ['conf/rphm.conf'])
]
)<|fim▁end|>
|
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>#![cfg_attr(feature = "unstable", feature(const_fn, drop_types_in_const))]
#![cfg_attr(feature = "serde_derive", feature(proc_macro))]
#![cfg_attr(feature = "nightly-testing", feature(plugin))]
#![cfg_attr(feature = "nightly-testing", plugin(clippy))]
#![cfg_attr(not(feature = "unstable"), deny(warnings))]
extern crate inflector;
#[macro_use]
extern crate lazy_static;
extern crate regex;
extern crate serde;
extern crate serde_json;
#[cfg(feature = "serde_derive")]
#[macro_use]
extern crate serde_derive;
#[cfg(not(feature = "serde_derive"))]
extern crate serde_codegen;
use std::fs::File;
use std::io::{Write, BufReader, BufWriter};
use std::path::Path;
use botocore::Service as BotocoreService;
use generator::generate_source;
mod botocore;
mod generator;
mod serialization;
mod util;
const BOTOCORE_DIR: &'static str = concat!(env!("CARGO_MANIFEST_DIR"), "/botocore/botocore/data/");
pub struct Service {
name: String,
protocol_date: String,
}
impl Service {
pub fn new<S>(name: S, protocol_date: S) -> Self
where S: Into<String>
{
Service {
name: name.into(),
protocol_date: protocol_date.into(),
}
}
}
pub fn generate(service: Service, output_path: &Path) {
let botocore_destination_path = output_path.join(format!("{}_botocore.rs", service.name));
let serde_destination_path = output_path.join(format!("{}.rs", service.name));
let botocore_service_data_path = Path::new(BOTOCORE_DIR)
.join(format!("{}/{}/service-2.json", service.name, service.protocol_date));
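    // Two-stage pipeline: render Rust source from the botocore JSON definition,
    // then let serde_generate either expand serde attributes via serde_codegen or
    // simply copy the file when the serde_derive feature handles that at compile time.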
botocore_generate(botocore_service_data_path.as_path(),
botocore_destination_path.as_path());
serde_generate(botocore_destination_path.as_path(),
serde_destination_path.as_path());
}
fn botocore_generate(input_path: &Path, output_path: &Path) {
let input_file = File::open(input_path).expect(&format!(
"{:?} not found",
input_path,
));
let service_data_as_reader = BufReader::new(input_file);
let service: BotocoreService = serde_json::from_reader(service_data_as_reader).expect(&format!(
"Could not convert JSON in {:?} to Service",
input_path,
));
let source_code = generate_source(&service);
let output_file = File::create(output_path).expect(&format!(
"Couldn't open file for writing: {:?}",
output_path,
));
let mut output_bufwriter = BufWriter::new(output_file);
output_bufwriter.write_all(source_code.as_bytes()).expect(&format!(
"Failed to write generated source code to {:?}",<|fim▁hole|>#[cfg(not(feature = "serde_derive"))]
fn serde_generate(source: &Path, destination: &Path) {
::serde_codegen::expand(&source, &destination).unwrap();
}
#[cfg(feature = "serde_derive")]
fn serde_generate(source: &Path, destination: &Path) {
::std::fs::copy(source, destination).expect(&format!(
"Failed to copy {:?} to {:?}",
source,
destination,
));
}<|fim▁end|>
|
output_path,
));
}
|
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from setuptools import setup, find_packages
setup(
name='pulp_ostree_common',
version='1.0.0a1',
packages=find_packages(),
url='http://www.pulpproject.org',
license='GPLv2+',<|fim▁hole|><|fim▁end|>
|
author='Pulp Team',
author_email='[email protected]',
description='common code for pulp\'s ostree support',
)
|
<|file_name|>td_sequence.py<|end_file_name|><|fim▁begin|>'''
TD Sequential (TD_Sequence) setup counter implemented as a class.
'''
import numpy as np
import pandas as pd
import json
import pandas.io.data as web
from datetime import date, datetime, timedelta<|fim▁hole|> def __init__(self, data):
self.data = data
def sequence(self):
setup = self.data.iloc[-1]['Close'] - self.data.iloc[-1-4]['Close']
buy_setup = True
buy_counter = 1
sell_counter = -1
if setup < 0:
'''buy setup'''
buy_setup = True
elif setup > 0:
'''sell setup'''
buy_setup = False
for i in xrange(1,(len(self.data))):
if buy_setup:
buy = self.data.iloc[-1-i]['Close'] - self.data.iloc[-5-i]['Close']
if buy < 0:
buy_counter += 1
if buy_counter > 9:
'''failed to reverse, reset buy counter back to 1'''
buy_counter = 1
if buy_counter == 9 and ((self.data.iloc[-2-i]['Close'] - self.data.iloc[-6-i]['Close'])>0):
if ((self.data.iloc[-1]['Low'] <= self.data.iloc[-3]['Low']) and (self.data.iloc[-1]['Low'] <= self.data.iloc[-4]['Low'])) or \
((self.data.iloc[-2]['Low'] <= self.data.iloc[-3]['Low']) and (self.data.iloc[-2]['Low'] <= self.data.iloc[-4]['Low'])):
buy_counter = 10
return buy_counter
else:
return buy_counter
else:
if (buy_counter == 8) and ((self.data.iloc[-2]['Low'] <= self.data.iloc[-3]['Low']) and (self.data.iloc[-2]['Low'] <= self.data.iloc[-4]['Low'])):
buy_counter = 8.5
return 8.5
else:
return buy_counter
else:
sell = self.data.iloc[-1-i]['Close'] - self.data.iloc[-5-i]['Close']
if sell > 0:
sell_counter -= 1
if sell_counter < -9:
'''failed to reverse, reset buy counter back to -1'''
sell_counter = -1
if sell_counter == -9 and ((self.data.iloc[-2-i]['Close'] - self.data.iloc[-6-i]['Close'])<0):
if ((self.data.iloc[-1]['High'] > self.data.iloc[-3]['High']) and (self.data.iloc[-1]['High'] > self.data.iloc[-4]['High'])) or \
((self.data.iloc[-2]['High'] > self.data.iloc[-3]['High']) and (self.data.iloc[-2]['High'] > self.data.iloc[-4]['High'])):
sell_counter = -10
return sell_counter
else:
return sell_counter
else:
if sell_counter == -8 and ((self.data.iloc[-2]['High'] > self.data.iloc[-3]['High']) and (self.data.iloc[-2]['High'] > self.data.iloc[-4]['High'])):
sell_counter = -8.5
return -8.5
else:
return sell_counter<|fim▁end|>
|
from collections import defaultdict
class TDSequence(object):
|
<|file_name|>test_overlay.py<|end_file_name|><|fim▁begin|>import os
import pytest
import sdk_install
import sdk_networks
import sdk_utils
from tests import config
overlay_nostrict = pytest.mark.skipif(os.environ.get("SECURITY") == "strict",
reason="overlay tests currently broken in strict")
@pytest.fixture(scope='module', autouse=True)
def configure_package(configure_security):
try:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
sdk_install.install(
config.PACKAGE_NAME,
config.SERVICE_NAME,
config.DEFAULT_TASK_COUNT,
additional_options=sdk_networks.ENABLE_VIRTUAL_NETWORKS_OPTIONS)
yield # let the test session execute
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.mark.sanity
@pytest.mark.smoke
@pytest.mark.overlay
@overlay_nostrict<|fim▁hole|><|fim▁end|>
|
@pytest.mark.dcos_min_version('1.9')
def test_install():
sdk_networks.check_task_network("template-0-node")
|
<|file_name|>deriving-span-Show-enum-struct-variant.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This file was auto-generated using 'src/etc/generate-deriving-span-tests.py'
#![feature(struct_variant)]<|fim▁hole|>
#[deriving(Show)]
enum Enum {
A {
x: Error //~ ERROR
}
}
fn main() {}<|fim▁end|>
|
extern crate rand;
struct Error;
|
<|file_name|>package-info.java<|end_file_name|><|fim▁begin|>/**
* CKFinder
* ========
* http://ckfinder.com
* Copyright (C) 2007-2013, CKSource - Frederico Knabben. All rights reserved.
*
* The software, this file and its contents are subject to the CKFinder
* License. Please read the license.txt file before using, installing, copying,
* modifying or distribute this file or part of its contents. The contents of
* this file is part of the Source Code of CKFinder.
*/<|fim▁hole|><|fim▁end|>
|
/**
* CKFinder for Java - server connector.
*/
package com.ckfinder.connector;
|
<|file_name|>03-literals-and-operators.rs<|end_file_name|><|fim▁begin|>fn main() {
// Integer addition
println!("1 + 2 = {}", 1u + 2);
// Integer subtraction
println!("1 - 2 = {}", 1i - 2);
<|fim▁hole|> println!("true OR false is {}", true || false);
println!("NOT true is {}", !true);
// Bitwise operations
println!("0011 AND 0101 is {:04t}", 0b0011u & 0b0101);
println!("0011 OR 0101 is {:04t}", 0b0011u | 0b0101);
println!("0011 XOR 0101 is {:04t}", 0b0011u ^ 0b0101);
println!("1 << 5 is {}", 1u << 5);
println!("0x80 >> 2 is 0x{:x}", 0x80u >> 2);
// Use underscores to improve readability!
println!("One million is written as {}", 1_000_000u);
}<|fim▁end|>
|
// Short-circuiting boolean logic
println!("true AND false is {}", true && false);
|
<|file_name|>fdb-legacy.js<|end_file_name|><|fim▁begin|>(function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(_dereq_,module,exports){
var Core = _dereq_('../lib/Core'),
CollectionGroup = _dereq_('../lib/CollectionGroup'),
View = _dereq_('../lib/View'),
Highchart = _dereq_('../lib/Highchart'),
Persist = _dereq_('../lib/Persist'),
Document = _dereq_('../lib/Document'),
Overview = _dereq_('../lib/Overview'),
OldView = _dereq_('../lib/OldView'),
OldViewBind = _dereq_('../lib/OldView.Bind');
if (typeof window !== 'undefined') {
window.ForerunnerDB = Core;
}
module.exports = Core;
},{"../lib/CollectionGroup":4,"../lib/Core":5,"../lib/Document":7,"../lib/Highchart":8,"../lib/OldView":22,"../lib/OldView.Bind":21,"../lib/Overview":25,"../lib/Persist":27,"../lib/View":30}],2:[function(_dereq_,module,exports){
"use strict";
/**
* Creates an always-sorted multi-key bucket that allows ForerunnerDB to
* know the index that a document will occupy in an array with minimal
* processing, speeding up things like sorted views.
*/
var Shared = _dereq_('./Shared');
/**
* The active bucket class.
* @param {object} orderBy An order object.
* @constructor
*/
var ActiveBucket = function (orderBy) {
var sortKey;
this._primaryKey = '_id';
this._keyArr = [];
this._data = [];
this._objLookup = {};
this._count = 0;
for (sortKey in orderBy) {
if (orderBy.hasOwnProperty(sortKey)) {
this._keyArr.push({
key: sortKey,
dir: orderBy[sortKey]
});
}
}
};
Shared.addModule('ActiveBucket', ActiveBucket);
Shared.synthesize(ActiveBucket.prototype, 'primaryKey');
Shared.mixin(ActiveBucket.prototype, 'Mixin.Sorting');
/**
* Quicksorts a single document into the passed array and
* returns the index that the document should occupy.
* @param {object} obj The document to calculate index for.
* @param {array} arr The array the document index will be
* calculated for.
* @param {string} item The string key representation of the
* document whose index is being calculated.
* @param {function} fn The comparison function that is used
* to determine if a document is sorted below or above the
* document we are calculating the index for.
* @returns {number} The index the document should occupy.
*/
ActiveBucket.prototype.qs = function (obj, arr, item, fn) {
// If the array is empty then return index zero
if (!arr.length) {
return 0;
}
var lastMidwayIndex = -1,
midwayIndex,
lookupItem,
result,
start = 0,
end = arr.length - 1;
// Loop the data until our range overlaps
while (end >= start) {
// Calculate the midway point (divide and conquer)
midwayIndex = Math.floor((start + end) / 2);
if (lastMidwayIndex === midwayIndex) {
// No more items to scan
break;
}
// Get the item to compare against
lookupItem = arr[midwayIndex];
if (lookupItem !== undefined) {
// Compare items
result = fn(this, obj, item, lookupItem);
if (result > 0) {
start = midwayIndex + 1;
}
if (result < 0) {
end = midwayIndex - 1;
}
}
lastMidwayIndex = midwayIndex;
}
if (result > 0) {
return midwayIndex + 1;
} else {
return midwayIndex;
}
};
/**
* Calculates the sort position of an item against another item.
* @param {object} sorter An object or instance that contains
* sortAsc and sortDesc methods.
* @param {object} obj The document to compare.
* @param {string} a The first key to compare.
* @param {string} b The second key to compare.
* @returns {number} Either 1 for sort a after b or -1 to sort
* a before b.
* @private
*/
ActiveBucket.prototype._sortFunc = function (sorter, obj, a, b) {
var aVals = a.split('.:.'),
bVals = b.split('.:.'),
arr = sorter._keyArr,
count = arr.length,
index,
sortType,
castType;
for (index = 0; index < count; index++) {
sortType = arr[index];
castType = typeof obj[sortType.key];
if (castType === 'number') {
aVals[index] = Number(aVals[index]);
bVals[index] = Number(bVals[index]);
}
// Check for non-equal items
if (aVals[index] !== bVals[index]) {
// Return the sorted items
if (sortType.dir === 1) {
return sorter.sortAsc(aVals[index], bVals[index]);
}
if (sortType.dir === -1) {
return sorter.sortDesc(aVals[index], bVals[index]);
}
}
}
};
/**
* Inserts a document into the active bucket.
* @param {object} obj The document to insert.
* @returns {number} The index the document now occupies.
*/
ActiveBucket.prototype.insert = function (obj) {
var key,
keyIndex;
key = this.documentKey(obj);
keyIndex = this._data.indexOf(key);
if (keyIndex === -1) {
// Insert key
keyIndex = this.qs(obj, this._data, key, this._sortFunc);
this._data.splice(keyIndex, 0, key);
} else {
this._data.splice(keyIndex, 0, key);
}
this._objLookup[obj[this._primaryKey]] = key;
this._count++;
return keyIndex;
};
/**
* Removes a document from the active bucket.
* @param {object} obj The document to remove.
* @returns {boolean} True if the document was removed
* successfully or false if it wasn't found in the active
* bucket.
*/
ActiveBucket.prototype.remove = function (obj) {
var key,
keyIndex;
key = this._objLookup[obj[this._primaryKey]];
if (key) {
keyIndex = this._data.indexOf(key);
if (keyIndex > -1) {
this._data.splice(keyIndex, 1);
delete this._objLookup[obj[this._primaryKey]];
this._count--;
return true;
} else {
return false;
}
}
return false;
};
/**
* Get the index that the passed document currently occupies
* or the index it will occupy if added to the active bucket.
* @param {object} obj The document to get the index for.
* @returns {number} The index.
*/
ActiveBucket.prototype.index = function (obj) {
var key,
keyIndex;
key = this.documentKey(obj);
keyIndex = this._data.indexOf(key);
if (keyIndex === -1) {
// Get key index
keyIndex = this.qs(obj, this._data, key, this._sortFunc);
}
return keyIndex;
};
/**
* The key that represents the passed document.
* @param {object} obj The document to get the key for.
* @returns {string} The document key.
*/
ActiveBucket.prototype.documentKey = function (obj) {
var key = '',
arr = this._keyArr,
count = arr.length,
index,
sortType;
for (index = 0; index < count; index++) {
sortType = arr[index];
if (key) {
key += '.:.';
}
key += obj[sortType.key];
}
// Add the unique identifier on the end of the key
key += '.:.' + obj[this._primaryKey];
return key;
};
/**
* Get the number of documents currently indexed in the active
* bucket instance.
* @returns {number} The number of documents.
*/
ActiveBucket.prototype.count = function () {
return this._count;
};
Shared.finishModule('ActiveBucket');
module.exports = ActiveBucket;
},{"./Shared":29}],3:[function(_dereq_,module,exports){
"use strict";
/**
* The main collection class. Collections store multiple documents and
* can operate on them using the query language to insert, read, update
* and delete.
*/
var Shared,
Core,
Metrics,
KeyValueStore,
Path,
IndexHashMap,
IndexBinaryTree,
Crc,
Overload,
ReactorIO;
Shared = _dereq_('./Shared');
/**
* Collection object used to store data.
* @constructor
*/
var Collection = function (name) {
this.init.apply(this, arguments);
};
Collection.prototype.init = function (name) {
this._primaryKey = '_id';
this._primaryIndex = new KeyValueStore('primary');
this._primaryCrc = new KeyValueStore('primaryCrc');
this._crcLookup = new KeyValueStore('crcLookup');
this._name = name;
this._data = [];
this._metrics = new Metrics();
this._deferQueue = {
insert: [],
update: [],
remove: [],
upsert: []
};
this._deferThreshold = {
insert: 100,
update: 100,
remove: 100,
upsert: 100
};
this._deferTime = {
insert: 1,
update: 1,
remove: 1,
upsert: 1
};
// Set the subset to itself since it is the root collection
this._subsetOf(this);
};
Shared.addModule('Collection', Collection);
Shared.mixin(Collection.prototype, 'Mixin.Common');
Shared.mixin(Collection.prototype, 'Mixin.Events');
Shared.mixin(Collection.prototype, 'Mixin.ChainReactor');
Shared.mixin(Collection.prototype, 'Mixin.CRUD');
Shared.mixin(Collection.prototype, 'Mixin.Constants');
Shared.mixin(Collection.prototype, 'Mixin.Triggers');
Shared.mixin(Collection.prototype, 'Mixin.Sorting');
Shared.mixin(Collection.prototype, 'Mixin.Matching');
Metrics = _dereq_('./Metrics');
KeyValueStore = _dereq_('./KeyValueStore');
Path = _dereq_('./Path');
IndexHashMap = _dereq_('./IndexHashMap');
IndexBinaryTree = _dereq_('./IndexBinaryTree');
Crc = _dereq_('./Crc');
Core = Shared.modules.Core;
Overload = _dereq_('./Overload');
ReactorIO = _dereq_('./ReactorIO');
/**
* Returns a checksum of a string.
* @param {String} string The string to checksum.
* @return {String} The checksum generated.
*/
Collection.prototype.crc = Crc;
/**
* Gets / sets the current state.
* @param {String=} val The name of the state to set.
* @returns {*}
*/
Shared.synthesize(Collection.prototype, 'state');
/**
* Gets / sets the name of the collection.
* @param {String=} val The name of the collection to set.
* @returns {*}
*/
Shared.synthesize(Collection.prototype, 'name');
/**
* Get the internal data
* @returns {Array}
*/
Collection.prototype.data = function () {
return this._data;
};
/**
* Drops a collection and all it's stored data from the database.
* @returns {boolean} True on success, false on failure.
*/
Collection.prototype.drop = function () {
var key;
if (this._state !== 'dropped') {
if (this._db && this._db._collection && this._name) {
if (this.debug()) {
console.log('Dropping collection ' + this._name);
}
this._state = 'dropped';
this.emit('drop', this);
delete this._db._collection[this._name];
// Remove any reactor IO chain links
if (this._collate) {
for (key in this._collate) {
if (this._collate.hasOwnProperty(key)) {
this.collateRemove(key);
}
}
}
delete this._primaryKey;
delete this._primaryIndex;
delete this._primaryCrc;
delete this._crcLookup;
delete this._name;
delete this._data;
delete this._metrics;
return true;
}
} else {
return true;
}
return false;
};
/**
* Gets / sets the primary key for this collection.
* @param {String=} keyName The name of the primary key.
* @returns {*}
*/
Collection.prototype.primaryKey = function (keyName) {
if (keyName !== undefined) {
if (this._primaryKey !== keyName) {
this._primaryKey = keyName;
// Set the primary key index primary key
this._primaryIndex.primaryKey(keyName);
// Rebuild the primary key index
this.rebuildPrimaryKeyIndex();
}
return this;
}
return this._primaryKey;
};
/**
* Handles insert events and routes changes to binds and views as required.
* @param {Array} inserted An array of inserted documents.
* @param {Array} failed An array of documents that failed to insert.
* @private
*/
Collection.prototype._onInsert = function (inserted, failed) {
this.emit('insert', inserted, failed);
};
/**
* Handles update events and routes changes to binds and views as required.
* @param {Array} items An array of updated documents.
* @private
*/
Collection.prototype._onUpdate = function (items) {
this.emit('update', items);
};
/**
* Handles remove events and routes changes to binds and views as required.
* @param {Array} items An array of removed documents.
* @private
*/
Collection.prototype._onRemove = function (items) {
this.emit('remove', items);
};
/**
* Gets / sets the db instance this class instance belongs to.
* @param {Core=} db The db instance.
* @returns {*}
*/
Shared.synthesize(Collection.prototype, 'db', function (db) {
if (db) {
if (this.primaryKey() === '_id') {
// Set primary key to the db's key by default
this.primaryKey(db.primaryKey());
}
}
return this.$super.apply(this, arguments);
});
/**
* Sets the collection's data to the array of documents passed.
* @param data
* @param options Optional options object.
* @param callback Optional callback function.
*/
Collection.prototype.setData = function (data, options, callback) {
if (this._state === 'dropped') {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot operate in a dropped state!');
}
if (data) {
var op = this._metrics.create('setData');
op.start();
options = this.options(options);
this.preSetData(data, options, callback);
if (options.$decouple) {
data = this.decouple(data);
}
if (!(data instanceof Array)) {
data = [data];
}
op.time('transformIn');
data = this.transformIn(data);
op.time('transformIn');
var oldData = [].concat(this._data);
this._dataReplace(data);
// Update the primary key index
op.time('Rebuild Primary Key Index');
this.rebuildPrimaryKeyIndex(options);
op.time('Rebuild Primary Key Index');
// Rebuild all other indexes
op.time('Rebuild All Other Indexes');
this._rebuildIndexes();
op.time('Rebuild All Other Indexes');
op.time('Resolve chains');
this.chainSend('setData', data, {oldData: oldData});
op.time('Resolve chains');
op.stop();
this.emit('setData', this._data, oldData);
}
if (callback) { callback(false); }
return this;
};
/**
* Drops and rebuilds the primary key index for all documents in the collection.
* @param {Object=} options An optional options object.
* @private
*/
Collection.prototype.rebuildPrimaryKeyIndex = function (options) {
options = options || {
$ensureKeys: undefined,
$violationCheck: undefined
};
var ensureKeys = options && options.$ensureKeys !== undefined ? options.$ensureKeys : true,
violationCheck = options && options.$violationCheck !== undefined ? options.$violationCheck : true,
arr,
arrCount,
arrItem,
pIndex = this._primaryIndex,
crcIndex = this._primaryCrc,
crcLookup = this._crcLookup,
pKey = this._primaryKey,
jString;
// Drop the existing primary index
pIndex.truncate();
crcIndex.truncate();
crcLookup.truncate();
// Loop the data and check for a primary key in each object
arr = this._data;
arrCount = arr.length;
while (arrCount--) {
arrItem = arr[arrCount];
if (ensureKeys) {
// Make sure the item has a primary key
this.ensurePrimaryKey(arrItem);
}
if (violationCheck) {
// Check for primary key violation
if (!pIndex.uniqueSet(arrItem[pKey], arrItem)) {
// Primary key violation
throw('ForerunnerDB.Collection "' + this.name() + '": Call to setData on collection failed because your data violates the primary key unique constraint. One or more documents are using the same primary key: ' + arrItem[this._primaryKey]);
}
} else {
pIndex.set(arrItem[pKey], arrItem);
}
// Generate a CRC string
jString = JSON.stringify(arrItem);
crcIndex.set(arrItem[pKey], jString);
crcLookup.set(jString, arrItem);
}
};
/**
* Checks for a primary key on the document and assigns one if none
* currently exists.
* @param {Object} obj The object to check a primary key against.
* @private
*/
Collection.prototype.ensurePrimaryKey = function (obj) {
if (obj[this._primaryKey] === undefined) {
// Assign a primary key automatically
obj[this._primaryKey] = this.objectId();
}
};
/**
* Clears all data from the collection.
* @returns {Collection}
*/
Collection.prototype.truncate = function () {
if (this._state === 'dropped') {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot operate in a dropped state!');
}
this.emit('truncate', this._data);
// Clear all the data from the collection
this._data.length = 0;
// Re-create the primary index data
this._primaryIndex = new KeyValueStore('primary');
this._primaryCrc = new KeyValueStore('primaryCrc');
this._crcLookup = new KeyValueStore('crcLookup');
this.deferEmit('change', {type: 'truncate'});
return this;
};
/**
* Modifies an existing document or documents in a collection. This will update
* all matches for 'query' with the data held in 'update'. It will not overwrite
* the matched documents with the update document.
*
* @param {Object} obj The document object to upsert or an array containing
* documents to upsert.
*
 * If the document contains a primary key field (based on the collection's primary
* key) then the database will search for an existing document with a matching id.
* If a matching document is found, the document will be updated. Any keys that
* match keys on the existing document will be overwritten with new data. Any keys
* that do not currently exist on the document will be added to the document.
*
* If the document does not contain an id or the id passed does not match an existing
* document, an insert is performed instead. If no id is present a new primary key
* id is provided for the item.
*
* @param {Function=} callback Optional callback method.
* @returns {Object} An object containing two keys, "op" contains either "insert" or
* "update" depending on the type of operation that was performed and "result"
* contains the return data from the operation used.
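 * @example
 * // Illustrative usage only (collection and values are hypothetical):
 * collection.upsert({_id: '1', name: 'First'});  // primary key unseen, so op is "insert"
 * collection.upsert({_id: '1', name: 'Second'}); // primary key exists, so op is "update"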
*/
Collection.prototype.upsert = function (obj, callback) {
if (this._state === 'dropped') {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot operate in a dropped state!');
}
if (obj) {
var queue = this._deferQueue.upsert,
deferThreshold = this._deferThreshold.upsert;
var returnData = {},
query,
i;
// Determine if the object passed is an array or not
if (obj instanceof Array) {
if (obj.length > deferThreshold) {
// Break up upsert into blocks
this._deferQueue.upsert = queue.concat(obj);
// Fire off the insert queue handler
this.processQueue('upsert', callback);
return {};
} else {
// Loop the array and upsert each item
returnData = [];
for (i = 0; i < obj.length; i++) {
returnData.push(this.upsert(obj[i]));
}
if (callback) { callback(); }
return returnData;
}
}
// Determine if the operation is an insert or an update
if (obj[this._primaryKey]) {
// Check if an object with this primary key already exists
query = {};
query[this._primaryKey] = obj[this._primaryKey];
if (this._primaryIndex.lookup(query)[0]) {
// The document already exists with this id, this operation is an update
returnData.op = 'update';
} else {
// No document with this id exists, this operation is an insert
returnData.op = 'insert';
}
} else {
// The document passed does not contain an id, this operation is an insert
returnData.op = 'insert';
}
switch (returnData.op) {
case 'insert':
returnData.result = this.insert(obj);
break;
case 'update':
returnData.result = this.update(query, obj);
break;
default:
break;
}
return returnData;
} else {
if (callback) { callback(); }
}
return {};
};
/**
* Modifies an existing document or documents in a collection. This will update
* all matches for 'query' with the data held in 'update'. It will not overwrite
* the matched documents with the update document.
*
* @param {Object} query The query that must be matched for a document to be
* operated on.
* @param {Object} update The object containing updated key/values. Any keys that
* match keys on the existing document will be overwritten with this data. Any
* keys that do not currently exist on the document will be added to the document.
* @param {Object=} options An options object.
* @returns {Array} The items that were updated.
*/
Collection.prototype.update = function (query, update, options) {
if (this._state === 'dropped') {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot operate in a dropped state!');
}
// Decouple the update data
update = this.decouple(update);
// Handle transform
update = this.transformIn(update);
if (this.debug()) {
console.log('Updating some collection data for collection "' + this.name() + '"');
}
var self = this,
op = this._metrics.create('update'),
dataSet,
updated,
updateCall = function (originalDoc) {
var newDoc = self.decouple(originalDoc),
triggerOperation,
result;
if (self.willTrigger(self.TYPE_UPDATE, self.PHASE_BEFORE) || self.willTrigger(self.TYPE_UPDATE, self.PHASE_AFTER)) {
triggerOperation = {
type: 'update',
query: self.decouple(query),
update: self.decouple(update),
options: self.decouple(options),
op: op
};
// Update newDoc with the update criteria so we know what the data will look
// like AFTER the update is processed
result = self.updateObject(newDoc, triggerOperation.update, triggerOperation.query, triggerOperation.options, '');
if (self.processTrigger(triggerOperation, self.TYPE_UPDATE, self.PHASE_BEFORE, originalDoc, newDoc) !== false) {
// No triggers complained so let's execute the replacement of the existing
// object with the new one
result = self.updateObject(originalDoc, newDoc, triggerOperation.query, triggerOperation.options, '');
// NOTE: If for some reason we would only like to fire this event if changes are actually going
// to occur on the object from the proposed update then we can add "result &&" to the if
self.processTrigger(triggerOperation, self.TYPE_UPDATE, self.PHASE_AFTER, originalDoc, newDoc);
} else {
// Trigger cancelled operation so tell result that it was not updated
result = false;
}
} else {
// No triggers complained so let's execute the replacement of the existing
// object with the new one
result = self.updateObject(originalDoc, update, query, options, '');
}
return result;
};
op.start();
op.time('Retrieve documents to update');
dataSet = this.find(query, {$decouple: false});
op.time('Retrieve documents to update');
if (dataSet.length) {
op.time('Update documents');
updated = dataSet.filter(updateCall);
op.time('Update documents');
if (updated.length) {
op.time('Resolve chains');
this.chainSend('update', {
query: query,
update: update,
dataSet: dataSet
}, options);
op.time('Resolve chains');
this._onUpdate(updated);
this.deferEmit('change', {type: 'update', data: updated});
}
}
op.stop();
// TODO: Should we decouple the updated array before return by default?
return updated || [];
};
Collection.prototype._replaceObj = function (currentObj, newObj) {
var i;
// Check if the new document has a different primary key value from the existing one
// Remove item from indexes
this._removeFromIndexes(currentObj);
// Remove existing keys from current object
for (i in currentObj) {
if (currentObj.hasOwnProperty(i)) {
delete currentObj[i];
}
}
// Add new keys to current object
for (i in newObj) {
if (newObj.hasOwnProperty(i)) {
currentObj[i] = newObj[i];
}
}
// Update the item in the primary index
if (!this._insertIntoIndexes(currentObj)) {
throw('ForerunnerDB.Collection "' + this.name() + '": Primary key violation in update! Key violated: ' + currentObj[this._primaryKey]);
}
// Update the object in the collection data
//this._data.splice(this._data.indexOf(currentObj), 1, newObj);
return true;
};
/**
* Helper method to update a document from it's id.
* @param {String} id The id of the document.
* @param {Object} update The object containing the key/values to update to.
* @returns {Array} The items that were updated.
*/
Collection.prototype.updateById = function (id, update) {
var searchObj = {};
searchObj[this._primaryKey] = id;
return this.update(searchObj, update);
};
/**
* Internal method for document updating.
* @param {Object} doc The document to update.
* @param {Object} update The object with key/value pairs to update the document with.
* @param {Object} query The query object that we need to match to perform an update.
* @param {Object} options An options object.
* @param {String} path The current recursive path.
* @param {String} opType The type of update operation to perform, if none is specified
* default is to set new data against matching fields.
* @returns {Boolean} True if the document was updated with new / changed data or
* false if it was not updated because the data was the same.
* @private
*/
Collection.prototype.updateObject = function (doc, update, query, options, path, opType) {
// TODO: This method is long, try to break it into smaller pieces
update = this.decouple(update);
// Clear leading dots from path
path = path || '';
if (path.substr(0, 1) === '.') { path = path.substr(1, path.length -1); }
//var oldDoc = this.decouple(doc),
var updated = false,
recurseUpdated = false,
operation,
tmpArray,
tmpIndex,
tmpCount,
tempIndex,
pathInstance,
sourceIsArray,
updateIsArray,
i;
// Loop each key in the update object
for (i in update) {
if (update.hasOwnProperty(i)) {
// Reset operation flag
operation = false;
// Check if the property starts with a dollar (function)
if (i.substr(0, 1) === '$') {
// Check for commands
switch (i) {
case '$key':
case '$index':
// Ignore some operators
operation = true;
break;
default:
operation = true;
// Now run the operation
recurseUpdated = this.updateObject(doc, update[i], query, options, path, i);
updated = updated || recurseUpdated;
break;
}
}
// Check if the key has a .$ at the end, denoting an array lookup
if (this._isPositionalKey(i)) {
operation = true;
// Modify i to be the name of the field
i = i.substr(0, i.length - 2);
pathInstance = new Path(path + '.' + i);
// Check if the key is an array and has items
if (doc[i] && doc[i] instanceof Array && doc[i].length) {
tmpArray = [];
// Loop the array and find matches to our search
for (tmpIndex = 0; tmpIndex < doc[i].length; tmpIndex++) {
if (this._match(doc[i][tmpIndex], pathInstance.value(query)[0], '', {})) {
tmpArray.push(tmpIndex);
}
}
// Loop the items that matched and update them
for (tmpIndex = 0; tmpIndex < tmpArray.length; tmpIndex++) {
recurseUpdated = this.updateObject(doc[i][tmpArray[tmpIndex]], update[i + '.$'], query, options, path + '.' + i, opType);
updated = updated || recurseUpdated;
}
}
}
if (!operation) {
if (!opType && typeof(update[i]) === 'object') {
if (doc[i] !== null && typeof(doc[i]) === 'object') {
// Check if we are dealing with arrays
sourceIsArray = doc[i] instanceof Array;
updateIsArray = update[i] instanceof Array;
if (sourceIsArray || updateIsArray) {
// Check if the update is an object and the doc is an array
if (!updateIsArray && sourceIsArray) {
// Update is an object, source is an array so match the array items
// with our query object to find the one to update inside this array
// Loop the array and find matches to our search
for (tmpIndex = 0; tmpIndex < doc[i].length; tmpIndex++) {
recurseUpdated = this.updateObject(doc[i][tmpIndex], update[i], query, options, path + '.' + i, opType);
updated = updated || recurseUpdated;
}
} else {
// Either both source and update are arrays or the update is
// an array and the source is not, so set source to update
if (doc[i] !== update[i]) {
this._updateProperty(doc, i, update[i]);
updated = true;
}
}
} else {
// The doc key is an object so traverse the
// update further
recurseUpdated = this.updateObject(doc[i], update[i], query, options, path + '.' + i, opType);
updated = updated || recurseUpdated;
}
} else {
if (doc[i] !== update[i]) {
this._updateProperty(doc, i, update[i]);
updated = true;
}
}
} else {
switch (opType) {
case '$inc':
this._updateIncrement(doc, i, update[i]);
updated = true;
break;
case '$push':
// Check if the target key is undefined and if so, create an array
if (doc[i] === undefined) {
// Initialise a new array
this._updateProperty(doc, i, []);
}
// Check that the target key is an array
if (doc[i] instanceof Array) {
// Check for a $position modifier with an $each
if (update[i].$position !== undefined && update[i].$each instanceof Array) {
// Grab the position to insert at
tempIndex = update[i].$position;
// Loop the each array and push each item
tmpCount = update[i].$each.length;
for (tmpIndex = 0; tmpIndex < tmpCount; tmpIndex++) {
this._updateSplicePush(doc[i], tempIndex + tmpIndex, update[i].$each[tmpIndex]);
}
} else if (update[i].$each instanceof Array) {
// Do a loop over the each to push multiple items
tmpCount = update[i].$each.length;
for (tmpIndex = 0; tmpIndex < tmpCount; tmpIndex++) {
this._updatePush(doc[i], update[i].$each[tmpIndex]);
}
} else {
// Do a standard push
this._updatePush(doc[i], update[i]);
}
updated = true;
} else {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot push to a key that is not an array! (' + i + ')');
}
break;
case '$pull':
if (doc[i] instanceof Array) {
tmpArray = [];
// Loop the array and find matches to our search
for (tmpIndex = 0; tmpIndex < doc[i].length; tmpIndex++) {
if (this._match(doc[i][tmpIndex], update[i], '', {})) {
tmpArray.push(tmpIndex);
}
}
tmpCount = tmpArray.length;
// Now loop the pull array and remove items to be pulled
while (tmpCount--) {
this._updatePull(doc[i], tmpArray[tmpCount]);
updated = true;
}
}
break;
case '$pullAll':
if (doc[i] instanceof Array) {
if (update[i] instanceof Array) {
tmpArray = doc[i];
tmpCount = tmpArray.length;
if (tmpCount > 0) {
// Now loop the pull array and remove items to be pulled
while (tmpCount--) {
for (tempIndex = 0; tempIndex < update[i].length; tempIndex++) {
if (tmpArray[tmpCount] === update[i][tempIndex]) {
this._updatePull(doc[i], tmpCount);
tmpCount--;
updated = true;
}
}
if (tmpCount < 0) {
break;
}
}
}
} else {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot pullAll without being given an array of values to pull! (' + i + ')');
}
}
break;
case '$addToSet':
// Check if the target key is undefined and if so, create an array
if (doc[i] === undefined) {
// Initialise a new array
this._updateProperty(doc, i, []);
}
// Check that the target key is an array
if (doc[i] instanceof Array) {
// Loop the target array and check for existence of item
var targetArr = doc[i],
targetArrIndex,
targetArrCount = targetArr.length,
objHash,
addObj = true,
optionObj = (options && options.$addToSet),
hashMode,
pathSolver;
// Check if we have an options object for our operation
if (update[i].$key) {
hashMode = false;
pathSolver = new Path(update[i].$key);
objHash = pathSolver.value(update[i])[0];
// Remove the key from the object before we add it
delete update[i].$key;
} else if (optionObj && optionObj.key) {
hashMode = false;
pathSolver = new Path(optionObj.key);
objHash = pathSolver.value(update[i])[0];
} else {
objHash = JSON.stringify(update[i]);
hashMode = true;
}
for (targetArrIndex = 0; targetArrIndex < targetArrCount; targetArrIndex++) {
if (hashMode) {
// Check if objects match via a string hash (JSON)
if (JSON.stringify(targetArr[targetArrIndex]) === objHash) {
// The object already exists, don't add it
addObj = false;
break;
}
} else {
// Check if objects match based on the path
if (objHash === pathSolver.value(targetArr[targetArrIndex])[0]) {
// The object already exists, don't add it
addObj = false;
break;
}
}
}
if (addObj) {
this._updatePush(doc[i], update[i]);
updated = true;
}
} else {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot addToSet on a key that is not an array! (' + i + ')');
}
break;
case '$splicePush':
// Check if the target key is undefined and if so, create an array
if (doc[i] === undefined) {
// Initialise a new array
this._updateProperty(doc, i, []);
}
// Check that the target key is an array
if (doc[i] instanceof Array) {
tempIndex = update.$index;
if (tempIndex !== undefined) {
delete update.$index;
// Check for out of bounds index
if (tempIndex > doc[i].length) {
tempIndex = doc[i].length;
}
this._updateSplicePush(doc[i], tempIndex, update[i]);
updated = true;
} else {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot splicePush without a $index integer value!');
}
} else {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot splicePush with a key that is not an array! (' + i + ')');
}
break;
case '$move':
if (doc[i] instanceof Array) {
// Loop the array and find matches to our search
for (tmpIndex = 0; tmpIndex < doc[i].length; tmpIndex++) {
if (this._match(doc[i][tmpIndex], update[i], '', {})) {
var moveToIndex = update.$index;
if (moveToIndex !== undefined) {
delete update.$index;
this._updateSpliceMove(doc[i], tmpIndex, moveToIndex);
updated = true;
} else {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot move without a $index integer value!');
}
break;
}
}
} else {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot move on a key that is not an array! (' + i + ')');
}
break;
case '$mul':
this._updateMultiply(doc, i, update[i]);
updated = true;
break;
case '$rename':
this._updateRename(doc, i, update[i]);
updated = true;
break;
case '$unset':
this._updateUnset(doc, i);
updated = true;
break;
case '$pop':
if (doc[i] instanceof Array) {
if (this._updatePop(doc[i], update[i])) {
updated = true;
}
} else {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot pop from a key that is not an array! (' + i + ')');
}
break;
default:
if (doc[i] !== update[i]) {
this._updateProperty(doc, i, update[i]);
updated = true;
}
break;
}
}
}
}
}
return updated;
};
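// A minimal usage sketch of the operators handled above (names are illustrative;
// assumes a collection instance "users" obtained via db.collection('users') and
// the update(query, updateObj, options) call signature used elsewhere in this module):
//   users.update({_id: 1}, {$inc: {loginCount: 1}});      // increment a numeric field
//   users.update({_id: 1}, {$push: {tags: 'admin'}});     // append to an array
//   users.update({_id: 1}, {$addToSet: {tags: 'admin'}}); // append only if not present
//   users.update({_id: 1}, {$pull: {tags: 'admin'}});     // remove matching array items
//   users.update({_id: 1}, {$unset: {legacyField: 1}});   // delete a property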
/**
 * Determines if the passed key has an array positional mark (a '.$' at the end
 * of its name).
* @param {String} key The key to check.
* @returns {Boolean} True if it is a positional or false if not.
* @private
*/
Collection.prototype._isPositionalKey = function (key) {
return key.substr(key.length - 2, 2) === '.$';
};
/**
 * Updates a property on an object depending on whether the collection is
 * currently running data-binding or not.
* @param {Object} doc The object whose property is to be updated.
* @param {String} prop The property to update.
* @param {*} val The new value of the property.
* @private
*/
Collection.prototype._updateProperty = function (doc, prop, val) {
doc[prop] = val;
if (this.debug()) {
console.log('ForerunnerDB.Collection: Setting non-data-bound document property "' + prop + '" for collection "' + this.name() + '"');
}
};
/**
* Increments a value for a property on a document by the passed number.
* @param {Object} doc The document to modify.
* @param {String} prop The property to modify.
* @param {Number} val The amount to increment by.
* @private
*/
Collection.prototype._updateIncrement = function (doc, prop, val) {
doc[prop] += val;
};
/**
* Changes the index of an item in the passed array.
* @param {Array} arr The array to modify.
* @param {Number} indexFrom The index to move the item from.
* @param {Number} indexTo The index to move the item to.
* @private
*/
Collection.prototype._updateSpliceMove = function (arr, indexFrom, indexTo) {
arr.splice(indexTo, 0, arr.splice(indexFrom, 1)[0]);
if (this.debug()) {
console.log('ForerunnerDB.Collection: Moving non-data-bound document array index from "' + indexFrom + '" to "' + indexTo + '" for collection "' + this.name() + '"');
}
};
/**
* Inserts an item into the passed array at the specified index.
* @param {Array} arr The array to insert into.
* @param {Number} index The index to insert at.
* @param {Object} doc The document to insert.
* @private
*/
Collection.prototype._updateSplicePush = function (arr, index, doc) {
if (arr.length > index) {
arr.splice(index, 0, doc);
} else {
arr.push(doc);
}
};
/**
* Inserts an item at the end of an array.
* @param {Array} arr The array to insert the item into.
* @param {Object} doc The document to insert.
* @private
*/
Collection.prototype._updatePush = function (arr, doc) {
arr.push(doc);
};
/**
* Removes an item from the passed array.
* @param {Array} arr The array to modify.
* @param {Number} index The index of the item in the array to remove.
* @private
*/
Collection.prototype._updatePull = function (arr, index) {
arr.splice(index, 1);
};
/**
* Multiplies a value for a property on a document by the passed number.
* @param {Object} doc The document to modify.
* @param {String} prop The property to modify.
* @param {Number} val The amount to multiply by.
* @private
*/
Collection.prototype._updateMultiply = function (doc, prop, val) {
doc[prop] *= val;
};
/**
* Renames a property on a document to the passed property.
* @param {Object} doc The document to modify.
* @param {String} prop The property to rename.
 * @param {String} val The new property name.
* @private
*/
Collection.prototype._updateRename = function (doc, prop, val) {
doc[val] = doc[prop];
delete doc[prop];
};
/**
* Deletes a property on a document.
* @param {Object} doc The document to modify.
* @param {String} prop The property to delete.
* @private
*/
Collection.prototype._updateUnset = function (doc, prop) {
delete doc[prop];
};
/**
* Pops an item from the array stack.
* @param {Object} doc The document to modify.
* @param {Number=} val Optional, if set to 1 will pop, if set to -1 will shift.
* @return {Boolean}
* @private
*/
Collection.prototype._updatePop = function (doc, val) {
var updated = false;
if (doc.length > 0) {
if (val === 1) {
doc.pop();
updated = true;
} else if (val === -1) {
doc.shift();
updated = true;
}
}
return updated;
};
/**
* Removes any documents from the collection that match the search query
* key/values.
* @param {Object} query The query object.
* @param {Object=} options An options object.
* @param {Function=} callback A callback method.
* @returns {Array} An array of the documents that were removed.
*/
Collection.prototype.remove = function (query, options, callback) {
if (this._state === 'dropped') {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot operate in a dropped state!');
}
var self = this,
dataSet,
index,
arrIndex,
returnArr,
removeMethod,
triggerOperation,
doc,
newDoc;
if (query instanceof Array) {
returnArr = [];
for (arrIndex = 0; arrIndex < query.length; arrIndex++) {
returnArr.push(this.remove(query[arrIndex], {noEmit: true}));
}
if (!options || (options && !options.noEmit)) {
this._onRemove(returnArr);
}
if (callback) { callback(false, returnArr); }
return returnArr;
} else {
dataSet = this.find(query, {$decouple: false});
if (dataSet.length) {
removeMethod = function (dataItem) {
// Remove the item from the collection's indexes
self._removeFromIndexes(dataItem);
// Remove data from internal stores
index = self._data.indexOf(dataItem);
self._dataRemoveAtIndex(index);
};
// Remove the data from the collection
for (var i = 0; i < dataSet.length; i++) {
doc = dataSet[i];
if (self.willTrigger(self.TYPE_REMOVE, self.PHASE_BEFORE) || self.willTrigger(self.TYPE_REMOVE, self.PHASE_AFTER)) {
triggerOperation = {
type: 'remove'
};
newDoc = self.decouple(doc);
if (self.processTrigger(triggerOperation, self.TYPE_REMOVE, self.PHASE_BEFORE, newDoc, newDoc) !== false) {
// The trigger didn't ask to cancel so execute the removal method
removeMethod(doc);
self.processTrigger(triggerOperation, self.TYPE_REMOVE, self.PHASE_AFTER, newDoc, newDoc);
}
} else {
// No triggers to execute
removeMethod(doc);
}
}
//op.time('Resolve chains');
this.chainSend('remove', {
query: query,
dataSet: dataSet
}, options);
//op.time('Resolve chains');
if (!options || (options && !options.noEmit)) {
this._onRemove(dataSet);
}
this.deferEmit('change', {type: 'remove', data: dataSet});
}
if (callback) { callback(false, dataSet); }
return dataSet;
}
};
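// A minimal usage sketch (assumes an existing collection instance "users"):
// remove() returns the array of removed documents and can also take an array
// of query objects or a callback, as handled above.
//   var removed = users.remove({archived: true});
//   console.log(removed.length + ' document(s) removed');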
/**
* Helper method that removes a document that matches the given id.
* @param {String} id The id of the document to remove.
* @returns {Array} An array of documents that were removed.
*/
Collection.prototype.removeById = function (id) {
var searchObj = {};
searchObj[this._primaryKey] = id;
return this.remove(searchObj);
};
/**
* Queues an event to be fired. This has automatic de-bouncing so that any
* events of the same type that occur within 100 milliseconds of a previous
* one will all be wrapped into a single emit rather than emitting tons of
* events for lots of chained inserts etc.
* @private
*/
Collection.prototype.deferEmit = function () {
var self = this,
args;
if (!this._noEmitDefer && (!this._db || (this._db && !this._db._noEmitDefer))) {
args = arguments;
// Check for an existing timeout
if (this._changeTimeout) {
clearTimeout(this._changeTimeout);
}
// Set a timeout
this._changeTimeout = setTimeout(function () {
if (self.debug()) { console.log('ForerunnerDB.Collection: Emitting ' + args[0]); }
self.emit.apply(self, args);
}, 100);
} else {
this.emit.apply(this, arguments);
}
};
/**
* Processes a deferred action queue.
* @param {String} type The queue name to process.
* @param {Function} callback A method to call when the queue has processed.
*/
Collection.prototype.processQueue = function (type, callback) {
var queue = this._deferQueue[type],
deferThreshold = this._deferThreshold[type],
deferTime = this._deferTime[type];
if (queue.length) {
var self = this,
dataArr;
// Process items up to the threshold
if (queue.length) {
if (queue.length > deferThreshold) {
// Grab items up to the threshold value
dataArr = queue.splice(0, deferThreshold);
} else {
// Grab all the remaining items
dataArr = queue.splice(0, queue.length);
}
this[type](dataArr);
}
// Queue another process
setTimeout(function () {
self.processQueue(type, callback);
}, deferTime);
} else {
if (callback) { callback(); }
}
};
/**
* Inserts a document or array of documents into the collection.
 * @param {Object|Array} data Either a document object or an array of document
 * objects to insert into the collection.
 * @param {Number=} index Optional index to insert the record at.
 * @param {Function=} callback Optional callback called once the action is complete.
*/
Collection.prototype.insert = function (data, index, callback) {
if (this._state === 'dropped') {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot operate in a dropped state!');
}
if (typeof(index) === 'function') {
callback = index;
index = this._data.length;
} else if (index === undefined) {
index = this._data.length;
}
data = this.transformIn(data);
return this._insertHandle(data, index, callback);
};
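// A minimal usage sketch (assumes an existing collection instance "users"):
// insert() accepts a single document or an array of documents; when the data is
// processed synchronously it returns an object with "inserted" and "failed" arrays.
//   var result = users.insert([{_id: 1, name: 'Kat'}, {_id: 2, name: 'Jim'}]);
//   console.log(result.inserted.length, result.failed.length);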
/**
* Inserts a document or array of documents into the collection.
 * @param {Object|Array} data Either a document object or an array of document
 * objects to insert into the collection.
 * @param {Number=} index Optional index to insert the record at.
 * @param {Function=} callback Optional callback called once the action is complete.
*/
Collection.prototype._insertHandle = function (data, index, callback) {
var //self = this,
queue = this._deferQueue.insert,
deferThreshold = this._deferThreshold.insert,
//deferTime = this._deferTime.insert,
inserted = [],
failed = [],
insertResult,
i;
if (data instanceof Array) {
// Check if there are more insert items than the insert defer
// threshold, if so, break up inserts so we don't tie up the
// ui or thread
if (data.length > deferThreshold) {
// Break up insert into blocks
this._deferQueue.insert = queue.concat(data);
// Fire off the insert queue handler
this.processQueue('insert', callback);
return;
} else {
// Loop the array and add items
for (i = 0; i < data.length; i++) {
insertResult = this._insert(data[i], index + i);
if (insertResult === true) {
inserted.push(data[i]);
} else {
failed.push({
doc: data[i],
reason: insertResult
});
}
}
}
} else {
// Store the data item
insertResult = this._insert(data, index);
if (insertResult === true) {
inserted.push(data);
} else {
failed.push({
doc: data,
reason: insertResult
});
}
}
//op.time('Resolve chains');
this.chainSend('insert', data, {index: index});
//op.time('Resolve chains');
this._onInsert(inserted, failed);
if (callback) { callback(); }
this.deferEmit('change', {type: 'insert', data: inserted});
return {
inserted: inserted,
failed: failed
};
};
/**
* Internal method to insert a document into the collection. Will
* check for index violations before allowing the document to be inserted.
* @param {Object} doc The document to insert after passing index violation
* tests.
* @param {Number=} index Optional index to insert the document at.
 * @returns {Boolean|String} True on success, false if a "before" insert trigger
 * cancelled the operation, or a string describing why the insert failed (an
 * index violation or no document being passed).
* @private
*/
Collection.prototype._insert = function (doc, index) {
if (doc) {
var self = this,
indexViolation,
triggerOperation,
insertMethod,
newDoc;
this.ensurePrimaryKey(doc);
// Check indexes are not going to be broken by the document
indexViolation = this.insertIndexViolation(doc);
insertMethod = function (doc) {
// Add the item to the collection's indexes
self._insertIntoIndexes(doc);
// Check index overflow
if (index > self._data.length) {
index = self._data.length;
}
// Insert the document
self._dataInsertAtIndex(index, doc);
};
if (!indexViolation) {
if (self.willTrigger(self.TYPE_INSERT, self.PHASE_BEFORE) || self.willTrigger(self.TYPE_INSERT, self.PHASE_AFTER)) {
triggerOperation = {
type: 'insert'
};
if (self.processTrigger(triggerOperation, self.TYPE_INSERT, self.PHASE_BEFORE, {}, doc) !== false) {
insertMethod(doc);
if (self.willTrigger(self.TYPE_INSERT, self.PHASE_AFTER)) {
// Clone the doc so that the programmer cannot update the internal document
// on the "after" phase trigger
newDoc = self.decouple(doc);
self.processTrigger(triggerOperation, self.TYPE_INSERT, self.PHASE_AFTER, {}, newDoc);
}
} else {
// The trigger just wants to cancel the operation
return false;
}
} else {
// No triggers to execute
insertMethod(doc);
}
return true;
} else {
return 'Index violation in index: ' + indexViolation;
}
}
return 'No document passed to insert';
};
/**
 * Inserts a document into the internal collection data array at
* the specified index.
* @param {number} index The index to insert at.
* @param {object} doc The document to insert.
* @private
*/
Collection.prototype._dataInsertAtIndex = function (index, doc) {
this._data.splice(index, 0, doc);
};
/**
* Removes a document from the internal collection data array at
* the specified index.
* @param {number} index The index to remove from.
* @private
*/
Collection.prototype._dataRemoveAtIndex = function (index) {
this._data.splice(index, 1);
};
/**
* Replaces all data in the collection's internal data array with
* the passed array of data.
* @param {array} data The array of data to replace existing data with.
* @private
*/
Collection.prototype._dataReplace = function (data) {
// Clear the array - using a while loop with pop is by far the
// fastest way to clear an array currently
while (this._data.length) {
this._data.pop();
}
// Append new items to the array
this._data = this._data.concat(data);
};
/**
* Inserts a document into the collection indexes.
* @param {Object} doc The document to insert.
* @private
*/
Collection.prototype._insertIntoIndexes = function (doc) {
var arr = this._indexByName,
arrIndex,
violated,
jString = JSON.stringify(doc);
// Insert to primary key index
violated = this._primaryIndex.uniqueSet(doc[this._primaryKey], doc);
this._primaryCrc.uniqueSet(doc[this._primaryKey], jString);
this._crcLookup.uniqueSet(jString, doc);
// Insert into other indexes
for (arrIndex in arr) {
if (arr.hasOwnProperty(arrIndex)) {
arr[arrIndex].insert(doc);
}
}
return violated;
};
/**
* Removes a document from the collection indexes.
* @param {Object} doc The document to remove.
* @private
*/
Collection.prototype._removeFromIndexes = function (doc) {
var arr = this._indexByName,
arrIndex,
jString = JSON.stringify(doc);
// Remove from primary key index
this._primaryIndex.unSet(doc[this._primaryKey]);
this._primaryCrc.unSet(doc[this._primaryKey]);
this._crcLookup.unSet(jString);
// Remove from other indexes
for (arrIndex in arr) {
if (arr.hasOwnProperty(arrIndex)) {
arr[arrIndex].remove(doc);
}
}
};
/**
* Rebuild collection indexes.
* @private
*/
Collection.prototype._rebuildIndexes = function () {
var arr = this._indexByName,
arrIndex;
// Remove from other indexes
for (arrIndex in arr) {
if (arr.hasOwnProperty(arrIndex)) {
arr[arrIndex].rebuild();
}
}
};
/**
* Returns the index of the document identified by the passed item's primary key.
* @param {Object} item The item whose primary key should be used to lookup.
* @returns {Number} The index the item with the matching primary key is occupying.
*/
Collection.prototype.indexOfDocById = function (item) {
return this._data.indexOf(
this._primaryIndex.get(
item[this._primaryKey]
)
);
};
/**
* Uses the passed query to generate a new collection with results
* matching the query parameters.
*
* @param query
* @param options
* @returns {*}
*/
Collection.prototype.subset = function (query, options) {
var result = this.find(query, options);
return new Collection()
._subsetOf(this)
.primaryKey(this._primaryKey)
.setData(result);
};
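// A minimal usage sketch (assumes an existing collection instance "users"):
// subset() returns a new collection holding only the matching documents and
// keeps a reference back to its parent, readable via subsetOf().
//   var admins = users.subset({role: 'admin'});
//   console.log(admins.subsetOf() === users); // true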
/**
* Gets the collection that this collection is a subset of.
* @returns {Collection}
*/
Collection.prototype.subsetOf = function () {
return this.__subsetOf;
};
/**
* Sets the collection that this collection is a subset of.
* @param {Collection} collection The collection to set as the parent of this subset.
* @returns {*} This object for chaining.
* @private
*/
Collection.prototype._subsetOf = function (collection) {
this.__subsetOf = collection;
return this;
};
/**
* Find the distinct values for a specified field across a single collection and
* returns the results in an array.
* @param {String} key The field path to return distinct values for e.g. "person.name".
* @param {Object=} query The query to use to filter the documents used to return values from.
* @param {Object=} options The query options to use when running the query.
* @returns {Array}
*/
Collection.prototype.distinct = function (key, query, options) {
if (this._state === 'dropped') {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot operate in a dropped state!');
}
var data = this.find(query, options),
pathSolver = new Path(key),
valueUsed = {},
distinctValues = [],
value,
i;
// Loop the data and build array of distinct values
for (i = 0; i < data.length; i++) {
value = pathSolver.value(data[i])[0];
if (value && !valueUsed[value]) {
valueUsed[value] = true;
distinctValues.push(value);
}
}
return distinctValues;
};
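// A minimal usage sketch (assumes an existing collection instance "users"):
// distinct() resolves the passed path against each matching document and
// returns the unique values it finds.
//   var cities = users.distinct('address.city', {active: true});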
/**
 * Helper method to find a document by its id.
 * @param {String} id The id of the document.
 * @param {Object=} options The options object, allowed keys are sort and limit.
 * @returns {Object} The first document whose primary key matches the passed id,
 * or undefined if no document matched.
*/
Collection.prototype.findById = function (id, options) {
var searchObj = {};
searchObj[this._primaryKey] = id;
return this.find(searchObj, options)[0];
};
/**
* Finds all documents that contain the passed string or search object
* regardless of where the string might occur within the document. This
* will match strings from the start, middle or end of the document's
* string (partial match).
* @param search The string to search for. Case sensitive.
* @param options A standard find() options object.
* @returns {Array} An array of documents that matched the search string.
*/
Collection.prototype.peek = function (search, options) {
// Loop all items
var arr = this._data,
arrCount = arr.length,
arrIndex,
arrItem,
tempColl = new Collection(),
typeOfSearch = typeof search;
if (typeOfSearch === 'string') {
for (arrIndex = 0; arrIndex < arrCount; arrIndex++) {
// Get json representation of object
arrItem = JSON.stringify(arr[arrIndex]);
// Check if string exists in object json
if (arrItem.indexOf(search) > -1) {
// Add this item to the temp collection
tempColl.insert(arr[arrIndex]);
}
}
return tempColl.find({}, options);
} else {
return this.find(search, options);
}
};
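// A minimal usage sketch (assumes an existing collection instance "users"):
// a string argument triggers a case-sensitive partial match against the JSON
// of every document, while an object argument behaves like find().
//   var hits = users.peek('Smith');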
/**
* Provides a query plan / operations log for a query.
* @param {Object} query The query to execute.
* @param {Object=} options Optional options object.
* @returns {Object} The query plan.
*/
Collection.prototype.explain = function (query, options) {
var result = this.find(query, options);
return result.__fdbOp._data;
};
/**
* Generates an options object with default values or adds default
* values to a passed object if those values are not currently set
* to anything.
* @param {object=} obj Optional options object to modify.
* @returns {object} The options object.
*/
Collection.prototype.options = function (obj) {
obj = obj || {};
obj.$decouple = obj.$decouple !== undefined ? obj.$decouple : true;
obj.$explain = obj.$explain !== undefined ? obj.$explain : false;
return obj;
};
/**
* Queries the collection based on the query object passed.
* @param {Object} query The query key/values that a document must match in
* order for it to be returned in the result array.
* @param {Object=} options An optional options object.
*
* @returns {Array} The results array from the find operation, containing all
* documents that matched the query.
*/
Collection.prototype.find = function (query, options) {
if (this._state === 'dropped') {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot operate in a dropped state!');
}
// TODO: This method is quite long, break into smaller pieces
query = query || {};
options = this.options(options);
var op = this._metrics.create('find'),
pk = this.primaryKey(),
self = this,
analysis,
//finalQuery,
scanLength,
requiresTableScan = true,
resultArr,
joinCollectionIndex,
joinIndex,
joinCollection = {},
joinQuery,
joinPath,
joinCollectionName,
joinCollectionInstance,
joinMatch,
joinMatchIndex,
joinSearch,
joinMulti,
joinRequire,
joinFindResults,
resultCollectionName,
resultIndex,
resultRemove = [],
index,
i, j, k,
fieldListOn = [],
fieldListOff = [],
elemMatchPathSolver,
elemMatchSubArr,
elemMatchSpliceArr,
matcherTmpOptions = {},
result,
matcher = function (doc) {
return self._match(doc, query, 'and', matcherTmpOptions);
};
op.start();
if (query) {
// Get query analysis to execute best optimised code path
op.time('analyseQuery');
analysis = this._analyseQuery(query, options, op);
op.time('analyseQuery');
op.data('analysis', analysis);
if (analysis.hasJoin && analysis.queriesJoin) {
// The query has a join and tries to limit by it's joined data
// Get an instance reference to the join collections
op.time('joinReferences');
for (joinIndex = 0; joinIndex < analysis.joinsOn.length; joinIndex++) {
joinCollectionName = analysis.joinsOn[joinIndex];
joinPath = new Path(analysis.joinQueries[joinCollectionName]);
joinQuery = joinPath.value(query)[0];
joinCollection[analysis.joinsOn[joinIndex]] = this._db.collection(analysis.joinsOn[joinIndex]).subset(joinQuery);
}
op.time('joinReferences');
}
// Check if an index lookup can be used to return this result
if (analysis.indexMatch.length && (!options || (options && !options.$skipIndex))) {
op.data('index.potential', analysis.indexMatch);
op.data('index.used', analysis.indexMatch[0].index);
// Get the data from the index
op.time('indexLookup');
resultArr = analysis.indexMatch[0].lookup;
op.time('indexLookup');
// Check if the index coverage is all keys, if not we still need to table scan it
if (analysis.indexMatch[0].keyData.totalKeyCount === analysis.indexMatch[0].keyData.score) {
// The index covers all the query keys so no table scan is required
requiresTableScan = false;
}
} else {
op.flag('usedIndex', false);
}
if (requiresTableScan) {
if (resultArr && resultArr.length) {
scanLength = resultArr.length;
op.time('tableScan: ' + scanLength);
// Filter the source data and return the result
resultArr = resultArr.filter(matcher);
} else {
// Filter the source data and return the result
scanLength = this._data.length;
op.time('tableScan: ' + scanLength);
resultArr = this._data.filter(matcher);
}
// Order the array if we were passed a sort clause
if (options.$orderBy) {
op.time('sort');
resultArr = this.sort(options.$orderBy, resultArr);
op.time('sort');
}
op.time('tableScan: ' + scanLength);
}
if (options.$limit && resultArr && resultArr.length > options.$limit) {
resultArr.length = options.$limit;
op.data('limit', options.$limit);
}
if (options.$decouple) {
// Now decouple the data from the original objects
op.time('decouple');
resultArr = this.decouple(resultArr);
op.time('decouple');
op.data('flag.decouple', true);
}
// Now process any joins on the final data
if (options.$join) {
for (joinCollectionIndex = 0; joinCollectionIndex < options.$join.length; joinCollectionIndex++) {
for (joinCollectionName in options.$join[joinCollectionIndex]) {
if (options.$join[joinCollectionIndex].hasOwnProperty(joinCollectionName)) {
// Set the key to store the join result in to the collection name by default
resultCollectionName = joinCollectionName;
// Get the join collection instance from the DB
joinCollectionInstance = this._db.collection(joinCollectionName);
// Get the match data for the join
joinMatch = options.$join[joinCollectionIndex][joinCollectionName];
// Loop our result data array
for (resultIndex = 0; resultIndex < resultArr.length; resultIndex++) {
// Loop the join conditions and build a search object from them
joinSearch = {};
joinMulti = false;
joinRequire = false;
for (joinMatchIndex in joinMatch) {
if (joinMatch.hasOwnProperty(joinMatchIndex)) {
// Check the join condition name for a special command operator
if (joinMatchIndex.substr(0, 1) === '$') {
// Special command
switch (joinMatchIndex) {
case '$as':
// Rename the collection when stored in the result document
resultCollectionName = joinMatch[joinMatchIndex];
break;
case '$multi':
// Return an array of documents instead of a single matching document
joinMulti = joinMatch[joinMatchIndex];
break;
case '$require':
// Remove the result item if no matching join data is found
joinRequire = joinMatch[joinMatchIndex];
break;
/*default:
// Check for a double-dollar which is a back-reference to the root collection item
if (joinMatchIndex.substr(0, 3) === '$$.') {
// Back reference
// TODO: Support complex joins
}
break;*/
}
} else {
// TODO: Could optimise this by caching path objects
// Get the data to match against and store in the search object
joinSearch[joinMatchIndex] = new Path(joinMatch[joinMatchIndex]).value(resultArr[resultIndex])[0];
}
}
}
// Do a find on the target collection against the match data
joinFindResults = joinCollectionInstance.find(joinSearch);
// Check if we require a joined row to allow the result item
if (!joinRequire || (joinRequire && joinFindResults[0])) {
// Join is not required or condition is met
resultArr[resultIndex][resultCollectionName] = joinMulti === false ? joinFindResults[0] : joinFindResults;
} else {
// Join required but condition not met, add item to removal queue
resultRemove.push(resultArr[resultIndex]);
}
}
}
}
}
op.data('flag.join', true);
}
// Process removal queue
if (resultRemove.length) {
op.time('removalQueue');
for (i = 0; i < resultRemove.length; i++) {
index = resultArr.indexOf(resultRemove[i]);
if (index > -1) {
resultArr.splice(index, 1);
}
}
op.time('removalQueue');
}
if (options.$transform) {
op.time('transform');
for (i = 0; i < resultArr.length; i++) {
resultArr.splice(i, 1, options.$transform(resultArr[i]));
}
op.time('transform');
op.data('flag.transform', true);
}
// Process transforms
if (this._transformEnabled && this._transformOut) {
op.time('transformOut');
resultArr = this.transformOut(resultArr);
op.time('transformOut');
}
op.data('results', resultArr.length);
} else {
resultArr = [];
}
// Generate a list of fields to limit data by
// Each property starts off being enabled by default (= 1) then
// if any property is explicitly specified as 1 then all switch to
// zero except the primary key.
//
// Any that are explicitly set to zero are switched off.
op.time('scanFields');
for (i in options) {
if (options.hasOwnProperty(i) && i.indexOf('$') !== 0) {
if (options[i] === 1) {
fieldListOn.push(i);
} else if (options[i] === 0) {
fieldListOff.push(i);
}
}
}
op.time('scanFields');
// Limit returned fields by the options data
if (fieldListOn.length || fieldListOff.length) {
op.data('flag.limitFields', true);
op.data('limitFields.on', fieldListOn);
op.data('limitFields.off', fieldListOff);
op.time('limitFields');
// We have explicit fields switched on or off
for (i = 0; i < resultArr.length; i++) {
result = resultArr[i];
for (j in result) {
if (result.hasOwnProperty(j)) {
if (fieldListOn.length) {
// We have explicit fields switched on so remove all fields
// that are not explicitly switched on
// Check if the field name is not the primary key
if (j !== pk) {
if (fieldListOn.indexOf(j) === -1) {
// This field is not in the on list, remove it
delete result[j];
}
}
}
if (fieldListOff.length) {
// We have explicit fields switched off so remove fields
// that are explicitly switched off
if (fieldListOff.indexOf(j) > -1) {
// This field is in the off list, remove it
delete result[j];
}
}
}
}
}
op.time('limitFields');
}
// Now run any projections on the data required
if (options.$elemMatch) {
op.data('flag.elemMatch', true);
op.time('projection-elemMatch');
for (i in options.$elemMatch) {
if (options.$elemMatch.hasOwnProperty(i)) {
elemMatchPathSolver = new Path(i);
// Loop the results array
for (j = 0; j < resultArr.length; j++) {
elemMatchSubArr = elemMatchPathSolver.value(resultArr[j])[0];
// Check we have a sub-array to loop
if (elemMatchSubArr && elemMatchSubArr.length) {
// Loop the sub-array and check for projection query matches
for (k = 0; k < elemMatchSubArr.length; k++) {
// Check if the current item in the sub-array matches the projection query
if (self._match(elemMatchSubArr[k], options.$elemMatch[i], '', {})) {
// The item matches the projection query so set the sub-array
// to an array that ONLY contains the matching item and then
// exit the loop since we only want to match the first item
elemMatchPathSolver.set(resultArr[j], i, [elemMatchSubArr[k]]);
break;
}
}
}
}
}
}
op.time('projection-elemMatch');
}
if (options.$elemsMatch) {
op.data('flag.elemsMatch', true);
op.time('projection-elemsMatch');
for (i in options.$elemsMatch) {
if (options.$elemsMatch.hasOwnProperty(i)) {
elemMatchPathSolver = new Path(i);
// Loop the results array
for (j = 0; j < resultArr.length; j++) {
elemMatchSubArr = elemMatchPathSolver.value(resultArr[j])[0];
// Check we have a sub-array to loop
if (elemMatchSubArr && elemMatchSubArr.length) {
elemMatchSpliceArr = [];
// Loop the sub-array and check for projection query matches
for (k = 0; k < elemMatchSubArr.length; k++) {
// Check if the current item in the sub-array matches the projection query
if (self._match(elemMatchSubArr[k], options.$elemsMatch[i], '', {})) {
// The item matches the projection query so add it to the final array
elemMatchSpliceArr.push(elemMatchSubArr[k]);
}
}
// Now set the final sub-array to the matched items
elemMatchPathSolver.set(resultArr[j], i, elemMatchSpliceArr);
}
}
}
}
op.time('projection-elemsMatch');
}
op.stop();
resultArr.__fdbOp = op;
return resultArr;
};
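// A minimal usage sketch (assumes an existing collection instance "users"):
// the options object supports the keys handled above such as $orderBy, $limit,
// $decouple and field limiting (fieldName: 1 or 0).
//   var topTen = users.find({active: true}, {$orderBy: {name: 1}, $limit: 10, name: 1});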
/**
* Returns one document that satisfies the specified query criteria. If multiple
* documents satisfy the query, this method returns the first document to match
* the query.
* @returns {*}
*/
Collection.prototype.findOne = function () {
return (this.find.apply(this, arguments))[0];
};
/**
* Gets the index in the collection data array of the first item matched by
* the passed query object.
* @param {Object} query The query to run to find the item to return the index of.
* @returns {Number}
*/
Collection.prototype.indexOf = function (query) {
var item = this.find(query, {$decouple: false})[0];
if (item) {
return this._data.indexOf(item);
}
};
/**
* Gets / sets the collection transform options.
* @param {Object} obj A collection transform options object.
* @returns {*}
*/
Collection.prototype.transform = function (obj) {
if (obj !== undefined) {
if (typeof obj === "object") {
if (obj.enabled !== undefined) {
this._transformEnabled = obj.enabled;
}
if (obj.dataIn !== undefined) {
this._transformIn = obj.dataIn;
}
if (obj.dataOut !== undefined) {
this._transformOut = obj.dataOut;
}
} else {
this._transformEnabled = obj !== false;
}
return this;
}
return {
enabled: this._transformEnabled,
dataIn: this._transformIn,
dataOut: this._transformOut
};
};
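// A minimal usage sketch (assumes an existing collection instance "users"):
// dataIn/dataOut handlers registered here are run by transformIn() and
// transformOut() below and must return the transformed document.
//   users.transform({
//       enabled: true,
//       dataIn: function (doc) { doc.savedAt = new Date(); return doc; },
//       dataOut: function (doc) { delete doc.savedAt; return doc; }
//   });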
/**
* Transforms data using the set transformIn method.
* @param {Object} data The data to transform.
* @returns {*}
*/
Collection.prototype.transformIn = function (data) {
if (this._transformEnabled && this._transformIn) {
if (data instanceof Array) {
var finalArr = [], i;
for (i = 0; i < data.length; i++) {
finalArr[i] = this._transformIn(data[i]);
}
return finalArr;
} else {
return this._transformIn(data);
}
}
return data;
};
/**
* Transforms data using the set transformOut method.
* @param {Object} data The data to transform.
* @returns {*}
*/
Collection.prototype.transformOut = function (data) {
if (this._transformEnabled && this._transformOut) {
if (data instanceof Array) {
var finalArr = [], i;
for (i = 0; i < data.length; i++) {
finalArr[i] = this._transformOut(data[i]);
}
return finalArr;
} else {
return this._transformOut(data);
}
}
return data;
};
/**
* Sorts an array of documents by the given sort path.
* @param {*} sortObj The keys and orders the array objects should be sorted by.
* @param {Array} arr The array of documents to sort.
* @returns {Array}
*/
Collection.prototype.sort = function (sortObj, arr) {
// Make sure we have an array object
arr = arr || [];
var sortArr = [],
sortKey,
sortSingleObj;
for (sortKey in sortObj) {
if (sortObj.hasOwnProperty(sortKey)) {
sortSingleObj = {};
sortSingleObj[sortKey] = sortObj[sortKey];
sortSingleObj.___fdbKey = sortKey;
sortArr.push(sortSingleObj);
}
}
if (sortArr.length < 2) {
// There is only one sort key, so do a simple sort and return it
return this._sort(sortObj, arr);
} else {
return this._bucketSort(sortArr, arr);
}
};
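// A minimal usage sketch (assumes an existing collection instance "users" and
// an array "docs" of documents): this is the same method find() uses when an
// $orderBy option is supplied; multiple keys fall through to the bucket sort below.
//   var ordered = users.sort({lastName: 1, firstName: 1}, docs);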
/**
 * Takes an array of sort paths and sorts the data into buckets before returning
 * the final array fully sorted by multiple keys.
* @param keyArr
* @param arr
* @returns {*}
* @private
*/
Collection.prototype._bucketSort = function (keyArr, arr) {
var keyObj = keyArr.shift(),
arrCopy,
buckets,
i,
finalArr = [];
if (keyArr.length > 0) {
// Sort array by bucket key
arr = this._sort(keyObj, arr);
// Split items into buckets
buckets = this.bucket(keyObj.___fdbKey, arr);
// Loop buckets and sort contents
for (i in buckets) {
if (buckets.hasOwnProperty(i)) {
arrCopy = [].concat(keyArr);
finalArr = finalArr.concat(this._bucketSort(arrCopy, buckets[i]));
}
}
return finalArr;
} else {
return this._sort(keyObj, arr);
}
};
/**
* Sorts array by individual sort path.
* @param key
* @param arr
* @returns {Array|*}
* @private
*/
Collection.prototype._sort = function (key, arr) {
var self = this,
sorterMethod,
pathSolver = new Path(),
dataPath = pathSolver.parse(key, true)[0];
pathSolver.path(dataPath.path);
if (dataPath.value === 1) {
// Sort ascending
sorterMethod = function (a, b) {
var valA = pathSolver.value(a)[0],
valB = pathSolver.value(b)[0];
return self.sortAsc(valA, valB);
};
} else if (dataPath.value === -1) {
// Sort descending
sorterMethod = function (a, b) {
var valA = pathSolver.value(a)[0],
valB = pathSolver.value(b)[0];
return self.sortDesc(valA, valB);
};
} else {
throw('ForerunnerDB.Collection "' + this.name() + '": $orderBy clause has invalid direction: ' + dataPath.value + ', accepted values are 1 or -1 for ascending or descending!');
}
return arr.sort(sorterMethod);
};
/**
* Takes an array of objects and returns a new object with the array items
* split into buckets by the passed key.
* @param {String} key The key to split the array into buckets by.
* @param {Array} arr An array of objects.
* @returns {Object}
*/
Collection.prototype.bucket = function (key, arr) {
var i,
buckets = {};
for (i = 0; i < arr.length; i++) {
buckets[arr[i][key]] = buckets[arr[i][key]] || [];
buckets[arr[i][key]].push(arr[i]);
}
return buckets;
};
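// A minimal usage sketch: bucket() groups an array of objects by the value of
// a single key, e.g. grouping documents by their "type" field.
//   var grouped = users.bucket('type', users.find());
//   // grouped might look like {user: [...], admin: [...]}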
/**
* Internal method that takes a search query and options and returns an object
* containing details about the query which can be used to optimise the search.
*
* @param query
* @param options
* @param op
* @returns {Object}
* @private
*/
Collection.prototype._analyseQuery = function (query, options, op) {
var analysis = {
queriesOn: [this._name],
indexMatch: [],
hasJoin: false,
queriesJoin: false,
joinQueries: {},
query: query,
options: options
},
joinCollectionIndex,
joinCollectionName,
joinCollections = [],
joinCollectionReferences = [],
queryPath,
index,
indexMatchData,
indexRef,
indexRefName,
indexLookup,
pathSolver,
queryKeyCount,
i;
// Check if the query is a primary key lookup
op.time('checkIndexes');
pathSolver = new Path();
queryKeyCount = pathSolver.countKeys(query);
if (queryKeyCount) {
if (query[this._primaryKey] !== undefined) {
// Return item via primary key possible
op.time('checkIndexMatch: Primary Key');
analysis.indexMatch.push({
lookup: this._primaryIndex.lookup(query, options),
keyData: {
matchedKeys: [this._primaryKey],
totalKeyCount: queryKeyCount,
score: 1
},
index: this._primaryIndex
});
op.time('checkIndexMatch: Primary Key');
}
// Check if an index can speed up the query
for (i in this._indexById) {
if (this._indexById.hasOwnProperty(i)) {
indexRef = this._indexById[i];
indexRefName = indexRef.name();
op.time('checkIndexMatch: ' + indexRefName);
indexMatchData = indexRef.match(query, options);
if (indexMatchData.score > 0) {
// This index can be used, store it
indexLookup = indexRef.lookup(query, options);
analysis.indexMatch.push({
lookup: indexLookup,
keyData: indexMatchData,
index: indexRef
});
}
op.time('checkIndexMatch: ' + indexRefName);
if (indexMatchData.score === queryKeyCount) {
// Found an optimal index, do not check for any more
break;
}
}
}
op.time('checkIndexes');
// Sort array descending on index key count (effectively a measure of relevance to the query)
if (analysis.indexMatch.length > 1) {
op.time('findOptimalIndex');
analysis.indexMatch.sort(function (a, b) {
if (a.keyData.score > b.keyData.score) {
// This index has a higher score than the other
return -1;
}
if (a.keyData.score < b.keyData.score) {
// This index has a lower score than the other
return 1;
}
// The indexes have the same score but can still be compared by the number of records
// they return from the query. The fewer records they return the better so order by
// record count
if (a.keyData.score === b.keyData.score) {
return a.lookup.length - b.lookup.length;
}
});
op.time('findOptimalIndex');
}
}
// Check for join data
if (options.$join) {
analysis.hasJoin = true;
// Loop all join operations
for (joinCollectionIndex = 0; joinCollectionIndex < options.$join.length; joinCollectionIndex++) {
// Loop the join collections and keep a reference to them
for (joinCollectionName in options.$join[joinCollectionIndex]) {
if (options.$join[joinCollectionIndex].hasOwnProperty(joinCollectionName)) {
joinCollections.push(joinCollectionName);
// Check if the join uses an $as operator
if ('$as' in options.$join[joinCollectionIndex][joinCollectionName]) {
joinCollectionReferences.push(options.$join[joinCollectionIndex][joinCollectionName].$as);
} else {
joinCollectionReferences.push(joinCollectionName);
}
}
}
}
// Loop the join collection references and determine if the query references
// any of the collections that are used in the join. If there are no queries against
// joined collections the find method can use a code path optimised for this.
// Queries against joined collections requires the joined collections to be filtered
// first and then joined so requires a little more work.
for (index = 0; index < joinCollectionReferences.length; index++) {
// Check if the query references any collection data that the join will create
queryPath = this._queryReferencesCollection(query, joinCollectionReferences[index], '');
if (queryPath) {
analysis.joinQueries[joinCollections[index]] = queryPath;
analysis.queriesJoin = true;
}
}
analysis.joinsOn = joinCollections;
analysis.queriesOn = analysis.queriesOn.concat(joinCollections);
}
return analysis;
};
/**
* Checks if the passed query references this collection.
* @param query
* @param collection
* @param path
* @returns {*}
* @private
*/
Collection.prototype._queryReferencesCollection = function (query, collection, path) {
var i;
for (i in query) {
if (query.hasOwnProperty(i)) {
// Check if this key is a reference match
if (i === collection) {
if (path) { path += '.'; }
return path + i;
} else {
if (typeof(query[i]) === 'object') {
// Recurse
if (path) { path += '.'; }
path += i;
return this._queryReferencesCollection(query[i], collection, path);
}
}
}
}
return false;
};
/**
* Returns the number of documents currently in the collection.
* @returns {Number}
*/
Collection.prototype.count = function (query, options) {
if (!query) {
return this._data.length;
} else {
// Run query and return count
return this.find(query, options).length;
}
};
/**
* Finds sub-documents from the collection's documents.
* @param match
* @param path
* @param subDocQuery
* @param subDocOptions
* @returns {*}
*/
Collection.prototype.findSub = function (match, path, subDocQuery, subDocOptions) {
var pathHandler = new Path(path),
docArr = this.find(match),
docCount = docArr.length,
docIndex,
subDocArr,
subDocCollection = this._db.collection('__FDB_temp_' + this.objectId()),
subDocResults,
resultObj = {
parents: docCount,
subDocTotal: 0,
subDocs: [],
pathFound: false,
err: ''
};
for (docIndex = 0; docIndex < docCount; docIndex++) {
subDocArr = pathHandler.value(docArr[docIndex])[0];
if (subDocArr) {
subDocCollection.setData(subDocArr);
subDocResults = subDocCollection.find(subDocQuery, subDocOptions);
if (subDocOptions.returnFirst && subDocResults.length) {
return subDocResults[0];
}
resultObj.subDocs.push(subDocResults);
resultObj.subDocTotal += subDocResults.length;
resultObj.pathFound = true;
}
}
// Drop the sub-document collection
subDocCollection.drop();
// Check if the call should not return stats, if so return only subDocs array
if (subDocOptions.noStats) {
return resultObj.subDocs;
}
if (!resultObj.pathFound) {
resultObj.err = 'No objects found in the parent documents with a matching path of: ' + path;
}
return resultObj;
};
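// A minimal usage sketch (assumes parent documents shaped like
// {active: true, orders: [{status: 'open'}, ...]}): findSub() runs a second
// query against the sub-document array found at the given path in each parent.
//   var stats = users.findSub({active: true}, 'orders', {status: 'open'}, {});
//   // stats.subDocs holds one array of matching sub-documents per parent found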
/**
* Checks that the passed document will not violate any index rules if
* inserted into the collection.
* @param {Object} doc The document to check indexes against.
 * @returns {Boolean|String} Either false (no violation occurred) or the name of
 * the index that was violated.
*/
Collection.prototype.insertIndexViolation = function (doc) {
var indexViolated,
arr = this._indexByName,
arrIndex,
arrItem;
// Check the item's primary key is not already in use
if (this._primaryIndex.get(doc[this._primaryKey])) {
indexViolated = this._primaryIndex;
} else {
// Check violations of other indexes
for (arrIndex in arr) {
if (arr.hasOwnProperty(arrIndex)) {
arrItem = arr[arrIndex];
if (arrItem.unique()) {
if (arrItem.violation(doc)) {
indexViolated = arrItem;
break;
}
}
}
}
}
return indexViolated ? indexViolated.name() : false;
};
/**
* Creates an index on the specified keys.
* @param {Object} keys The object containing keys to index.
* @param {Object} options An options object.
* @returns {*}
*/
Collection.prototype.ensureIndex = function (keys, options) {
if (this._state === 'dropped') {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot operate in a dropped state!');
}
this._indexByName = this._indexByName || {};
this._indexById = this._indexById || {};
var index,
time = {
start: new Date().getTime()
};
if (options) {
switch (options.type) {
case 'hashed':
index = new IndexHashMap(keys, options, this);
break;
case 'btree':
index = new IndexBinaryTree(keys, options, this);
break;
default:
// Default
index = new IndexHashMap(keys, options, this);
break;
}
} else {
// Default
index = new IndexHashMap(keys, options, this);
}
// Check the index does not already exist
if (this._indexByName[index.name()]) {
// Index already exists
return {
err: 'Index with that name already exists'
};
}
if (this._indexById[index.id()]) {
// Index already exists
return {
err: 'Index with those keys already exists'
};
}
// Create the index
index.rebuild();
// Add the index
this._indexByName[index.name()] = index;
this._indexById[index.id()] = index;
time.end = new Date().getTime();
time.total = time.end - time.start;
this._lastOp = {
type: 'ensureIndex',
stats: {
time: time
}
};
return {
index: index,
id: index.id(),
name: index.name(),
state: index.state()
};
};
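// A minimal usage sketch (assumes an existing collection instance "users"):
// a hashed index is built by default, or a binary tree index when the options
// object specifies type: 'btree'; a unique option enables violation checks.
//   var result = users.ensureIndex({email: 1}, {unique: true});
//   if (result.err) { console.log(result.err); }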
/**
* Gets an index by it's name.
 * @param {String} name The name of the index to retrieve.
* @returns {*}
*/
Collection.prototype.index = function (name) {
if (this._indexByName) {
return this._indexByName[name];
}
};
/**
 * Gets the recorded metrics for recent reporting operations, such as run time.
* @returns {Object}
*/
Collection.prototype.lastOp = function () {
return this._metrics.list();
};
/**
* Generates a difference object that contains insert, update and remove arrays
* representing the operations to execute to make this collection have the same
* data as the one passed.
* @param {Collection} collection The collection to diff against.
* @returns {{}}
*/
Collection.prototype.diff = function (collection) {
var diff = {
insert: [],
update: [],
remove: []
};
var pm = this.primaryKey(),
arr,
arrIndex,
arrItem,
arrCount;
// Check if the primary key index of each collection can be utilised
if (pm !== collection.primaryKey()) {
throw('ForerunnerDB.Collection "' + this.name() + '": Collection diffing requires that both collections have the same primary key!');
}
// Use the collection primary key index to do the diff (super-fast)
arr = collection._data;
// Check if we have an array or another collection
while (arr && !(arr instanceof Array)) {
// We don't have an array, assign collection and get data
collection = arr;
arr = collection._data;
}
arrCount = arr.length;
// Loop the collection's data array and check for matching items
for (arrIndex = 0; arrIndex < arrCount; arrIndex++) {
arrItem = arr[arrIndex];
// Check for a matching item in this collection
if (this._primaryIndex.get(arrItem[pm])) {
// Matching item exists, check if the data is the same
if (this._primaryCrc.get(arrItem[pm]) !== collection._primaryCrc.get(arrItem[pm])) {
// The documents exist in both collections but data differs, update required
diff.update.push(arrItem);
}
} else {
// The document is missing from this collection, insert required
diff.insert.push(arrItem);
}
}
// Now loop this collection's data and check for matching items
arr = this._data;
arrCount = arr.length;
for (arrIndex = 0; arrIndex < arrCount; arrIndex++) {
arrItem = arr[arrIndex];
if (!collection._primaryIndex.get(arrItem[pm])) {
// The document does not exist in the other collection, remove required
diff.remove.push(arrItem);
}
}
return diff;
};
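// A minimal usage sketch (assumes two collections that share the same primary
// key): diff() describes the operations needed to make this collection contain
// the same data as the one passed.
//   var changes = users.diff(otherUsers);
//   // changes = {insert: [...], update: [...], remove: [...]}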
Collection.prototype.collateAdd = function (collection, process) {
if (typeof collection === 'string') {
// The collection passed is a name, not a reference so get
// the reference from the name
collection = this._db.collection(collection, {
autoCreate: false,
throwError: false
});
}
if (collection) {
this._collate = this._collate || {};
this._collate[collection.name()] = new ReactorIO(collection, this, process);
return this;
} else {
throw('Cannot collate from a non-existent collection!');
}
};
Collection.prototype.collateRemove = function (collection) {
if (typeof collection === 'object') {
// We need to have the name of the collection to remove it
collection = collection.name();
}
if (collection) {
// Drop the reactor IO chain node
this._collate[collection].drop();
// Remove the collection data from the collate object
delete this._collate[collection];
return this;
} else {
throw('No collection name passed to collateRemove() or collection not found!');
}
};
Core.prototype.collection = new Overload({
/**
* Get a collection by name. If the collection does not already exist
* then one is created for that name automatically.
* @param {Object} options An options object.
* @returns {Collection}
*/
'object': function (options) {
return this.$main.call(this, options);
},
/**
* Get a collection by name. If the collection does not already exist
* then one is created for that name automatically.
* @param {String} collectionName The name of the collection.
* @returns {Collection}
*/
'string': function (collectionName) {
return this.$main.call(this, {
name: collectionName
});
},
/**
* Get a collection by name. If the collection does not already exist
* then one is created for that name automatically.
* @param {String} collectionName The name of the collection.
* @param {String} primaryKey Optional primary key to specify the primary key field on the collection
* objects. Defaults to "_id".
* @returns {Collection}
*/
'string, string': function (collectionName, primaryKey) {
return this.$main.call(this, {
name: collectionName,
primaryKey: primaryKey
});
},
/**
* Get a collection by name. If the collection does not already exist
* then one is created for that name automatically.
* @param {String} collectionName The name of the collection.
* @param {Object} options An options object.
* @returns {Collection}
*/
'string, object': function (collectionName, options) {
options.name = collectionName;
return this.$main.call(this, options);
},
/**
* Get a collection by name. If the collection does not already exist
* then one is created for that name automatically.
* @param {String} collectionName The name of the collection.
* @param {String} primaryKey Optional primary key to specify the primary key field on the collection
* objects. Defaults to "_id".
* @param {Object} options An options object.
* @returns {Collection}
*/
'string, string, object': function (collectionName, primaryKey, options) {
options.name = collectionName;
options.primaryKey = primaryKey;
return this.$main.call(this, options);
},
/**
 * The main handler method. This gets called by all the other variants and
* handles the actual logic of the overloaded method.
* @param {Object} options An options object.
* @returns {*}
*/
'$main': function (options) {
var name = options.name;
if (name) {
if (!this._collection[name]) {
if (options && options.autoCreate === false) {
if (options && options.throwError !== false) {
throw('ForerunnerDB.Core "' + this.name() + '": Cannot get collection ' + name + ' because it does not exist and auto-create has been disabled!');
}
}
if (this.debug()) {
console.log('Creating collection ' + name);
}
}
this._collection[name] = this._collection[name] || new Collection(name).db(this);
if (options.primaryKey !== undefined) {
this._collection[name].primaryKey(options.primaryKey);
}
return this._collection[name];
} else {
if (!options || (options && options.throwError !== false)) {
throw('ForerunnerDB.Core "' + this.name() + '": Cannot get collection with undefined name!');
}
}
}
});
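// A minimal usage sketch (assumes a database instance "db" created from Core):
// the overloads above allow the accessor to be called with a name, a name plus
// primary key, or a full options object.
//   var users = db.collection('users');
//   var logs = db.collection('logs', 'logId');
//   var temp = db.collection({name: 'temp', primaryKey: 'tid'});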
/**
* Determine if a collection with the passed name already exists.
* @param {String} viewName The name of the collection to check for.
* @returns {boolean}
*/
Core.prototype.collectionExists = function (viewName) {
return Boolean(this._collection[viewName]);
};
/**
* Returns an array of collections the DB currently has.
* @param {String|RegExp=} search The optional search string or regular expression to use
* to match collection names against.
* @returns {Array} An array of objects containing details of each collection
* the database is currently managing.
*/
Core.prototype.collections = function (search) {
var arr = [],
i;
if (search) {
if (!(search instanceof RegExp)) {
// Turn the search into a regular expression
search = new RegExp(search);
}
}
for (i in this._collection) {
if (this._collection.hasOwnProperty(i)) {
if (search) {
if (search.exec(i)) {
arr.push({
name: i,
count: this._collection[i].count()
});
}
} else {
arr.push({
name: i,
count: this._collection[i].count()
});
}
}
}
return arr;
};
Shared.finishModule('Collection');
module.exports = Collection;
},{"./Crc":6,"./IndexBinaryTree":9,"./IndexHashMap":10,"./KeyValueStore":11,"./Metrics":12,"./Overload":24,"./Path":26,"./ReactorIO":28,"./Shared":29}],4:[function(_dereq_,module,exports){
"use strict";
// Import external names locally
var Shared,
Core,
CoreInit,
Collection;
Shared = _dereq_('./Shared');
var CollectionGroup = function () {
this.init.apply(this, arguments);
};
CollectionGroup.prototype.init = function (name) {
var self = this;
self._name = name;
self._data = new Collection('__FDB__cg_data_' + self._name);
self._collections = [];
self._view = [];
};
Shared.addModule('CollectionGroup', CollectionGroup);
Shared.mixin(CollectionGroup.prototype, 'Mixin.Common');
Shared.mixin(CollectionGroup.prototype, 'Mixin.ChainReactor');
Shared.mixin(CollectionGroup.prototype, 'Mixin.Constants');
Shared.mixin(CollectionGroup.prototype, 'Mixin.Triggers');
Collection = _dereq_('./Collection');
Core = Shared.modules.Core;
CoreInit = Shared.modules.Core.prototype.init;
CollectionGroup.prototype.on = function () {
this._data.on.apply(this._data, arguments);
};
CollectionGroup.prototype.off = function () {
this._data.off.apply(this._data, arguments);
};
CollectionGroup.prototype.emit = function () {
this._data.emit.apply(this._data, arguments);
};
/**
* Gets / sets the primary key for this collection group.
* @param {String=} keyName The name of the primary key.
* @returns {*}
*/
CollectionGroup.prototype.primaryKey = function (keyName) {
if (keyName !== undefined) {
this._primaryKey = keyName;
return this;
}
return this._primaryKey;
};
/**
* Gets / sets the current state.
* @param {String=} val The name of the state to set.
* @returns {*}
*/
Shared.synthesize(CollectionGroup.prototype, 'state');
/**
* Gets / sets the db instance the collection group belongs to.
* @param {Core=} db The db instance.
* @returns {*}
*/
Shared.synthesize(CollectionGroup.prototype, 'db');
CollectionGroup.prototype.addCollection = function (collection) {
if (collection) {
if (this._collections.indexOf(collection) === -1) {
//var self = this;
// Check for compatible primary keys
if (this._collections.length) {
if (this._primaryKey !== collection.primaryKey()) {
throw('ForerunnerDB.CollectionGroup "' + this.name() + '": All collections in a collection group must have the same primary key!');
}
} else {
// Set the primary key to the first collection added
this.primaryKey(collection.primaryKey());
}
// Add the collection
this._collections.push(collection);
collection._groups = collection._groups || [];
collection._groups.push(this);
collection.chain(this);
// Hook the collection's drop event to destroy group data
collection.on('drop', function () {
// Remove collection from any group associations
if (collection._groups && collection._groups.length) {
var groupArr = [],
i;
// Copy the group array because if we call removeCollection on a group
// it will alter the groups array of this collection mid-loop!
for (i = 0; i < collection._groups.length; i++) {
groupArr.push(collection._groups[i]);
}
// Loop any groups we are part of and remove ourselves from them
for (i = 0; i < groupArr.length; i++) {
collection._groups[i].removeCollection(collection);
}
}
delete collection._groups;
});
// Add collection's data
this._data.insert(collection.find());
}
}
return this;
};
CollectionGroup.prototype.removeCollection = function (collection) {
if (collection) {
var collectionIndex = this._collections.indexOf(collection),
groupIndex;
if (collectionIndex !== -1) {
collection.unChain(this);
this._collections.splice(collectionIndex, 1);
collection._groups = collection._groups || [];
groupIndex = collection._groups.indexOf(this);
if (groupIndex !== -1) {
collection._groups.splice(groupIndex, 1);
}
collection.off('drop');
}
if (this._collections.length === 0) {
// Wipe the primary key
delete this._primaryKey;
}
}
return this;
};
CollectionGroup.prototype._chainHandler = function (chainPacket) {
//sender = chainPacket.sender;
switch (chainPacket.type) {
case 'setData':
// Decouple the data to ensure we are working with our own copy
chainPacket.data = this.decouple(chainPacket.data);
// Remove old data
this._data.remove(chainPacket.options.oldData);
// Add new data
this._data.insert(chainPacket.data);
break;
case 'insert':
// Decouple the data to ensure we are working with our own copy
chainPacket.data = this.decouple(chainPacket.data);
// Add new data
this._data.insert(chainPacket.data);
break;
case 'update':
// Update data
this._data.update(chainPacket.data.query, chainPacket.data.update, chainPacket.options);
break;
case 'remove':
this._data.remove(chainPacket.data.query, chainPacket.options);
break;
default:
break;
}
};
CollectionGroup.prototype.insert = function () {
this._collectionsRun('insert', arguments);
};
CollectionGroup.prototype.update = function () {
this._collectionsRun('update', arguments);
};
CollectionGroup.prototype.updateById = function () {
this._collectionsRun('updateById', arguments);
};
CollectionGroup.prototype.remove = function () {
this._collectionsRun('remove', arguments);
};
CollectionGroup.prototype._collectionsRun = function (type, args) {
for (var i = 0; i < this._collections.length; i++) {
this._collections[i][type].apply(this._collections[i], args);
}
};
CollectionGroup.prototype.find = function (query, options) {
return this._data.find(query, options);
};
/**
* Helper method that removes a document that matches the given id.
* @param {String} id The id of the document to remove.
*/
CollectionGroup.prototype.removeById = function (id) {
// Loop the collections in this group and apply the remove
for (var i = 0; i < this._collections.length; i++) {
this._collections[i].removeById(id);
}
};
/**
* Uses the passed query to generate a new collection with results
* matching the query parameters.
*
* @param query
* @param options
* @returns {*}
*/
CollectionGroup.prototype.subset = function (query, options) {
var result = this.find(query, options);
return new Collection()
._subsetOf(this)
.primaryKey(this._primaryKey)
.setData(result);
};
/**
* Drops a collection group from the database.
* @returns {boolean} True on success, false on failure.
*/
CollectionGroup.prototype.drop = function () {
if (this._state !== 'dropped') {
var i,
collArr,
viewArr;
if (this._debug) {
console.log('Dropping collection group ' + this._name);
}
this._state = 'dropped';
if (this._collections && this._collections.length) {
collArr = [].concat(this._collections);
for (i = 0; i < collArr.length; i++) {
this.removeCollection(collArr[i]);
}
}
if (this._view && this._view.length) {
viewArr = [].concat(this._view);
for (i = 0; i < viewArr.length; i++) {
this._removeView(viewArr[i]);
}
}
this.emit('drop', this);
}
return true;
};
// Extend DB to include collection groups
Core.prototype.init = function () {
this._collectionGroup = {};
CoreInit.apply(this, arguments);
};
Core.prototype.collectionGroup = function (collectionGroupName) {
if (collectionGroupName) {
this._collectionGroup[collectionGroupName] = this._collectionGroup[collectionGroupName] || new CollectionGroup(collectionGroupName).db(this);
return this._collectionGroup[collectionGroupName];
} else {
// Return an object of collection data
return this._collectionGroup;
}
};
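// Usage sketch (illustrative only): with a Core instance `db`, a collection group merges
// data from several collections that share the same primary key. The names below are
// made up:
//
//     var group = db.collectionGroup('people');
//     group.addCollection(db.collection('users'));
//     group.addCollection(db.collection('admins'));   // must use the same primary key as 'users'
//     group.find({});                                  // queries the combined group data
//     group.removeCollection(db.collection('admins'));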
/**
* Returns an array of collection groups the DB currently has.
* @returns {Array} An array of objects containing details of each collection group
* the database is currently managing.
*/
Core.prototype.collectionGroups = function () {
var arr = [],
i;
for (i in this._collectionGroup) {
if (this._collectionGroup.hasOwnProperty(i)) {
arr.push({
name: i
});
}
}
return arr;
};
module.exports = CollectionGroup;
},{"./Collection":3,"./Shared":29}],5:[function(_dereq_,module,exports){
/*
License
Copyright (c) 2015 Irrelon Software Limited
http://www.irrelon.com
http://www.forerunnerdb.com
Please visit the license page to see latest license information:
http://www.forerunnerdb.com/licensing.html
*/
"use strict";
var Shared,
Collection,
Metrics,
Crc,
Overload;
Shared = _dereq_('./Shared');
Overload = _dereq_('./Overload');
/**
* The main ForerunnerDB core object.
* @constructor
*/
var Core = function (name) {
this.init.apply(this, arguments);
};
Core.prototype.init = function (name) {
this._primaryKey = '_id';
this._name = name;
this._collection = {};
this._debug = {};
};
Core.prototype.moduleLoaded = new Overload({
/**
* Checks if a module has been loaded into the database.
* @param {String} moduleName The name of the module to check for.
* @returns {Boolean} True if the module is loaded, false if not.
*/
'string': function (moduleName) {
if (moduleName !== undefined) {
moduleName = moduleName.replace(/ /g, '');
var modules = moduleName.split(','),
index;
for (index = 0; index < modules.length; index++) {
if (!Shared.modules[modules[index]]) {
return false;
}
}
return true;
}
return false;
},
/**
* Checks if a module is loaded and if so calls the passed
* callback method.
* @param {String} moduleName The name of the module to check for.
* @param {Function} callback The callback method to call if module is loaded.
*/
'string, function': function (moduleName, callback) {
if (moduleName !== undefined) {
moduleName = moduleName.replace(/ /g, '');
var modules = moduleName.split(','),
index;
for (index = 0; index < modules.length; index++) {
if (!Shared.modules[modules[index]]) {
return false;
}
}
callback();
}
},
/**
* Checks if a module is loaded and if so calls the passed
* success method, otherwise calls the failure method.
* @param {String} moduleName The name of the module to check for.
* @param {Function} success The callback method to call if module is loaded.
* @param {Function} failure The callback method to call if module not loaded.
*/
'string, function, function': function (moduleName, success, failure) {
if (moduleName !== undefined) {
moduleName = moduleName.replace(/ /g, '');
var modules = moduleName.split(','),
index;
for (index = 0; index < modules.length; index++) {
if (!Shared.modules[modules[index]]) {
failure();
return false;
}
}
success();
}
}
});
/**
* Checks version against the string passed and if it matches (or partially matches)
* then the callback is called.
* @param {String} val The version to check against.
* @param {Function} callback The callback to call if match is true.
* @returns {Boolean|String} True or false when a version string is passed, otherwise the current version string.
*/
Core.prototype.version = function (val, callback) {
if (val !== undefined) {
if (Shared.version.indexOf(val) === 0) {
if (callback) { callback(); }
return true;
}
return false;
}
return Shared.version;
};
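// Usage sketch (illustrative only): version() either returns the current version string
// or tests a prefix against it; the '1.3' value below is just an example:
//
//     db.version();                      // -> the Shared.version string
//     db.version('1.3', function () {
//         // called only when the running version starts with '1.3'
//     });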
// Expose moduleLoaded method to non-instantiated object ForerunnerDB
Core.moduleLoaded = Core.prototype.moduleLoaded;
// Expose version method to non-instantiated object ForerunnerDB
Core.version = Core.prototype.version;
// Provide public access to the Shared object
Core.shared = Shared;
Core.prototype.shared = Shared;
Shared.addModule('Core', Core);
Shared.mixin(Core.prototype, 'Mixin.Common');
Shared.mixin(Core.prototype, 'Mixin.ChainReactor');
Shared.mixin(Core.prototype, 'Mixin.Constants');
Collection = _dereq_('./Collection.js');
Metrics = _dereq_('./Metrics.js');
Crc = _dereq_('./Crc.js');
Core.prototype._isServer = false;
/**
* Gets / sets the default primary key for new collections.
* @param {String=} val The name of the primary key to set.
* @returns {*}
*/
Shared.synthesize(Core.prototype, 'primaryKey');
/**
* Gets / sets the current state.
* @param {String=} val The name of the state to set.
* @returns {*}
*/
Shared.synthesize(Core.prototype, 'state');
/**
* Gets / sets the name of the database.
* @param {String=} val The name of the database to set.
* @returns {*}
*/
Shared.synthesize(Core.prototype, 'name');
/**
* Returns true if ForerunnerDB is running on a client browser.
* @returns {boolean}
*/
Core.prototype.isClient = function () {
return !this._isServer;
};
/**
* Returns true if ForerunnerDB is running on a server.
* @returns {boolean}
*/
Core.prototype.isServer = function () {
return this._isServer;
};
/**
* Returns a checksum of a string.
* @param {String} string The string to checksum.
* @return {Number} The checksum generated.
*/
Core.prototype.crc = Crc;
/**
* Checks if the database is running on a client (browser) or
* a server (node.js).
* @returns {Boolean} Returns true if running on a browser.
*/
Core.prototype.isClient = function () {
return !this._isServer;
};
/**
* Checks if the database is running on a client (browser) or
* a server (node.js).
* @returns {Boolean} Returns true if running on a server.
*/
Core.prototype.isServer = function () {
return this._isServer;
};
/**
* Converts a normal javascript array of objects into a DB collection.
* @param {Array} arr An array of objects.
* @returns {Collection} A new collection instance with the data set to the
* array passed.
*/
Core.prototype.arrayToCollection = function (arr) {
return new Collection().setData(arr);
};
/**
* Registers an event listener against an event name.
* @param {String} event The name of the event to listen for.
* @param {Function} listener The listener method to call when
* the event is fired.
* @returns {*}
*/
Core.prototype.on = function(event, listener) {
this._listeners = this._listeners || {};
this._listeners[event] = this._listeners[event] || [];
this._listeners[event].push(listener);
return this;
};
/**
* De-registers an event listener from an event name.
* @param {String} event The name of the event to stop listening for.
* @param {Function} listener The listener method passed to on() when
* registering the event listener.
* @returns {*}
*/
Core.prototype.off = function(event, listener) {
this._listeners = this._listeners || {};
if (event in this._listeners) {
var arr = this._listeners[event],
index = arr.indexOf(listener);
if (index > -1) {
arr.splice(index, 1);
}
}
return this;
};
/**
* Emits an event by name with the given data.
* @param {String} event The name of the event to emit.
* @param {*=} data The data to emit with the event.
* @returns {*}
*/
Core.prototype.emit = function(event, data) {
this._listeners = this._listeners || {};
if (event in this._listeners) {
var arr = this._listeners[event],
arrCount = arr.length,
arrIndex;
for (arrIndex = 0; arrIndex < arrCount; arrIndex++) {
arr[arrIndex].apply(this, Array.prototype.slice.call(arguments, 1));
}
}
return this;
};
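// Usage sketch (illustrative only): the db-level event methods follow the usual
// register / fire / de-register pattern; 'drop' is emitted by Core.prototype.drop below:
//
//     var onDrop = function (db) { console.log('db dropped'); };
//     db.on('drop', onDrop);
//     db.emit('drop', db);     // calls onDrop with db as the data argument
//     db.off('drop', onDrop);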
/**
* Find all documents across all collections in the database that match the passed
* string or search object.
* @param search String or search object.
* @returns {Array}
*/
Core.prototype.peek = function (search) {
var i,
coll,
arr = [],
typeOfSearch = typeof search;
// Loop collections
for (i in this._collection) {
if (this._collection.hasOwnProperty(i)) {
coll = this._collection[i];
if (typeOfSearch === 'string') {
arr = arr.concat(coll.peek(search));
} else {
arr = arr.concat(coll.find(search));
}
}
}
return arr;
};
/**
* Find all documents across all collections in the database that match the passed
* string or search object and return them in an object where each key is the name
* of the collection that the document was matched in.
* @param search String or search object.
* @returns {object}
*/
Core.prototype.peekCat = function (search) {
var i,
coll,
cat = {},
arr,
typeOfSearch = typeof search;
// Loop collections
for (i in this._collection) {
if (this._collection.hasOwnProperty(i)) {
coll = this._collection[i];
if (typeOfSearch === 'string') {
arr = coll.peek(search);
if (arr && arr.length) {
cat[coll.name()] = arr;
}
} else {
arr = coll.find(search);
if (arr && arr.length) {
cat[coll.name()] = arr;
}
}
}
}
return cat;
};
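// Usage sketch (illustrative only): peek() returns a flat array across all collections,
// while peekCat() keys its results by collection name; the search values are made up:
//
//     db.peek('smith');                    // string search delegated to each collection's peek()
//     db.peekCat({ lastName: 'smith' });   // -> { users: [...], admins: [...] } for matching collections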
/**
* Drops all collections in the database.
* @param {Function=} callback Optional callback method.
*/
Core.prototype.drop = function (callback) {
if (this._state !== 'dropped') {
var arr = this.collections(),
arrCount = arr.length,
arrIndex,
finishCount = 0,
afterDrop = function () {
finishCount++;
if (finishCount === arrCount) {
if (callback) {
callback();
}
}
};
this._state = 'dropped';
for (arrIndex = 0; arrIndex < arrCount; arrIndex++) {
this.collection(arr[arrIndex].name).drop(afterDrop);
delete this._collection[arr[arrIndex].name];
}
this.emit('drop', this);
}
return true;
};
module.exports = Core;
},{"./Collection.js":3,"./Crc.js":6,"./Metrics.js":12,"./Overload":24,"./Shared":29}],6:[function(_dereq_,module,exports){
"use strict";
var crcTable = (function () {
var crcTable = [],
c, n, k;
for (n = 0; n < 256; n++) {
c = n;
for (k = 0; k < 8; k++) {
c = ((c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1)); // jshint ignore:line
}
crcTable[n] = c;
}
return crcTable;
}());
module.exports = function(str) {
var crc = 0 ^ (-1), // jshint ignore:line
i;
for (i = 0; i < str.length; i++) {
crc = (crc >>> 8) ^ crcTable[(crc ^ str.charCodeAt(i)) & 0xFF]; // jshint ignore:line
}
return (crc ^ (-1)) >>> 0; // jshint ignore:line
};
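// Usage sketch (illustrative only): this module is exposed on database instances as
// Core.prototype.crc (see the Core module above), so a checksum can be taken like this:
//
//     db.crc('hello world');   // -> unsigned 32-bit CRC-32 value as a Number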
},{}],7:[function(_dereq_,module,exports){
"use strict";
var Shared,
Collection,
Core,
CoreInit;
Shared = _dereq_('./Shared');
(function init () {
var Document = function () {
this.init.apply(this, arguments);
};
Document.prototype.init = function (name) {
this._name = name;
this._data = {};
};
Shared.addModule('Document', Document);
Shared.mixin(Document.prototype, 'Mixin.Common');
Shared.mixin(Document.prototype, 'Mixin.Events');
Shared.mixin(Document.prototype, 'Mixin.ChainReactor');
Shared.mixin(Document.prototype, 'Mixin.Constants');
Shared.mixin(Document.prototype, 'Mixin.Triggers');
Collection = _dereq_('./Collection');
Core = Shared.modules.Core;
CoreInit = Shared.modules.Core.prototype.init;
/**
* Gets / sets the current state.
* @param {String=} val The name of the state to set.
* @returns {*}
*/
Shared.synthesize(Document.prototype, 'state');
/**
* Gets / sets the db instance this class instance belongs to.
* @param {Core=} db The db instance.
* @returns {*}
*/
Shared.synthesize(Document.prototype, 'db');
/**
* Gets / sets the document name.
* @param {String=} val The name to assign
* @returns {*}
*/
Shared.synthesize(Document.prototype, 'name');
Document.prototype.setData = function (data) {
var i,
$unset;
if (data) {
data = this.decouple(data);
if (this._linked) {
$unset = {};
// Remove keys that don't exist in the new data from the current object
for (i in this._data) {
if (i.substr(0, 6) !== 'jQuery' && this._data.hasOwnProperty(i)) {
// Check if existing data has key
if (data[i] === undefined) {
// Add property name to those to unset
$unset[i] = 1;
}
}
}
data.$unset = $unset;
// Now update the object with new data
this.updateObject(this._data, data, {});
} else {
// Straight data assignment
this._data = data;
}
}
return this;
};
/**
* Modifies the document. This will update the document with the data held in 'update'.
*
* @param {Object} query The query that must be matched for a document to be
* operated on.
* @param {Object} update The object containing updated key/values. Any keys that
* match keys on the existing document will be overwritten with this data. Any
* keys that do not currently exist on the document will be added to the document.
* @param {Object=} options An options object.
* @returns {undefined} The document data is modified in place; nothing is returned.
*/
Document.prototype.update = function (query, update, options) {
this.updateObject(this._data, update, query, options);
};
/**
* Internal method for document updating.
* @param {Object} doc The document to update.
* @param {Object} update The object with key/value pairs to update the document with.
* @param {Object} query The query object that we need to match to perform an update.
* @param {Object} options An options object.
* @param {String} path The current recursive path.
* @param {String} opType The type of update operation to perform, if none is specified
* default is to set new data against matching fields.
* @returns {Boolean} True if the document was updated with new / changed data or
* false if it was not updated because the data was the same.
* @private
*/
Document.prototype.updateObject = Collection.prototype.updateObject;
/**
* Determines if the passed key has an array positional mark (a dollar at the end
* of its name).
* @param {String} key The key to check.
* @returns {Boolean} True if it is a positional or false if not.
* @private
*/
Document.prototype._isPositionalKey = function (key) {
return key.substr(key.length - 2, 2) === '.$';
};
/**
* Updates a property on an object depending on if the collection is
* currently running data-binding or not.
* @param {Object} doc The object whose property is to be updated.
* @param {String} prop The property to update.
* @param {*} val The new value of the property.
* @private
*/
Document.prototype._updateProperty = function (doc, prop, val) {
if (this._linked) {
window.jQuery.observable(doc).setProperty(prop, val);
if (this.debug()) {
console.log('ForerunnerDB.Document: Setting data-bound document property "' + prop + '" for collection "' + this.name() + '"');
}
} else {
doc[prop] = val;
if (this.debug()) {
console.log('ForerunnerDB.Document: Setting non-data-bound document property "' + prop + '" for collection "' + this.name() + '"');
}
}
};
/**
* Increments a value for a property on a document by the passed number.
* @param {Object} doc The document to modify.
* @param {String} prop The property to modify.
* @param {Number} val The amount to increment by.
* @private
*/
Document.prototype._updateIncrement = function (doc, prop, val) {
if (this._linked) {
window.jQuery.observable(doc).setProperty(prop, doc[prop] + val);
} else {
doc[prop] += val;
}
};
/**
* Changes the index of an item in the passed array.
* @param {Array} arr The array to modify.
* @param {Number} indexFrom The index to move the item from.
* @param {Number} indexTo The index to move the item to.
* @private
*/
Document.prototype._updateSpliceMove = function (arr, indexFrom, indexTo) {
if (this._linked) {
window.jQuery.observable(arr).move(indexFrom, indexTo);
if (this.debug()) {
console.log('ForerunnerDB.Document: Moving data-bound document array index from "' + indexFrom + '" to "' + indexTo + '" for collection "' + this.name() + '"');
}
} else {
arr.splice(indexTo, 0, arr.splice(indexFrom, 1)[0]);
if (this.debug()) {
console.log('ForerunnerDB.Document: Moving non-data-bound document array index from "' + indexFrom + '" to "' + indexTo + '" for collection "' + this.name() + '"');
}
}
};
/**
* Inserts an item into the passed array at the specified index.
* @param {Array} arr The array to insert into.
* @param {Number} index The index to insert at.
* @param {Object} doc The document to insert.
* @private
*/
Document.prototype._updateSplicePush = function (arr, index, doc) {
if (arr.length > index) {
if (this._linked) {
window.jQuery.observable(arr).insert(index, doc);
} else {
arr.splice(index, 0, doc);
}
} else {
if (this._linked) {
window.jQuery.observable(arr).insert(doc);
} else {
arr.push(doc);
}
}
};
/**
* Inserts an item at the end of an array.
* @param {Array} arr The array to insert the item into.
* @param {Object} doc The document to insert.
* @private
*/
Document.prototype._updatePush = function (arr, doc) {
if (this._linked) {
window.jQuery.observable(arr).insert(doc);
} else {
arr.push(doc);
}
};
/**
* Removes an item from the passed array.
* @param {Array} arr The array to modify.
* @param {Number} index The index of the item in the array to remove.
* @private
*/
Document.prototype._updatePull = function (arr, index) {
if (this._linked) {
window.jQuery.observable(arr).remove(index);
} else {
arr.splice(index, 1);
}
};
/**
* Multiplies a value for a property on a document by the passed number.
* @param {Object} doc The document to modify.
* @param {String} prop The property to modify.
* @param {Number} val The amount to multiply by.
* @private
*/
Document.prototype._updateMultiply = function (doc, prop, val) {
if (this._linked) {
window.jQuery.observable(doc).setProperty(prop, doc[prop] * val);
} else {
doc[prop] *= val;
}
};
/**
* Renames a property on a document to the passed property.
* @param {Object} doc The document to modify.
* @param {String} prop The property to rename.
* @param {String} val The new property name.
* @private
*/
Document.prototype._updateRename = function (doc, prop, val) {
var existingVal = doc[prop];
if (this._linked) {
window.jQuery.observable(doc).setProperty(val, existingVal);
window.jQuery.observable(doc).removeProperty(prop);
} else {
doc[val] = existingVal;
delete doc[prop];
}
};
/**
* Deletes a property on a document.
* @param {Object} doc The document to modify.
* @param {String} prop The property to delete.
* @private
*/
Document.prototype._updateUnset = function (doc, prop) {
if (this._linked) {
window.jQuery.observable(doc).removeProperty(prop);
} else {
delete doc[prop];
}
};
/**
* Pops the last item or shifts the first item off an array on a document.
* @param {Array} doc The array to modify.
* @param {Number} val Pass 1 to remove the last item or -1 to remove the first item.
* @return {Boolean} True if an item was removed, false if not.
* @private
*/
Document.prototype._updatePop = function (doc, val) {
var index,
updated = false;
if (doc.length > 0) {
if (this._linked) {
if (val === 1) {
index = doc.length - 1;
} else if (val === -1) {
index = 0;
}
if (index > -1) {
window.jQuery.observable(doc).remove(index);
updated = true;
}
} else {
if (val === 1) {
doc.pop();
updated = true;
} else if (val === -1) {
doc.shift();
updated = true;
}
}
}
return updated;
};
Document.prototype.drop = function () {
if (this._state !== 'dropped') {
if (this._db && this._name) {
if (this._db && this._db._document && this._db._document[this._name]) {
this._state = 'dropped';
delete this._db._document[this._name];
delete this._data;
this.emit('drop', this);
return true;
}
}
} else {
return true;
}
return false;
};
// Extend DB to include documents
Core.prototype.init = function () {
CoreInit.apply(this, arguments);
};
Core.prototype.document = function (documentName) {
if (documentName) {
this._document = this._document || {};
this._document[documentName] = this._document[documentName] || new Document(documentName).db(this);
return this._document[documentName];
} else {
// Return an object of document data
return this._document;
}
};
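// Usage sketch (illustrative only): documents are single-object stores addressed by name;
// the document name and fields below are made up:
//
//     var settings = db.document('settings');
//     settings.setData({ theme: 'dark', perPage: 10 });
//     settings.update({}, { perPage: 25 });   // merges the new value into the existing data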
/**
* Returns an array of documents the DB currently has.
* @returns {Array} An array of objects containing details of each document
* the database is currently managing.
*/
Core.prototype.documents = function () {
var arr = [],
i;
for (i in this._document) {
if (this._document.hasOwnProperty(i)) {
arr.push({
name: i
});
}
}
return arr;
};
Shared.finishModule('Document');
module.exports = Document;
}());
},{"./Collection":3,"./Shared":29}],8:[function(_dereq_,module,exports){
"use strict";
// Import external names locally
var Shared,
Collection,
CollectionInit,
Overload;
Shared = _dereq_('./Shared');
Overload = _dereq_('./Overload');
/**
* The constructor.
*
* @constructor
*/
var Highchart = function (collection, options) {
this.init.apply(this, arguments);
};
Highchart.prototype.init = function (collection, options) {
this._options = options;
this._selector = window.jQuery(this._options.selector);
if (!this._selector[0]) {
throw('ForerunnerDB.Highchart "' + collection.name() + '": Chart target element does not exist via selector: ' + this._options.selector);
}
this._listeners = {};
this._collection = collection;
// Setup the chart
this._options.series = [];
// Disable attribution on highcharts
options.chartOptions = options.chartOptions || {};
options.chartOptions.credits = false;
// Set the data for the chart
var data,
seriesObj,
chartData;
switch (this._options.type) {
case 'pie':
// Create chart from data
this._selector.highcharts(this._options.chartOptions);
this._chart = this._selector.highcharts();
// Generate graph data from collection data
data = this._collection.find();
seriesObj = {
allowPointSelect: true,
cursor: 'pointer',
dataLabels: {
enabled: true,
format: '<b>{point.name}</b>: {y} ({point.percentage:.0f}%)',
style: {
color: (window.Highcharts.theme && window.Highcharts.theme.contrastTextColor) || 'black'
}
}
};
chartData = this.pieDataFromCollectionData(data, this._options.keyField, this._options.valField);
window.jQuery.extend(seriesObj, this._options.seriesOptions);
window.jQuery.extend(seriesObj, {
name: this._options.seriesName,
data: chartData
});
this._chart.addSeries(seriesObj, true, true);
break;
case 'line':
case 'area':
case 'column':
case 'bar':
// Generate graph data from collection data
chartData = this.seriesDataFromCollectionData(
this._options.seriesField,
this._options.keyField,
this._options.valField,
this._options.orderBy
);
this._options.chartOptions.xAxis = chartData.xAxis;
this._options.chartOptions.series = chartData.series;
this._selector.highcharts(this._options.chartOptions);
this._chart = this._selector.highcharts();
break;
default:
throw('ForerunnerDB.Highchart "' + collection.name() + '": Chart type specified is not currently supported by ForerunnerDB: ' + this._options.type);
}
// Hook the collection events to auto-update the chart
this._hookEvents();
};
Shared.addModule('Highchart', Highchart);
Collection = Shared.modules.Collection;
CollectionInit = Collection.prototype.init;
Shared.mixin(Highchart.prototype, 'Mixin.Events');
/**
* Gets / sets the current state.
* @param {String=} val The name of the state to set.
* @returns {*}
*/
Shared.synthesize(Highchart.prototype, 'state');
/**
* Generate pie-chart series data from the given collection data array.
* @param data
* @param keyField
* @param valField
* @returns {Array}
*/
Highchart.prototype.pieDataFromCollectionData = function (data, keyField, valField) {
var graphData = [],
i;
for (i = 0; i < data.length; i++) {
graphData.push([data[i][keyField], data[i][valField]]);
}
return graphData;
};
/**
* Generate line-chart series data from the given collection data array.
* @param seriesField
* @param keyField
* @param valField
* @param orderBy
*/
Highchart.prototype.seriesDataFromCollectionData = function (seriesField, keyField, valField, orderBy) {
var data = this._collection.distinct(seriesField),
seriesData = [],
xAxis = {
categories: []
},
seriesName,
query,
dataSearch,
seriesValues,
i, k;
// What we WANT to output:
/*series: [{
name: 'Responses',
data: [7.0, 6.9, 9.5, 14.5, 18.2, 21.5, 25.2, 26.5, 23.3, 18.3, 13.9, 9.6]
}]*/
// Loop keys
for (i = 0; i < data.length; i++) {
seriesName = data[i];
query = {};
query[seriesField] = seriesName;
seriesValues = [];
dataSearch = this._collection.find(query, {
orderBy: orderBy
});
// Loop the keySearch data and grab the value for each item
for (k = 0; k < dataSearch.length; k++) {
xAxis.categories.push(dataSearch[k][keyField]);
seriesValues.push(dataSearch[k][valField]);
}
seriesData.push({
name: seriesName,
data: seriesValues
});
}
return {
xAxis: xAxis,
series: seriesData
};
};
/**
* Hook the events the chart needs to know about from the internal collection.
* @private
*/
Highchart.prototype._hookEvents = function () {
var self = this;
self._collection.on('change', function () { self._changeListener.apply(self, arguments); });
// If the collection is dropped, clean up after ourselves
self._collection.on('drop', function () { self.drop.apply(self, arguments); });
};
/**
* Handles changes to the collection data that the chart is reading from and then
* updates the data in the chart display.
* @private
*/
Highchart.prototype._changeListener = function () {
var self = this;
// Update the series data on the chart
if(typeof self._collection !== 'undefined' && self._chart) {
var data = self._collection.find(),
i;
switch (self._options.type) {
case 'pie':
self._chart.series[0].setData(
self.pieDataFromCollectionData(
data,
self._options.keyField,
self._options.valField
),
true,
true
);
break;
case 'bar':
case 'line':
case 'area':
case 'column':
var seriesData = self.seriesDataFromCollectionData(
self._options.seriesField,
self._options.keyField,
self._options.valField,
self._options.orderBy
);
self._chart.xAxis[0].setCategories(
seriesData.xAxis.categories
);
for (i = 0; i < seriesData.series.length; i++) {
if (self._chart.series[i]) {
// Series exists, set its data
self._chart.series[i].setData(
seriesData.series[i].data,
true,
true
);
} else {
// Series data does not yet exist, add a new series
self._chart.addSeries(
seriesData.series[i],
true,
true
);
}
}
break;
default:
break;
}
}
};
/**
* Destroys the chart and all internal references.
* @returns {Boolean}
*/
Highchart.prototype.drop = function () {
if (this._state !== 'dropped') {
this._state = 'dropped';
if (this._chart) {
this._chart.destroy();
}
if (this._collection) {
this._collection.off('change', this._changeListener);
this._collection.off('drop', this.drop);
if (this._collection._highcharts) {
delete this._collection._highcharts[this._options.selector];
}
}
delete this._chart;
delete this._options;
delete this._collection;
this.emit('drop', this);
return true;
} else {
return true;
}
};
// Extend collection with highchart init
Collection.prototype.init = function () {
this._highcharts = {};
CollectionInit.apply(this, arguments);
};
/**
* Creates a pie chart from the collection.
* @type {Overload}
*/
Collection.prototype.pieChart = new Overload({
/**
* Chart via options object.
* @param {Object} options The options object.
* @returns {*}
*/
'object': function (options) {
options.type = 'pie';
options.chartOptions = options.chartOptions || {};
options.chartOptions.chart = options.chartOptions.chart || {};
options.chartOptions.chart.type = 'pie';
if (!this._highcharts[options.selector]) {
// Store new chart in charts array
this._highcharts[options.selector] = new Highchart(this, options);
}
return this._highcharts[options.selector];
},
/**
* Chart via defined params and an options object.
* @param {String|jQuery} selector The element to render the chart to.
* @param {String} keyField The field to use as the data key.
* @param {String} valField The field to use as the data value.
* @param {String} seriesName The name of the series to display on the chart.
* @param {Object} options The options object.
*/
'*, string, string, string, ...': function (selector, keyField, valField, seriesName, options) {
options = options || {};
options.selector = selector;
options.keyField = keyField;
options.valField = valField;
options.seriesName = seriesName;
// Call the main chart method
this.pieChart(options);
}
});
/**
* Creates a line chart from the collection.
* @type {Overload}
*/
Collection.prototype.lineChart = new Overload({
/**
* Chart via options object.
* @param {Object} options The options object.
* @returns {*}
*/
'object': function (options) {
options.type = 'line';
options.chartOptions = options.chartOptions || {};
options.chartOptions.chart = options.chartOptions.chart || {};
options.chartOptions.chart.type = 'line';
if (!this._highcharts[options.selector]) {
// Store new chart in charts array
this._highcharts[options.selector] = new Highchart(this, options);
}
return this._highcharts[options.selector];
},
/**
* Chart via defined params and an options object.
* @param {String|jQuery} selector The element to render the chart to.
* @param {String} seriesField The name of the series to plot.
* @param {String} keyField The field to use as the data key.
* @param {String} valField The field to use as the data value.
* @param {Object} options The options object.
*/
'*, string, string, string, ...': function (selector, seriesField, keyField, valField, options) {
options = options || {};
options.seriesField = seriesField;
options.selector = selector;
options.keyField = keyField;
options.valField = valField;
// Call the main chart method
this.lineChart(options);
}
});
/**
* Creates an area chart from the collection.
* @type {Overload}
*/
Collection.prototype.areaChart = new Overload({
/**
* Chart via options object.
* @param {Object} options The options object.
* @returns {*}
*/
'object': function (options) {
options.type = 'area';
options.chartOptions = options.chartOptions || {};
options.chartOptions.chart = options.chartOptions.chart || {};
options.chartOptions.chart.type = 'area';
if (!this._highcharts[options.selector]) {
// Store new chart in charts array
this._highcharts[options.selector] = new Highchart(this, options);
}
return this._highcharts[options.selector];
},
/**
* Chart via defined params and an options object.
* @param {String|jQuery} selector The element to render the chart to.
* @param {String} seriesField The name of the series to plot.
* @param {String} keyField The field to use as the data key.
* @param {String} valField The field to use as the data value.
* @param {Object} options The options object.
*/
'*, string, string, string, ...': function (selector, seriesField, keyField, valField, options) {
options = options || {};
options.seriesField = seriesField;
options.selector = selector;
options.keyField = keyField;
options.valField = valField;
// Call the main chart method
this.areaChart(options);
}
});
/**
* Creates a column chart from the collection.
* @type {Overload}
*/
Collection.prototype.columnChart = new Overload({
/**
* Chart via options object.
* @param {Object} options The options object.
* @returns {*}
*/
'object': function (options) {
options.type = 'column';
options.chartOptions = options.chartOptions || {};
options.chartOptions.chart = options.chartOptions.chart || {};
options.chartOptions.chart.type = 'column';
if (!this._highcharts[options.selector]) {
// Store new chart in charts array
this._highcharts[options.selector] = new Highchart(this, options);
}
return this._highcharts[options.selector];
},
/**
* Chart via defined params and an options object.
* @param {String|jQuery} selector The element to render the chart to.
* @param {String} seriesField The name of the series to plot.
* @param {String} keyField The field to use as the data key.
* @param {String} valField The field to use as the data value.
* @param {Object} options The options object.
*/
'*, string, string, string, ...': function (selector, seriesField, keyField, valField, options) {
options = options || {};
options.seriesField = seriesField;
options.selector = selector;
options.keyField = keyField;
options.valField = valField;
// Call the main chart method
this.columnChart(options);
}
});
/**
* Creates a bar chart from the collection.
* @type {Overload}
*/
Collection.prototype.barChart = new Overload({
/**
* Chart via options object.
* @param {Object} options The options object.
* @returns {*}
*/
'object': function (options) {
options.type = 'bar';
options.chartOptions = options.chartOptions || {};
options.chartOptions.chart = options.chartOptions.chart || {};
options.chartOptions.chart.type = 'bar';
if (!this._highcharts[options.selector]) {
// Store new chart in charts array
this._highcharts[options.selector] = new Highchart(this, options);
}
return this._highcharts[options.selector];
},
/**
* Chart via defined params and an options object.
* @param {String|jQuery} selector The element to render the chart to.
* @param {String} seriesField The name of the series to plot.
* @param {String} keyField The field to use as the data key.
* @param {String} valField The field to use as the data value.
* @param {Object} options The options object.
*/
'*, string, string, string, ...': function (selector, seriesField, keyField, valField, options) {
options = options || {};
options.seriesField = seriesField;
options.selector = selector;
options.keyField = keyField;
options.valField = valField;
// Call the main chart method
this.barChart(options);
}
});
/**
* Creates a stacked bar chart from the collection.
* @type {Overload}
*/
Collection.prototype.stackedBarChart = new Overload({
/**
* Chart via options object.
* @param {Object} options The options object.
* @returns {*}
*/
'object': function (options) {
options.type = 'bar';
options.chartOptions = options.chartOptions || {};
options.chartOptions.chart = options.chartOptions.chart || {};
options.chartOptions.chart.type = 'bar';
options.plotOptions = options.plotOptions || {};
options.plotOptions.series = options.plotOptions.series || {};
options.plotOptions.series.stacking = options.plotOptions.series.stacking || 'normal';
if (!this._highcharts[options.selector]) {
// Store new chart in charts array
this._highcharts[options.selector] = new Highchart(this, options);
}
return this._highcharts[options.selector];
},
/**
* Chart via defined params and an options object.
* @param {String|jQuery} selector The element to render the chart to.
* @param {String} seriesField The name of the series to plot.
* @param {String} keyField The field to use as the data key.
* @param {String} valField The field to use as the data value.
* @param {Object} options The options object.
*/
'*, string, string, string, ...': function (selector, seriesField, keyField, valField, options) {
options = options || {};
options.seriesField = seriesField;
options.selector = selector;
options.keyField = keyField;
options.valField = valField;
// Call the main chart method
this.stackedBarChart(options);
}
});
/**
* Removes a chart from the page by its selector.
* @param {String} selector The chart selector.
*/
Collection.prototype.dropChart = function (selector) {
if (this._highcharts && this._highcharts[selector]) {
this._highcharts[selector].drop();
}
};
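// Usage sketch (illustrative only): charts are created directly from a collection, and
// both jQuery and Highcharts must already be loaded on the page with the target element
// present in the DOM. The selector and field names below are made up:
//
//     db.collection('sales').pieChart('#salesChart', 'product', 'total', 'Sales', {
//         chartOptions: { title: { text: 'Sales by product' } }
//     });
//     db.collection('sales').dropChart('#salesChart');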
Shared.finishModule('Highchart');
module.exports = Highchart;
},{"./Overload":24,"./Shared":29}],9:[function(_dereq_,module,exports){
"use strict";
/*
name
id
rebuild
state
match
lookup
*/
var Shared = _dereq_('./Shared'),
Path = _dereq_('./Path'),
btree = function () {};
/**
* The index class used to instantiate binary tree indexes that the database can
* use to speed up queries on collections and views.
* @constructor
*/
var IndexBinaryTree = function () {
this.init.apply(this, arguments);
};
IndexBinaryTree.prototype.init = function (keys, options, collection) {
this._btree = new (btree.create(2, this.sortAsc))();
this._size = 0;
this._id = this._itemKeyHash(keys, keys);
this.unique(options && options.unique ? options.unique : false);
if (keys !== undefined) {
this.keys(keys);
}
if (collection !== undefined) {
this.collection(collection);
}
this.name(options && options.name ? options.name : this._id);
};
Shared.addModule('IndexBinaryTree', IndexBinaryTree);
Shared.mixin(IndexBinaryTree.prototype, 'Mixin.ChainReactor');
Shared.mixin(IndexBinaryTree.prototype, 'Mixin.Sorting');
IndexBinaryTree.prototype.id = function () {
return this._id;
};
IndexBinaryTree.prototype.state = function () {
return this._state;
};
IndexBinaryTree.prototype.size = function () {
return this._size;
};
Shared.synthesize(IndexBinaryTree.prototype, 'data');
Shared.synthesize(IndexBinaryTree.prototype, 'name');
Shared.synthesize(IndexBinaryTree.prototype, 'collection');
Shared.synthesize(IndexBinaryTree.prototype, 'type');
Shared.synthesize(IndexBinaryTree.prototype, 'unique');
IndexBinaryTree.prototype.keys = function (val) {
if (val !== undefined) {
this._keys = val;
// Count the keys
this._keyCount = (new Path()).parse(this._keys).length;
return this;
}
return this._keys;
};
IndexBinaryTree.prototype.rebuild = function () {
// Do we have a collection?
if (this._collection) {
// Get sorted data
var collection = this._collection.subset({}, {
$decouple: false,
$orderBy: this._keys
}),
collectionData = collection.find(),
dataIndex,
dataCount = collectionData.length;
// Clear the index data for the index
this._btree = new (btree.create(2, this.sortAsc))();
if (this._unique) {
this._uniqueLookup = {};
}
// Loop the collection data
for (dataIndex = 0; dataIndex < dataCount; dataIndex++) {
this.insert(collectionData[dataIndex]);
}
}
this._state = {
name: this._name,
keys: this._keys,
indexSize: this._size,
built: new Date(),
updated: new Date(),
ok: true
};
};
IndexBinaryTree.prototype.insert = function (dataItem, options) {
var uniqueFlag = this._unique,
uniqueHash,
dataItemHash = this._itemKeyHash(dataItem, this._keys),
keyArr;
if (uniqueFlag) {
uniqueHash = this._itemHash(dataItem, this._keys);
this._uniqueLookup[uniqueHash] = dataItem;
}
// We store multiple items that match a key inside an array
// that is then stored against that key in the tree...
// Check if item exists for this key already
keyArr = this._btree.get(dataItemHash);
// Check if the array exists
if (keyArr === undefined) {
// Generate an array for this key first
keyArr = [];
// Put the new array into the tree under the key
this._btree.put(dataItemHash, keyArr);
}
// Push the item into the array
keyArr.push(dataItem);
this._size++;
};
IndexBinaryTree.prototype.remove = function (dataItem, options) {
var uniqueFlag = this._unique,
uniqueHash,
dataItemHash = this._itemKeyHash(dataItem, this._keys),
keyArr,
itemIndex;
if (uniqueFlag) {
uniqueHash = this._itemHash(dataItem, this._keys);
delete this._uniqueLookup[uniqueHash];
}
// Try and get the array for the item hash key
keyArr = this._btree.get(dataItemHash);
if (keyArr !== undefined) {
// The key array exits, remove the item from the key array
itemIndex = keyArr.indexOf(dataItem);
if (itemIndex > -1) {
// Check the length of the array
if (keyArr.length === 1) {
// This item is the last in the array, just kill the tree entry
this._btree.del(dataItemHash);
} else {
// Remove the item
keyArr.splice(itemIndex, 1);
}
this._size--;
}
}
};
IndexBinaryTree.prototype.violation = function (dataItem) {
// Generate item hash
var uniqueHash = this._itemHash(dataItem, this._keys);
// Check if the item breaks the unique constraint
return Boolean(this._uniqueLookup[uniqueHash]);
};
IndexBinaryTree.prototype.hashViolation = function (uniqueHash) {
// Check if the item breaks the unique constraint
return Boolean(this._uniqueLookup[uniqueHash]);
};
IndexBinaryTree.prototype.lookup = function (query) {
return this._data[this._itemHash(query, this._keys)] || [];
};
IndexBinaryTree.prototype.match = function (query, options) {
// Check if the passed query has data in the keys our index
// operates on and if so, is the query sort matching our order
var pathSolver = new Path();
var indexKeyArr = pathSolver.parseArr(this._keys),
queryArr = pathSolver.parseArr(query),
matchedKeys = [],
matchedKeyCount = 0,
i;
// Loop the query array and check the order of keys against the
// index key array to see if this index can be used
for (i = 0; i < indexKeyArr.length; i++) {
if (queryArr[i] === indexKeyArr[i]) {
matchedKeyCount++;
matchedKeys.push(queryArr[i]);
} else {
// Query match failed - partial key matches are not supported by this index
return {
matchedKeys: [],
totalKeyCount: queryArr.length,
score: 0
};
}
}
return {
matchedKeys: matchedKeys,
totalKeyCount: queryArr.length,
score: matchedKeyCount
};
//return pathSolver.countObjectPaths(this._keys, query);
};
IndexBinaryTree.prototype._itemHash = function (item, keys) {
var path = new Path(),
pathData,
hash = '',
k;
pathData = path.parse(keys);
for (k = 0; k < pathData.length; k++) {
if (hash) { hash += '_'; }
hash += path.value(item, pathData[k].path).join(':');
}
return hash;
};
IndexBinaryTree.prototype._itemKeyHash = function (item, keys) {
var path = new Path(),
pathData,
hash = '',
k;
pathData = path.parse(keys);
for (k = 0; k < pathData.length; k++) {
if (hash) { hash += '_'; }
hash += path.keyValue(item, pathData[k].path);
}
return hash;
};
IndexBinaryTree.prototype._itemHashArr = function (item, keys) {
var path = new Path(),
pathData,
//hash = '',
hashArr = [],
valArr,
i, k, j;
pathData = path.parse(keys);
for (k = 0; k < pathData.length; k++) {
valArr = path.value(item, pathData[k].path);
for (i = 0; i < valArr.length; i++) {
if (k === 0) {
// Setup the initial hash array
hashArr.push(valArr[i]);
} else {
// Loop the hash array and concat the value to it
for (j = 0; j < hashArr.length; j++) {
hashArr[j] = hashArr[j] + '_' + valArr[i];
}
}
}
}
return hashArr;
};
Shared.finishModule('IndexBinaryTree');
module.exports = IndexBinaryTree;
},{"./Path":26,"./Shared":29}],10:[function(_dereq_,module,exports){
"use strict";
var Shared = _dereq_('./Shared'),
Path = _dereq_('./Path');
/**
* The index class used to instantiate hash map indexes that the database can
* use to speed up queries on collections and views.
* @constructor
*/
var IndexHashMap = function () {
this.init.apply(this, arguments);
};
IndexHashMap.prototype.init = function (keys, options, collection) {
this._crossRef = {};
this._size = 0;
this._id = this._itemKeyHash(keys, keys);
this.data({});
this.unique(options && options.unique ? options.unique : false);
if (keys !== undefined) {
this.keys(keys);
}
if (collection !== undefined) {
this.collection(collection);
}
this.name(options && options.name ? options.name : this._id);
};
Shared.addModule('IndexHashMap', IndexHashMap);
Shared.mixin(IndexHashMap.prototype, 'Mixin.ChainReactor');
IndexHashMap.prototype.id = function () {
return this._id;
};
IndexHashMap.prototype.state = function () {
return this._state;
};
IndexHashMap.prototype.size = function () {
return this._size;
};
Shared.synthesize(IndexHashMap.prototype, 'data');
Shared.synthesize(IndexHashMap.prototype, 'name');
Shared.synthesize(IndexHashMap.prototype, 'collection');
Shared.synthesize(IndexHashMap.prototype, 'type');
Shared.synthesize(IndexHashMap.prototype, 'unique');
IndexHashMap.prototype.keys = function (val) {
if (val !== undefined) {
this._keys = val;
// Count the keys
this._keyCount = (new Path()).parse(this._keys).length;
return this;
}
return this._keys;
};
IndexHashMap.prototype.rebuild = function () {
// Do we have a collection?
if (this._collection) {
// Get sorted data
var collection = this._collection.subset({}, {
$decouple: false,
$orderBy: this._keys
}),
collectionData = collection.find(),
dataIndex,
dataCount = collectionData.length;
// Clear the index data for the index
this._data = {};
if (this._unique) {
this._uniqueLookup = {};
}
// Loop the collection data
for (dataIndex = 0; dataIndex < dataCount; dataIndex++) {
this.insert(collectionData[dataIndex]);
}
}
this._state = {
name: this._name,
keys: this._keys,
indexSize: this._size,
built: new Date(),
updated: new Date(),
ok: true
};
};
IndexHashMap.prototype.insert = function (dataItem, options) {
var uniqueFlag = this._unique,
uniqueHash,
itemHashArr,
hashIndex;
if (uniqueFlag) {
uniqueHash = this._itemHash(dataItem, this._keys);
this._uniqueLookup[uniqueHash] = dataItem;
}
// Generate item hash
itemHashArr = this._itemHashArr(dataItem, this._keys);
// Get the path search results and store them
for (hashIndex = 0; hashIndex < itemHashArr.length; hashIndex++) {
this.pushToPathValue(itemHashArr[hashIndex], dataItem);
}
};
IndexHashMap.prototype.remove = function (dataItem, options) {
var uniqueFlag = this._unique,
uniqueHash,
itemHashArr,
hashIndex;
if (uniqueFlag) {
uniqueHash = this._itemHash(dataItem, this._keys);
delete this._uniqueLookup[uniqueHash];
}
// Generate item hash
itemHashArr = this._itemHashArr(dataItem, this._keys);
// Get the path search results and store them
for (hashIndex = 0; hashIndex < itemHashArr.length; hashIndex++) {
this.pullFromPathValue(itemHashArr[hashIndex], dataItem);
}
};
IndexHashMap.prototype.violation = function (dataItem) {
// Generate item hash
var uniqueHash = this._itemHash(dataItem, this._keys);
// Check if the item breaks the unique constraint
return Boolean(this._uniqueLookup[uniqueHash]);
};
IndexHashMap.prototype.hashViolation = function (uniqueHash) {
// Check if the item breaks the unique constraint
return Boolean(this._uniqueLookup[uniqueHash]);
};
IndexHashMap.prototype.pushToPathValue = function (hash, obj) {
var pathValArr = this._data[hash] = this._data[hash] || [];
// Make sure we have not already indexed this object at this path/value
if (pathValArr.indexOf(obj) === -1) {
// Index the object
pathValArr.push(obj);
// Record the reference to this object in our index size
this._size++;
// Cross-reference this association for later lookup
this.pushToCrossRef(obj, pathValArr);
}
};
IndexHashMap.prototype.pullFromPathValue = function (hash, obj) {
var pathValArr = this._data[hash],
indexOfObject;
// Make sure we have already indexed this object at this path/value
indexOfObject = pathValArr.indexOf(obj);
if (indexOfObject > -1) {
// Un-index the object
pathValArr.splice(indexOfObject, 1);
// Record the reference to this object in our index size
this._size--;
// Remove object cross-reference
this.pullFromCrossRef(obj, pathValArr);
}
// Check if we should remove the path value array
if (!pathValArr.length) {
// Remove the array
delete this._data[hash];
}
};
IndexHashMap.prototype.pull = function (obj) {
// Get all places the object has been used and remove them
var id = obj[this._collection.primaryKey()],
crossRefArr = this._crossRef[id],
arrIndex,
arrCount = crossRefArr.length,
arrItem;
for (arrIndex = 0; arrIndex < arrCount; arrIndex++) {
arrItem = crossRefArr[arrIndex];
// Remove item from this index lookup array
this._pullFromArray(arrItem, obj);
}
// Record the reference to this object in our index size
this._size--;
// Now remove the cross-reference entry for this object
delete this._crossRef[id];
};
IndexHashMap.prototype._pullFromArray = function (arr, obj) {
var arrCount = arr.length;
while (arrCount--) {
if (arr[arrCount] === obj) {
arr.splice(arrCount, 1);
}
}
};
IndexHashMap.prototype.pushToCrossRef = function (obj, pathValArr) {
var id = obj[this._collection.primaryKey()],
crObj;
this._crossRef[id] = this._crossRef[id] || [];
// Check if the cross-reference to the pathVal array already exists
crObj = this._crossRef[id];
if (crObj.indexOf(pathValArr) === -1) {
// Add the cross-reference
crObj.push(pathValArr);
}
};
IndexHashMap.prototype.pullFromCrossRef = function (obj, pathValArr) {
var id = obj[this._collection.primaryKey()];
delete this._crossRef[id];
};
IndexHashMap.prototype.lookup = function (query) {
return this._data[this._itemHash(query, this._keys)] || [];
};
IndexHashMap.prototype.match = function (query, options) {
// Check if the passed query has data in the keys our index
// operates on and if so, is the query sort matching our order
var pathSolver = new Path();
var indexKeyArr = pathSolver.parseArr(this._keys),
queryArr = pathSolver.parseArr(query),
matchedKeys = [],
matchedKeyCount = 0,
i;
// Loop the query array and check the order of keys against the
// index key array to see if this index can be used
for (i = 0; i < indexKeyArr.length; i++) {
if (queryArr[i] === indexKeyArr[i]) {
matchedKeyCount++;
matchedKeys.push(queryArr[i]);
} else {
// Query match failed - this is a hash map index so partial key match won't work
return {
matchedKeys: [],
totalKeyCount: queryArr.length,
score: 0
};
}
}
return {
matchedKeys: matchedKeys,
totalKeyCount: queryArr.length,
score: matchedKeyCount
};
//return pathSolver.countObjectPaths(this._keys, query);
};
IndexHashMap.prototype._itemHash = function (item, keys) {
var path = new Path(),
pathData,
hash = '',
k;
pathData = path.parse(keys);
for (k = 0; k < pathData.length; k++) {
if (hash) { hash += '_'; }
hash += path.value(item, pathData[k].path).join(':');
}
return hash;
};
IndexHashMap.prototype._itemKeyHash = function (item, keys) {
var path = new Path(),
pathData,
hash = '',
k;
pathData = path.parse(keys);
for (k = 0; k < pathData.length; k++) {
if (hash) { hash += '_'; }
hash += path.keyValue(item, pathData[k].path);
}
return hash;
};
IndexHashMap.prototype._itemHashArr = function (item, keys) {
var path = new Path(),
pathData,
//hash = '',
hashArr = [],
valArr,
i, k, j;
pathData = path.parse(keys);
for (k = 0; k < pathData.length; k++) {
valArr = path.value(item, pathData[k].path);
for (i = 0; i < valArr.length; i++) {
if (k === 0) {
// Setup the initial hash array
hashArr.push(valArr[i]);
} else {
// Loop the hash array and concat the value to it
for (j = 0; j < hashArr.length; j++) {
hashArr[j] = hashArr[j] + '_' + valArr[i];
}
}
}
}
return hashArr;
};
Shared.finishModule('IndexHashMap');
module.exports = IndexHashMap;
},{"./Path":26,"./Shared":29}],11:[function(_dereq_,module,exports){
"use strict";
var Shared = _dereq_('./Shared');
/**
* The key/value store class used to hold basic in-memory key/value data
* that can be queried for quick retrieval. Mostly used for collection
* primary key indexes and lookups.
* @param {String=} name Optional KV store name.
* @constructor
*/
var KeyValueStore = function (name) {
this.init.apply(this, arguments);
};
KeyValueStore.prototype.init = function (name) {
this._name = name;
this._data = {};
this._primaryKey = '_id';
};
Shared.addModule('KeyValueStore', KeyValueStore);
Shared.mixin(KeyValueStore.prototype, 'Mixin.ChainReactor');
/**
* Get / set the name of the key/value store.
* @param {String} val The name to set.
* @returns {*}
*/
Shared.synthesize(KeyValueStore.prototype, 'name');
/**
* Get / set the primary key.
* @param {String} key The key to set.
* @returns {*}
*/
KeyValueStore.prototype.primaryKey = function (key) {
if (key !== undefined) {
this._primaryKey = key;
return this;
}
return this._primaryKey;
};
/**
* Removes all data from the store.
* @returns {*}
*/
KeyValueStore.prototype.truncate = function () {
this._data = {};
return this;
};
/**
* Sets data against a key in the store.
* @param {String} key The key to set data for.
* @param {*} value The value to assign to the key.
* @returns {*}
*/
KeyValueStore.prototype.set = function (key, value) {
this._data[key] = value ? value : true;
return this;
};
/**
* Gets data stored for the passed key.
* @param {String} key The key to get data for.
* @returns {*}
*/
KeyValueStore.prototype.get = function (key) {
return this._data[key];
};
/**
* Looks up data in the store using the passed lookup query.
* @param {*} obj A lookup query, can be a string key, an array of string keys,
* an object with further query clauses or a regular expression that should be
* run against all keys.
* @returns {*}
*/
KeyValueStore.prototype.lookup = function (obj) {
var pKeyVal = obj[this._primaryKey],
arrIndex,
arrCount,
lookupItem,
result;
if (pKeyVal instanceof Array) {
// An array of primary keys, find all matches
arrCount = pKeyVal.length;
result = [];
for (arrIndex = 0; arrIndex < arrCount; arrIndex++) {
lookupItem = this._data[pKeyVal[arrIndex]];
if (lookupItem) {
result.push(lookupItem);
}
}
return result;
} else if (pKeyVal instanceof RegExp) {
// Create new data
result = [];
for (arrIndex in this._data) {
if (this._data.hasOwnProperty(arrIndex)) {
if (pKeyVal.test(arrIndex)) {
result.push(this._data[arrIndex]);
}
}
}
return result;
} else if (typeof pKeyVal === 'object') {
// The primary key clause is an object, now we have to do some
// more extensive searching
if (pKeyVal.$ne) {
// Create new data
result = [];
for (arrIndex in this._data) {
if (this._data.hasOwnProperty(arrIndex)) {
if (arrIndex !== pKeyVal.$ne) {
result.push(this._data[arrIndex]);
}
}
}
return result;
}
if (pKeyVal.$in && (pKeyVal.$in instanceof Array)) {
// Create new data
result = [];
for (arrIndex in this._data) {
if (this._data.hasOwnProperty(arrIndex)) {
if (pKeyVal.$in.indexOf(arrIndex) > -1) {
result.push(this._data[arrIndex]);
}
}
}
return result;
}
if (pKeyVal.$nin && (pKeyVal.$nin instanceof Array)) {
// Create new data
result = [];
for (arrIndex in this._data) {
if (this._data.hasOwnProperty(arrIndex)) {
if (pKeyVal.$nin.indexOf(arrIndex) === -1) {
result.push(this._data[arrIndex]);
}
}
}
return result;
}
if (pKeyVal.$or && (pKeyVal.$or instanceof Array)) {
// Create new data
result = [];
for (arrIndex = 0; arrIndex < pKeyVal.$or.length; arrIndex++) {
result = result.concat(this.lookup(pKeyVal.$or[arrIndex]));
}
return result;
}
} else {
// Key is a basic lookup from string
lookupItem = this._data[pKeyVal];
if (lookupItem !== undefined) {
return [lookupItem];
} else {
return [];
}
}
};
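// Usage sketch (illustrative only): the store maps primary-key values to objects and is
// normally used internally for collection primary key indexes; lookup() accepts a plain
// key, an array of keys, a RegExp or a small clause object:
//
//     var kv = new KeyValueStore('pkIndex');
//     kv.primaryKey('_id');
//     kv.set('1', { _id: '1', name: 'a' });
//     kv.set('2', { _id: '2', name: 'b' });
//     kv.lookup({ _id: ['1', '2'] });       // -> both stored objects
//     kv.lookup({ _id: { $ne: '1' } });     // -> [{ _id: '2', name: 'b' }]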
/**
* Removes data for the given key from the store.
* @param {String} key The key to un-set.
* @returns {*}
*/
KeyValueStore.prototype.unSet = function (key) {
delete this._data[key];
return this;
};
/**
* Sets data for the given key in the store only where the given key
* does not already have a value in the store.
* @param {String} key The key to set data for.
* @param {*} value The value to assign to the key.
* @returns {Boolean} True if data was set or false if data already
* exists for the key.
*/
KeyValueStore.prototype.uniqueSet = function (key, value) {
if (this._data[key] === undefined) {
this._data[key] = value;
return true;
}
return false;
};
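/* Illustrative sketch (assumed consumer code): uniqueSet() lets callers
* emulate a unique index - the first write for a key wins and later writes
* report false. The key and value names here are hypothetical.
*
*   store.uniqueSet('email:jo@example.com', 'doc1'); // true, value stored
*   store.uniqueSet('email:jo@example.com', 'doc2'); // false, 'doc1' kept
*/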
Shared.finishModule('KeyValueStore');
module.exports = KeyValueStore;
},{"./Shared":29}],12:[function(_dereq_,module,exports){
"use strict";
var Shared = _dereq_('./Shared'),
Operation = _dereq_('./Operation');
/**
* The metrics class used to store details about operations.
* @constructor
*/
var Metrics = function () {
this.init.apply(this, arguments);
};
Metrics.prototype.init = function () {
this._data = [];
};
Shared.addModule('Metrics', Metrics);
Shared.mixin(Metrics.prototype, 'Mixin.ChainReactor');
/**
* Creates an operation within the metrics instance and if metrics
* are currently enabled (by calling the start() method) the operation
* is also stored in the metrics log.
* @param {String} name The name of the operation.
* @returns {Operation}
*/
Metrics.prototype.create = function (name) {
var op = new Operation(name);
if (this._enabled) {
this._data.push(op);
}
return op;
};
/**
* Starts logging operations.
* @returns {Metrics}
*/
Metrics.prototype.start = function () {
this._enabled = true;
return this;
};
/**
* Stops logging operations.
* @returns {Metrics}
*/
Metrics.prototype.stop = function () {
this._enabled = false;
return this;
};
/**
* Clears all logged operations.
* @returns {Metrics}
*/
Metrics.prototype.clear = function () {
this._data = [];
return this;
};
/**
* Returns an array of all logged operations.
* @returns {Array}
*/
Metrics.prototype.list = function () {
return this._data;
};
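/* Illustrative sketch (assumed consumer code): operations are only retained
* while logging is enabled, so a typical profiling pass looks like this.
* Operation#start()/stop() are defined in the Operation module later in
* this bundle.
*
*   var metrics = new Metrics();
*   metrics.start();
*   var op = metrics.create('find');
*   op.start();
*   // ... run the work being measured ...
*   op.stop();
*   metrics.stop();
*   metrics.list(); // [op]
*/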
Shared.finishModule('Metrics');
module.exports = Metrics;
},{"./Operation":23,"./Shared":29}],13:[function(_dereq_,module,exports){
"use strict";
var CRUD = {
preSetData: function () {
},
postSetData: function () {
}
};
module.exports = CRUD;
},{}],14:[function(_dereq_,module,exports){
"use strict";
// TODO: Document the methods in this mixin
var ChainReactor = {
chain: function (obj) {
this._chain = this._chain || [];
var index = this._chain.indexOf(obj);
if (index === -1) {
this._chain.push(obj);
}
},
unChain: function (obj) {
if (this._chain) {
var index = this._chain.indexOf(obj);
if (index > -1) {
this._chain.splice(index, 1);
}
}
},
chainSend: function (type, data, options) {
if (this._chain) {
var arr = this._chain,
count = arr.length,
index;
for (index = 0; index < count; index++) {
arr[index].chainReceive(this, type, data, options);
}
}
},
chainReceive: function (sender, type, data, options) {
var chainPacket = {
sender: sender,
type: type,
data: data,
options: options
};
// Fire our internal handler
if (!this._chainHandler || (this._chainHandler && !this._chainHandler(chainPacket))) {
// Propagate the message down the chain
this.chainSend(chainPacket.type, chainPacket.data, chainPacket.options);
}
}
};
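/* Illustrative sketch (assumed consumer code): any two objects that mix in
* ChainReactor can be wired so packets flow downstream. `source` and
* `target` are hypothetical mixed-in instances.
*
*   source.chain(target);              // target now receives packets
*   source.chainSend('insert', docs);  // calls target.chainReceive(...)
*   source.unChain(target);            // stop sending to target
*
* A receiver may assign this._chainHandler; if that handler returns a
* truthy value the packet is not propagated further down the chain.
*/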
module.exports = ChainReactor;
},{}],15:[function(_dereq_,module,exports){
"use strict";
var idCounter = 0,
Overload = _dereq_('./Overload'),
Common;
Common = {
/**
* Gets / sets data in the item store. The store can be used to set and
* retrieve data against a key. Useful for adding arbitrary key/value data
* to a collection / view etc and retrieving it later.
* @param {String|*} key The key under which to store the passed value or
* retrieve the existing stored value.
* @param {*=} val Optional value. If passed will overwrite the existing value
* stored against the specified key if one currently exists.
* @returns {*}
*/
store: function (key, val) {
if (key !== undefined) {
if (val !== undefined) {
// Store the data
this._store = this._store || {};
this._store[key] = val;
return this;
}
if (this._store) {
return this._store[key];
}
}
return undefined;
},
/**
* Removes a previously stored key/value pair from the item store, set previously
* by using the store() method.
* @param {String|*} key The key of the key/value pair to remove.
* @returns {Common} Returns this for chaining.
*/
unStore: function (key) {
if (key !== undefined) {
delete this._store[key];
}
return this;
},
/**
* Returns a non-referenced version of the passed object / array.
* @param {Object} data The object or array to return as a non-referenced version.
* @param {Number=} copies Optional number of copies to produce. If specified, the return
* value will be an array of decoupled objects, each distinct from the other.
* @returns {*}
*/
decouple: function (data, copies) {
if (data !== undefined) {
if (!copies) {
return JSON.parse(JSON.stringify(data));
} else {
var i,
json = JSON.stringify(data),
copyArr = [];
for (i = 0; i < copies; i++) {
copyArr.push(JSON.parse(json));
}
return copyArr;
}
}
return undefined;
},
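/* Illustrative sketch: decouple() round-trips through JSON, so the result
* shares no references with the input (and non-JSON values such as
* functions or undefined are dropped along the way).
*
*   var src = {tags: ['a']};
*   var copy = this.decouple(src);     // copy.tags !== src.tags
*   var three = this.decouple(src, 3); // array of three distinct copies
*/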
/**
* Generates a new 16-character hexadecimal unique ID or
* generates a new 16-character hexadecimal ID based on
* the passed string. Will always generate the same ID
* for the same string.
* @param {String=} str A string to generate the ID from.
* @return {String}
*/
objectId: function (str) {
var id,
pow = Math.pow(10, 17);
if (!str) {
idCounter++;
id = (idCounter + (
Math.random() * pow +
Math.random() * pow +
Math.random() * pow +
Math.random() * pow
)).toString(16);
} else {
var val = 0,
count = str.length,
i;
for (i = 0; i < count; i++) {
val += str.charCodeAt(i) * pow;
}
id = val.toString(16);
}
return id;
},
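/* Illustrative sketch: with no argument each call produces a fresh id;
* with a string the id is derived from the character codes, so the same
* string always yields the same id.
*
*   this.objectId();        // unique hex id per call
*   this.objectId('users'); // deterministic, identical on every call
*/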
/**
* Gets / sets debug flag that can enable debug message output to the
* console if required.
* @param {Boolean} val The value to set debug flag to.
* @return {Boolean} True if enabled, false otherwise.
*/
/**
* Sets debug flag for a particular type that can enable debug message
* output to the console if required.
* @param {String} type The name of the debug type to set flag for.
* @param {Boolean} val The value to set debug flag to.
* @return {Boolean} True if enabled, false otherwise.
*/
debug: new Overload([
function () {
return this._debug && this._debug.all;
},
function (val) {
if (val !== undefined) {
if (typeof val === 'boolean') {
this._debug = this._debug || {};
this._debug.all = val;
this.chainSend('debug', this._debug);
return this;
} else {
return (this._debug && this._debug[val]) || (this._db && this._db._debug && this._db._debug[val]) || (this._debug && this._debug.all);
}
}
return this._debug && this._debug.all;
},
function (type, val) {
if (type !== undefined) {
if (val !== undefined) {
this._debug = this._debug || {};
this._debug[type] = val;
this.chainSend('debug', this._debug);
return this;
}
return (this._debug && this._debug[type]) || (this._db && this._db._debug && this._db._debug[type]);
}
return this._debug && this._debug.all;
}
])
};
module.exports = Common;
},{"./Overload":24}],16:[function(_dereq_,module,exports){
"use strict";
var Constants = {
TYPE_INSERT: 0,
TYPE_UPDATE: 1,
TYPE_REMOVE: 2,
PHASE_BEFORE: 0,
PHASE_AFTER: 1
};
module.exports = Constants;
},{}],17:[function(_dereq_,module,exports){
"use strict";
var Overload = _dereq_('./Overload');
var Events = {
on: new Overload({
/**
* Attach an event listener to the passed event.
* @param {String} event The name of the event to listen for.
* @param {Function} listener The method to call when the event is fired.
*/
'string, function': function (event, listener) {
this._listeners = this._listeners || {};
this._listeners[event] = this._listeners[event] || {};
this._listeners[event]['*'] = this._listeners[event]['*'] || [];
this._listeners[event]['*'].push(listener);
return this;
},
/**
* Attach an event listener to the passed event only if the passed
* id matches the document id for the event being fired.
* @param {String} event The name of the event to listen for.
* @param {*} id The document id to match against.
* @param {Function} listener The method to call when the event is fired.
*/
'string, *, function': function (event, id, listener) {
this._listeners = this._listeners || {};
this._listeners[event] = this._listeners[event] || {};
this._listeners[event][id] = this._listeners[event][id] || [];
this._listeners[event][id].push(listener);
return this;
}
}),
off: new Overload({
'string': function (event) {
if (this._listeners && this._listeners[event] && event in this._listeners) {
delete this._listeners[event];
}
return this;
},
'string, function': function (event, listener) {
var arr,
index;
if (typeof(listener) === 'string') {
if (this._listeners && this._listeners[event] && this._listeners[event][listener]) {
delete this._listeners[event][listener];
}
} else {
if (event in this._listeners) {
arr = this._listeners[event]['*'];
index = arr.indexOf(listener);
if (index > -1) {
arr.splice(index, 1);
}
}
}
return this;
},
'string, *, function': function (event, id, listener) {
if (this._listeners && event in this._listeners && id in this._listeners[event]) {
var arr = this._listeners[event][id],
index = arr.indexOf(listener);
if (index > -1) {
arr.splice(index, 1);
}
}
},
'string, *': function (event, id) {
if (this._listeners && event in this._listeners && id in this._listeners[event]) {
// Kill all listeners for this event id
delete this._listeners[event][id];
}
}
}),
emit: function (event, data) {
this._listeners = this._listeners || {};
if (event in this._listeners) {
var arrIndex,
arrCount;
// Handle global emit
if (this._listeners[event]['*']) {
var arr = this._listeners[event]['*'];
arrCount = arr.length;
for (arrIndex = 0; arrIndex < arrCount; arrIndex++) {
arr[arrIndex].apply(this, Array.prototype.slice.call(arguments, 1));
}
}
// Handle individual emit
if (data instanceof Array) {
// Check if the array is an array of objects in the collection
if (data[0] && data[0][this._primaryKey]) {
// Loop the array and check for listeners against the primary key
var listenerIdArr = this._listeners[event],
listenerIdCount,
listenerIdIndex;
arrCount = data.length;
for (arrIndex = 0; arrIndex < arrCount; arrIndex++) {
if (listenerIdArr[data[arrIndex][this._primaryKey]]) {
// Emit for this id
listenerIdCount = listenerIdArr[data[arrIndex][this._primaryKey]].length;
for (listenerIdIndex = 0; listenerIdIndex < listenerIdCount; listenerIdIndex++) {
listenerIdArr[data[arrIndex][this._primaryKey]][listenerIdIndex].apply(this, Array.prototype.slice.call(arguments, 1));
}
}
}
}
}
}
return this;
}
};
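/* Illustrative sketch (assumed consumer code): listeners can be global to
* an event or keyed to a document id, matching the '*' and id buckets used
* above. `coll`, `docId` and the handlers are hypothetical, and the
* id-keyed form relies on the instance's _primaryKey (here '_id').
*
*   coll.on('insert', onAnyInsert);          // fires for every insert
*   coll.on('insert', docId, onDocInsert);   // fires only for that doc id
*   coll.emit('insert', [{_id: docId}]);     // fires both listeners
*   coll.off('insert', docId, onDocInsert);
*/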
module.exports = Events;
},{"./Overload":24}],18:[function(_dereq_,module,exports){
"use strict";
var Matching = {
/**
* Internal method that checks a document against a test object.
* @param {*} source The source object or value to test against.
* @param {*} test The test object or value to test with.
* @param {String=} opToApply The special operation to apply to the test such
* as 'and' or an 'or' operator.
* @param {Object=} options An object containing options to apply to the
* operation such as limiting the fields returned etc.
* @returns {Boolean} True if the test was positive, false on negative.
* @private
*/
_match: function (source, test, opToApply, options) {
// TODO: This method is quite long, break into smaller pieces
var operation,
applyOp,
recurseVal,
tmpIndex,
sourceType = typeof source,
testType = typeof test,
matchedAll = true,
opResult,
substringCache,
i;
options = options || {};
// Check if options currently holds a root query object
if (!options.$rootQuery) {
// Root query not assigned, hold the root query
options.$rootQuery = test;
}
// Check if the comparison data are both strings or numbers
if ((sourceType === 'string' || sourceType === 'number') && (testType === 'string' || testType === 'number')) {
// The source and test data are flat types that do not require recursive searches,
// so just compare them and return the result
if (sourceType === 'number') {
// Number comparison
if (source !== test) {
matchedAll = false;
}
} else {
// String comparison
if (source.localeCompare(test)) {
matchedAll = false;
}
}
} else {
for (i in test) {
if (test.hasOwnProperty(i)) {
// Reset operation flag
operation = false;
substringCache = i.substr(0, 2);
// Check if the property is a comment (ignorable)
if (substringCache === '//') {
// Skip this property
continue;
}
// Check if the property starts with a dollar (function)
if (substringCache.indexOf('$') === 0) {
// Ask the _matchOp method to handle the operation
opResult = this._matchOp(i, source, test[i], options);
// Check the result of the matchOp operation
// If the result is -1 then no operation took place, otherwise the result
// will be a boolean denoting a match (true) or no match (false)
if (opResult > -1) {
if (opResult) {
if (opToApply === 'or') {
return true;
}
} else {
// Set the matchedAll flag to the result of the operation
// because the operation did not return true
matchedAll = opResult;
}
// Record that an operation was handled
operation = true;
}
}
// Check for regex
if (!operation && test[i] instanceof RegExp) {
operation = true;
if (typeof(source) === 'object' && source[i] !== undefined && test[i].test(source[i])) {
if (opToApply === 'or') {
return true;
}
} else {
matchedAll = false;
}
}
if (!operation) {
// Check if our query is an object
if (typeof(test[i]) === 'object') {
// Because test[i] is an object, source must also be an object
// Check if our source data we are checking the test query against
// is an object or an array
if (source[i] !== undefined) {
if (source[i] instanceof Array && !(test[i] instanceof Array)) {
// The source data is an array, so check each item until a
// match is found
recurseVal = false;
for (tmpIndex = 0; tmpIndex < source[i].length; tmpIndex++) {
recurseVal = this._match(source[i][tmpIndex], test[i], applyOp, options);
if (recurseVal) {
// One of the array items matched the query so we can
// include this item in the results, so break now
break;
}
}
if (recurseVal) {
if (opToApply === 'or') {
return true;
}
} else {
matchedAll = false;
}
} else if (!(source[i] instanceof Array) && test[i] instanceof Array) {
// The test key data is an array and the source key data is not so check
// each item in the test key data to see if the source item matches one
// of them. This is effectively an $in search.
recurseVal = false;
for (tmpIndex = 0; tmpIndex < test[i].length; tmpIndex++) {
recurseVal = this._match(source[i], test[i][tmpIndex], applyOp, options);
if (recurseVal) {
// One of the array items matched the query so we can
// include this item in the results, so break now
break;
}
}
if (recurseVal) {
if (opToApply === 'or') {
return true;
}
} else {
matchedAll = false;
}
} else if (typeof(source) === 'object') {
// Recurse down the object tree
recurseVal = this._match(source[i], test[i], applyOp, options);
if (recurseVal) {
if (opToApply === 'or') {
return true;
}
} else {
matchedAll = false;
}
} else {
recurseVal = this._match(undefined, test[i], applyOp, options);
if (recurseVal) {
if (opToApply === 'or') {
return true;
}
} else {
matchedAll = false;
}
}
} else {
// First check if the test match is an $exists
if (test[i] && test[i].$exists !== undefined) {
// Push the item through another match recurse
recurseVal = this._match(undefined, test[i], applyOp, options);
if (recurseVal) {
if (opToApply === 'or') {
return true;
}
} else {
matchedAll = false;
}
} else {
matchedAll = false;
}
}
} else {
// Check if the prop matches our test value
if (source && source[i] === test[i]) {
if (opToApply === 'or') {
return true;
}
} else if (source && source[i] && source[i] instanceof Array && test[i] && typeof(test[i]) !== "object") {
// We are looking for a value inside an array
// The source data is an array, so check each item until a
// match is found
recurseVal = false;
for (tmpIndex = 0; tmpIndex < source[i].length; tmpIndex++) {
recurseVal = this._match(source[i][tmpIndex], test[i], applyOp, options);
if (recurseVal) {
// One of the array items matched the query so we can
// include this item in the results, so break now
break;
}
}
if (recurseVal) {
if (opToApply === 'or') {
return true;
}
} else {
matchedAll = false;
}
} else {
matchedAll = false;
}
}
}
if (opToApply === 'and' && !matchedAll) {
return false;
}
}
}
}
return matchedAll;
},
/**
* Internal method, performs a matching process against a query operator such as $gt or $nin.
* @param {String} key The property name in the test that matches the operator to perform
* matching against.
* @param {*} source The source data to match the query against.
* @param {*} test The query to match the source against.
* @param {Object=} options An options object.
* @returns {*}
* @private
*/
_matchOp: function (key, source, test, options) {
// Check for commands
switch (key) {
case '$gt':
// Greater than
return source > test;
case '$gte':
// Greater than or equal
return source >= test;
case '$lt':
// Less than
return source < test;
case '$lte':
// Less than or equal
return source <= test;
case '$exists':
// Property exists
return (source === undefined) !== test;
case '$ne': // Not equals
return source != test; // jshint ignore:line
case '$or':
// Match true on ANY check to pass
for (var orIndex = 0; orIndex < test.length; orIndex++) {
if (this._match(source, test[orIndex], 'and', options)) {
return true;
}
}
return false;
case '$and':
// Match true on ALL checks to pass
for (var andIndex = 0; andIndex < test.length; andIndex++) {
if (!this._match(source, test[andIndex], 'and', options)) {
return false;
}
}
return true;
case '$in': // In
// Check that the in test is an array
if (test instanceof Array) {
var inArr = test,
inArrCount = inArr.length,
inArrIndex;
for (inArrIndex = 0; inArrIndex < inArrCount; inArrIndex++) {
if (inArr[inArrIndex] === source) {
return true;
}
}
return false;
} else {
throw('ForerunnerDB.Mixin.Matching "' + this.name() + '": Cannot use an $in operator on a non-array key: ' + key);
}
break;
case '$nin': // Not in
// Check that the not-in test is an array
if (test instanceof Array) {
var notInArr = test,
notInArrCount = notInArr.length,
notInArrIndex;
for (notInArrIndex = 0; notInArrIndex < notInArrCount; notInArrIndex++) {
if (notInArr[notInArrIndex] === source) {
return false;
}
}
return true;
} else {
throw('ForerunnerDB.Mixin.Matching "' + this.name() + '": Cannot use a $nin operator on a non-array key: ' + key);
}
break;
case '$distinct':
// Ensure options holds a distinct lookup
options.$rootQuery['//distinctLookup'] = options.$rootQuery['//distinctLookup'] || {};
for (var distinctProp in test) {
if (test.hasOwnProperty(distinctProp)) {
options.$rootQuery['//distinctLookup'][distinctProp] = options.$rootQuery['//distinctLookup'][distinctProp] || {};
// Check if the options distinct lookup has this field's value
if (options.$rootQuery['//distinctLookup'][distinctProp][source[distinctProp]]) {
// Value is already in use
return false;
} else {
// Set the value in the lookup
options.$rootQuery['//distinctLookup'][distinctProp][source[distinctProp]] = true;
// Allow the item in the results
return true;
}
}
}
break;
}
return -1;
}
};
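/* Illustrative sketch: _match() is the routine queries resolve through.
* Assuming an object mixed in with Mixin.Matching, the following all
* evaluate to true:
*
*   this._match({age: 30, name: 'Jo'}, {age: {$gt: 21}});
*   this._match({age: 30}, {$or: [{age: 18}, {age: {$gte: 30}}]});
*   this._match({tags: ['a', 'b']}, {tags: 'a'}); // arrays match on any element
*/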
module.exports = Matching;
},{}],19:[function(_dereq_,module,exports){
"use strict";
var Sorting = {
/**
* Sorts the passed value a against the passed value b ascending.
* @param {*} a The first value to compare.
* @param {*} b The second value to compare.
* @returns {*} 1 if a is sorted after b, -1 if a is sorted before b.
*/
sortAsc: function (a, b) {
if (typeof(a) === 'string' && typeof(b) === 'string') {
return a.localeCompare(b);
} else {
if (a > b) {
return 1;
} else if (a < b) {
return -1;
}
}
return 0;
},
/**
* Sorts the passed value a against the passed value b descending.
* @param {*} a The first value to compare.
* @param {*} b The second value to compare.
* @returns {*} 1 if a is sorted after b, -1 if a is sorted before b.
*/
sortDesc: function (a, b) {
if (typeof(a) === 'string' && typeof(b) === 'string') {
return b.localeCompare(a);
} else {
if (a > b) {
return -1;
} else if (a < b) {
return 1;
}
}
return 0;
}
};
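/* Illustrative sketch: the comparators plug straight into Array#sort and
* use localeCompare for strings, numeric comparison otherwise.
*
*   [3, 1, 2].sort(Sorting.sortAsc);   // [1, 2, 3]
*   ['a', 'b'].sort(Sorting.sortDesc); // ['b', 'a']
*/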
module.exports = Sorting;
},{}],20:[function(_dereq_,module,exports){
"use strict";
var Triggers = {
/**
* Add a trigger by id.
* @param {String} id The id of the trigger. This must be unique to the type and
* phase of the trigger. Only one trigger may be added with this id per type and
* phase.
* @param {Number} type The type of operation to apply the trigger to. See
* Mixin.Constants for constants to use.
* @param {Number} phase The phase of an operation to fire the trigger on. See
* Mixin.Constants for constants to use.
* @param {Function} method The method to call when the trigger is fired.
* @returns {boolean} True if the trigger was added successfully, false if not.
*/
addTrigger: function (id, type, phase, method) {
var self = this,
triggerIndex;
// Check if the trigger already exists
triggerIndex = self._triggerIndexOf(id, type, phase);
if (triggerIndex === -1) {
// The trigger does not exist, create it
self._trigger = self._trigger || {};
self._trigger[type] = self._trigger[type] || {};
self._trigger[type][phase] = self._trigger[type][phase] || [];
self._trigger[type][phase].push({
id: id,
method: method
});
return true;
}
return false;
},
/**
* Removes a trigger by id for the given operation type and phase.
* @param {String} id The id of the trigger to remove.
* @param {Number} type The type of operation to remove the trigger from. See
* Mixin.Constants for constants to use.
* @param {Number} phase The phase of the operation to remove the trigger from.
* See Mixin.Constants for constants to use.
* @returns {boolean} True if removed successfully, false if not.
*/
removeTrigger: function (id, type, phase) {
var self = this,
triggerIndex;
// Check if the trigger already exists
triggerIndex = self._triggerIndexOf(id, type, phase);
if (triggerIndex > -1) {
// The trigger exists, remove it
self._trigger[type][phase].splice(triggerIndex, 1);
return true;
}
return false;
},
/**
* Checks if a trigger will fire based on the type and phase provided.
* @param {Number} type The type of operation. See Mixin.Constants for
* constants to use.
* @param {Number} phase The phase of the operation. See Mixin.Constants
* for constants to use.
* @returns {Boolean} True if the trigger will fire, false otherwise.
*/
willTrigger: function (type, phase) {
return this._trigger && this._trigger[type] && this._trigger[type][phase] && this._trigger[type][phase].length;
},
/**
* Processes trigger actions based on the operation, type and phase.
* @param {Object} operation Operation data to pass to the trigger.
* @param {Number} type The type of operation. See Mixin.Constants for
* constants to use.
* @param {Number} phase The phase of the operation. See Mixin.Constants
* for constants to use.
* @param {Object} oldDoc The document snapshot before operations are
* carried out against the data.
* @param {Object} newDoc The document snapshot after operations are
* carried out against the data.
* @returns {boolean}
*/
processTrigger: function (operation, type, phase, oldDoc, newDoc) {
var self = this,
triggerArr,
triggerIndex,
triggerCount,
triggerItem,
response;
if (self._trigger && self._trigger[type] && self._trigger[type][phase]) {
triggerArr = self._trigger[type][phase];
triggerCount = triggerArr.length;
for (triggerIndex = 0; triggerIndex < triggerCount; triggerIndex++) {
triggerItem = triggerArr[triggerIndex];
if (this.debug()) {
var typeName,
phaseName;
switch (type) {
case this.TYPE_INSERT:
typeName = 'insert';
break;
case this.TYPE_UPDATE:
typeName = 'update';
break;
case this.TYPE_REMOVE:
typeName = 'remove';
break;
default:
typeName = '';
break;
}
switch (phase) {
case this.PHASE_BEFORE:
phaseName = 'before';
break;
case this.PHASE_AFTER:
phaseName = 'after';
break;
default:
phaseName = '';
break;
}
//console.log('Triggers: Processing trigger "' + id + '" for ' + typeName + ' in phase "' + phaseName + '"');
}
// Run the trigger's method and store the response
response = triggerItem.method.call(self, operation, oldDoc, newDoc);
// Check the response for a non-expected result (anything other than
// undefined, true or false is considered a throwable error)
if (response === false) {
// The trigger wants us to cancel operations
return false;
}
if (response !== undefined && response !== true && response !== false) {
// Trigger responded with error, throw the error
throw('ForerunnerDB.Mixin.Triggers: Trigger error: ' + response);
}
}
// Triggers all ran without issue, return a success (true)
return true;
}
},
/**
* Returns the index of a trigger by id based on type and phase.
* @param {String} id The id of the trigger to find the index of.
* @param {Number} type The type of operation. See Mixin.Constants for
* constants to use.
* @param {Number} phase The phase of the operation. See Mixin.Constants
* for constants to use.
* @returns {number}
* @private
*/
_triggerIndexOf: function (id, type, phase) {
var self = this,
triggerArr,
triggerCount,
triggerIndex;
if (self._trigger && self._trigger[type] && self._trigger[type][phase]) {
triggerArr = self._trigger[type][phase];
triggerCount = triggerArr.length;
for (triggerIndex = 0; triggerIndex < triggerCount; triggerIndex++) {
if (triggerArr[triggerIndex].id === id) {
return triggerIndex;
}
}
}
return -1;
}
};
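/* Illustrative sketch (assumed consumer code): trigger ids are unique per
* type/phase pair and the type/phase constants come from Mixin.Constants.
* `coll` is a hypothetical object with both mixins applied.
*
*   coll.addTrigger('audit', coll.TYPE_INSERT, coll.PHASE_AFTER,
*     function (operation, oldDoc, newDoc) {
*       // returning false asks the caller to cancel the operation
*     });
*   coll.willTrigger(coll.TYPE_INSERT, coll.PHASE_AFTER); // truthy
*   coll.removeTrigger('audit', coll.TYPE_INSERT, coll.PHASE_AFTER);
*/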
module.exports = Triggers;
},{}],21:[function(_dereq_,module,exports){
"use strict";
// Grab the view class
var Shared,
Core,
OldView,
OldViewInit;
Shared = _dereq_('./Shared');
Core = Shared.modules.Core;
OldView = Shared.modules.OldView;
OldViewInit = OldView.prototype.init;
OldView.prototype.init = function () {
var self = this;
this._binds = [];
this._renderStart = 0;
this._renderEnd = 0;
this._deferQueue = {
insert: [],
update: [],
remove: [],
upsert: [],
_bindInsert: [],
_bindUpdate: [],
_bindRemove: [],
_bindUpsert: []
};
this._deferThreshold = {
insert: 100,
update: 100,
remove: 100,
upsert: 100,
_bindInsert: 100,
_bindUpdate: 100,
_bindRemove: 100,
_bindUpsert: 100
};
this._deferTime = {
insert: 100,
update: 1,
remove: 1,
upsert: 1,
_bindInsert: 100,
_bindUpdate: 1,
_bindRemove: 1,
_bindUpsert: 1
};
OldViewInit.apply(this, arguments);
// Hook view events to update binds
this.on('insert', function (successArr, failArr) {
self._bindEvent('insert', successArr, failArr);
});
this.on('update', function (successArr, failArr) {
self._bindEvent('update', successArr, failArr);
});
this.on('remove', function (successArr, failArr) {
self._bindEvent('remove', successArr, failArr);
});
this.on('change', self._bindChange);
};
/**
* Binds a selector to the insert, update and delete events of a particular
* view and keeps the selector in sync so that updates are reflected on the
* web page in real-time.
*
* @param {String} selector The jQuery selector string to get target elements.
* @param {Object} options The options object.
*/
OldView.prototype.bind = function (selector, options) {
if (options && options.template) {
this._binds[selector] = options;
} else {
throw('ForerunnerDB.OldView "' + this.name() + '": Cannot bind data to element, missing options information!');
}
return this;
};
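/* Illustrative sketch (assumed consumer code, requires window.jQuery): the
* options object must at least provide a template method that renders a
* data item and hands the resulting HTML to the supplied callback. The
* selector and field names are hypothetical.
*
*   view.bind('#userList', {
*     template: function (item, callback) {
*       callback('<li id="' + item._id + '">' + item.name + '</li>');
*     }
*   });
*   view.isBound('#userList'); // true
*   view.unBind('#userList');
*/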
/**
* Un-binds a selector from the view changes.
* @param {String} selector The jQuery selector string to identify the bind to remove.
* @returns {Collection}
*/
OldView.prototype.unBind = function (selector) {
delete this._binds[selector];
return this;
};
/**
* Returns true if the selector is bound to the view.
* @param {String} selector The jQuery selector string to identify the bind to check for.
* @returns {boolean}
*/
OldView.prototype.isBound = function (selector) {
return Boolean(this._binds[selector]);
};
/**
* Sorts items in the DOM based on the bind settings and the passed item array.
* @param {String} selector The jQuery selector of the bind container.
* @param {Array} itemArr The array of items whose order determines the order
* the matching DOM elements should appear in.
*/
OldView.prototype.bindSortDom = function (selector, itemArr) {
var container = window.jQuery(selector),
arrIndex,
arrItem,
domItem;
if (this.debug()) {
console.log('ForerunnerDB.OldView.Bind: Sorting data in DOM...', itemArr);
}
for (arrIndex = 0; arrIndex < itemArr.length; arrIndex++) {
arrItem = itemArr[arrIndex];
// Now we've done our inserts into the DOM, let's ensure
// they are still ordered correctly
domItem = container.find('#' + arrItem[this._primaryKey]);
if (domItem.length) {
if (arrIndex === 0) {
if (this.debug()) {
console.log('ForerunnerDB.OldView.Bind: Sort, moving to index 0...', domItem);
}
container.prepend(domItem);
} else {
if (this.debug()) {
console.log('ForerunnerDB.OldView.Bind: Sort, moving to index ' + arrIndex + '...', domItem);
}
domItem.insertAfter(container.children(':eq(' + (arrIndex - 1) + ')'));
}
} else {
if (this.debug()) {
console.log('ForerunnerDB.OldView.Bind: Warning, element for array item not found!', arrItem);
}
}
}
};
OldView.prototype.bindRefresh = function (obj) {
var binds = this._binds,
bindKey,
bind;
if (!obj) {
// Grab current data
obj = {
data: this.find()
};
}
for (bindKey in binds) {
if (binds.hasOwnProperty(bindKey)) {
bind = binds[bindKey];
if (this.debug()) { console.log('ForerunnerDB.OldView.Bind: Sorting DOM...'); }
this.bindSortDom(bindKey, obj.data);
if (bind.afterOperation) {
bind.afterOperation();
}
if (bind.refresh) {
bind.refresh();
}
}
}
};
/**
* Renders a bind view data to the DOM.
* @param {String} bindSelector The jQuery selector string to use to identify
* the bind target. Must match the selector used when defining the original bind.
* @param {Function=} domHandler If specified, this handler method will be called
* with the final HTML for the view instead of the DB handling the DOM insertion.
*/
OldView.prototype.bindRender = function (bindSelector, domHandler) {
// Check the bind exists
var bind = this._binds[bindSelector],
domTarget = window.jQuery(bindSelector),
allData,
dataItem,
itemHtml,
finalHtml = window.jQuery('<ul></ul>'),
bindCallback,
i;
if (bind) {
allData = this._data.find();
bindCallback = function (itemHtml) {
finalHtml.append(itemHtml);
};
// Loop all items and add them to the screen
for (i = 0; i < allData.length; i++) {
dataItem = allData[i];
itemHtml = bind.template(dataItem, bindCallback);
}
if (!domHandler) {
domTarget.append(finalHtml.html());
} else {
domHandler(bindSelector, finalHtml.html());
}
}
};
OldView.prototype.processQueue = function (type, callback) {
var queue = this._deferQueue[type],
deferThreshold = this._deferThreshold[type],
deferTime = this._deferTime[type];
if (queue.length) {
var self = this,
dataArr;
// Process items up to the threshold
if (queue.length) {
if (queue.length > deferThreshold) {
// Grab items up to the threshold value
dataArr = queue.splice(0, deferThreshold);
} else {
// Grab all the remaining items
dataArr = queue.splice(0, queue.length);
}
this._bindEvent(type, dataArr, []);
}
// Queue another process
setTimeout(function () {
self.processQueue(type, callback);
}, deferTime);
} else {
if (callback) { callback(); }
this.emit('bindQueueComplete');
}
};
OldView.prototype._bindEvent = function (type, successArr, failArr) {
/*var queue = this._deferQueue[type],
deferThreshold = this._deferThreshold[type],
deferTime = this._deferTime[type];*/
var binds = this._binds,
unfilteredDataSet = this.find({}),
filteredDataSet,
bindKey;
// Check if the number of inserts is greater than the defer threshold
/*if (successArr && successArr.length > deferThreshold) {
// Break up upsert into blocks
this._deferQueue[type] = queue.concat(successArr);
// Fire off the insert queue handler
this.processQueue(type);
return;
} else {*/
for (bindKey in binds) {
if (binds.hasOwnProperty(bindKey)) {
if (binds[bindKey].reduce) {
filteredDataSet = this.find(binds[bindKey].reduce.query, binds[bindKey].reduce.options);
} else {
filteredDataSet = unfilteredDataSet;
}
switch (type) {
case 'insert':
this._bindInsert(bindKey, binds[bindKey], successArr, failArr, filteredDataSet);
break;
case 'update':
this._bindUpdate(bindKey, binds[bindKey], successArr, failArr, filteredDataSet);
break;
case 'remove':
this._bindRemove(bindKey, binds[bindKey], successArr, failArr, filteredDataSet);
break;
}
}
}
//}
};
OldView.prototype._bindChange = function (newDataArr) {
if (this.debug()) {
console.log('ForerunnerDB.OldView.Bind: Bind data change, refreshing bind...', newDataArr);
}
this.bindRefresh(newDataArr);
};
OldView.prototype._bindInsert = function (selector, options, successArr, failArr, all) {
var container = window.jQuery(selector),
itemElem,
itemHtml,
makeCallback,
i;
makeCallback = function (itemElem, insertedItem, failArr, all) {
return function (itemHtml) {
// Check if there is custom DOM insert method
if (options.insert) {
options.insert(itemHtml, insertedItem, failArr, all);
} else {
// Handle the insert automatically
// Add the item to the container
if (options.prependInsert) {
container.prepend(itemHtml);
} else {
container.append(itemHtml);
}
}
if (options.afterInsert) {
options.afterInsert(itemHtml, insertedItem, failArr, all);
}
};
};
// Loop the inserted items
for (i = 0; i < successArr.length; i++) {
// Check for existing item in the container
itemElem = container.find('#' + successArr[i][this._primaryKey]);
if (!itemElem.length) {
itemHtml = options.template(successArr[i], makeCallback(itemElem, successArr[i], failArr, all));
}
}
};
OldView.prototype._bindUpdate = function (selector, options, successArr, failArr, all) {
var container = window.jQuery(selector),
itemElem,
makeCallback,
i;
makeCallback = function (itemElem, itemData) {
return function (itemHtml) {
// Check if there is custom DOM insert method
if (options.update) {
options.update(itemHtml, itemData, all, itemElem.length ? 'update' : 'append');
} else {
if (itemElem.length) {
// An existing item is in the container, replace it with the
// new rendered item from the updated data
itemElem.replaceWith(itemHtml);
} else {
// The item element does not already exist, append it
if (options.prependUpdate) {
container.prepend(itemHtml);
} else {
container.append(itemHtml);
}
}
}
if (options.afterUpdate) {
options.afterUpdate(itemHtml, itemData, all);
}
};
};
// Loop the updated items
for (i = 0; i < successArr.length; i++) {
// Check for existing item in the container
itemElem = container.find('#' + successArr[i][this._primaryKey]);
options.template(successArr[i], makeCallback(itemElem, successArr[i]));
}
};
OldView.prototype._bindRemove = function (selector, options, successArr, failArr, all) {
var container = window.jQuery(selector),
itemElem,
makeCallback,
i;
makeCallback = function (itemElem, data, all) {
return function () {
if (options.remove) {
options.remove(itemElem, data, all);
} else {
itemElem.remove();
if (options.afterRemove) {
options.afterRemove(itemElem, data, all);
}
}
};
};
// Loop the removed items
for (i = 0; i < successArr.length; i++) {
// Check for existing item in the container
itemElem = container.find('#' + successArr[i][this._primaryKey]);
if (itemElem.length) {
if (options.beforeRemove) {
options.beforeRemove(itemElem, successArr[i], all, makeCallback(itemElem, successArr[i], all));
} else {
if (options.remove) {
options.remove(itemElem, successArr[i], all);
} else {
itemElem.remove();
if (options.afterRemove) {
options.afterRemove(itemElem, successArr[i], all);
}
}
}
}
}
};
},{"./Shared":29}],22:[function(_dereq_,module,exports){
"use strict";
// Import external names locally
var Shared,
Core,
CollectionGroup,
Collection,
CollectionInit,
CollectionGroupInit,
CoreInit;
Shared = _dereq_('./Shared');
/**
* The view constructor.
* @param viewName
* @constructor
*/
var OldView = function (viewName) {
this.init.apply(this, arguments);
};
OldView.prototype.init = function (viewName) {
var self = this;
this._name = viewName;
this._listeners = {};
this._query = {
query: {},
options: {}
};
// Register listeners for the CRUD events
this._onFromSetData = function () {
self._onSetData.apply(self, arguments);
};
this._onFromInsert = function () {
self._onInsert.apply(self, arguments);
};
this._onFromUpdate = function () {
self._onUpdate.apply(self, arguments);
};
this._onFromRemove = function () {
self._onRemove.apply(self, arguments);
};
this._onFromChange = function () {
if (self.debug()) { console.log('ForerunnerDB.OldView: Received change'); }
self._onChange.apply(self, arguments);
};
};
Shared.addModule('OldView', OldView);
CollectionGroup = _dereq_('./CollectionGroup');
Collection = _dereq_('./Collection');
CollectionInit = Collection.prototype.init;
CollectionGroupInit = CollectionGroup.prototype.init;
Core = Shared.modules.Core;
CoreInit = Core.prototype.init;
Shared.mixin(OldView.prototype, 'Mixin.Events');
/**
* Drops a view and all its stored data from the database.
* @returns {boolean} True on success, false on failure.
*/
OldView.prototype.drop = function () {
if ((this._db || this._from) && this._name) {
if (this.debug()) {
console.log('ForerunnerDB.OldView: Dropping view ' + this._name);
}
this._state = 'dropped';
this.emit('drop', this);
if (this._db && this._db._oldViews) {
delete this._db._oldViews[this._name];
}
if (this._from && this._from._oldViews) {
delete this._from._oldViews[this._name];
}
return true;
}
return false;
};
OldView.prototype.debug = function () {
// TODO: Make this function work
return false;
};
/**
* Gets / sets the DB the view is bound against. Automatically set
* when the db.oldView(viewName) method is called.
* @param db
* @returns {*}
*/
OldView.prototype.db = function (db) {
if (db !== undefined) {
this._db = db;
return this;
}
return this._db;
};
/**
* Gets / sets the collection that the view derives its data from.
* @param {*} collection A collection instance or the name of a collection
* to use as the data set to derive view data from.
* @returns {*}
*/
OldView.prototype.from = function (collection) {
if (collection !== undefined) {
// Check if this is a collection name or a collection instance
if (typeof(collection) === 'string') {
if (this._db.collectionExists(collection)) {
collection = this._db.collection(collection);
} else {
throw('ForerunnerDB.OldView "' + this.name() + '": Invalid collection in view.from() call.');
}
}
// Check if the existing from matches the passed one
if (this._from !== collection) {
// Check if we already have a collection assigned
if (this._from) {
// Remove ourselves from the collection view lookup
this.removeFrom();
}
this.addFrom(collection);
}
return this;
}
return this._from;
};
OldView.prototype.addFrom = function (collection) {
//var self = this;
this._from = collection;
if (this._from) {
this._from.on('setData', this._onFromSetData);
//this._from.on('insert', this._onFromInsert);
//this._from.on('update', this._onFromUpdate);
//this._from.on('remove', this._onFromRemove);
this._from.on('change', this._onFromChange);
// Add this view to the collection's view lookup
this._from._addOldView(this);
this._primaryKey = this._from._primaryKey;
this.refresh();
return this;
} else {
throw('ForerunnerDB.OldView "' + this.name() + '": Cannot determine collection type in view.from()');
}
};
OldView.prototype.removeFrom = function () {
// Unsubscribe from events on this "from"
this._from.off('setData', this._onFromSetData);
//this._from.off('insert', this._onFromInsert);
//this._from.off('update', this._onFromUpdate);
//this._from.off('remove', this._onFromRemove);
this._from.off('change', this._onFromChange);
this._from._removeOldView(this);
};
/**
* Gets the primary key for this view from the assigned collection.
* @returns {String}
*/
OldView.prototype.primaryKey = function () {
if (this._from) {
return this._from.primaryKey();
}
return undefined;
};
/**
* Gets / sets the query that the view uses to build its data set.
* @param {Object=} query The query object to set.
* @param {Object=} options An options object.
* @param {Boolean=} refresh Whether to refresh the view data after
* this operation. Defaults to true.
* @returns {*}
*/
OldView.prototype.queryData = function (query, options, refresh) {
if (query !== undefined) {
this._query.query = query;
}
if (options !== undefined) {
this._query.options = options;
}
if (query !== undefined || options !== undefined) {
if (refresh === undefined || refresh === true) {
this.refresh();
}
return this;
}
return this._query;
};
/**
* Add data to the existing query.
* @param {Object} obj The data whose keys will be added to the existing
* query object.
* @param {Boolean} overwrite Whether or not to overwrite data that already
* exists in the query object. Defaults to true.
* @param {Boolean=} refresh Whether or not to refresh the view data set
* once the operation is complete. Defaults to true.
*/
OldView.prototype.queryAdd = function (obj, overwrite, refresh) {
var query = this._query.query,
i;
if (obj !== undefined) {
// Loop object properties and add to existing query
for (i in obj) {
if (obj.hasOwnProperty(i)) {
if (query[i] === undefined || (query[i] !== undefined && overwrite)) {
query[i] = obj[i];
}
}
}
}
if (refresh === undefined || refresh === true) {
this.refresh();
}
};
/**
* Remove data from the existing query.
* @param {Object} obj The data whose keys will be removed from the existing
* query object.
* @param {Boolean=} refresh Whether or not to refresh the view data set
* once the operation is complete. Defaults to true.
*/
OldView.prototype.queryRemove = function (obj, refresh) {
var query = this._query.query,
i;
if (obj !== undefined) {
// Loop object properties and add to existing query
for (i in obj) {
if (obj.hasOwnProperty(i)) {
delete query[i];
}
}
}
if (refresh === undefined || refresh === true) {
this.refresh();
}
};
/**
* Gets / sets the query being used to generate the view data.
* @param {Object=} query The query to set.
* @param {Boolean=} refresh Whether to refresh the view data after
* this operation. Defaults to true.
* @returns {*}
*/
OldView.prototype.query = function (query, refresh) {
if (query !== undefined) {
this._query.query = query;
if (refresh === undefined || refresh === true) {
this.refresh();
}
return this;
}
return this._query.query;
};
/**
* Gets / sets the query options used when applying sorting etc to the
* view data set.
* @param {Object=} options An options object.
* @param {Boolean=} refresh Whether to refresh the view data after
* this operation. Defaults to true.
* @returns {*}
*/
OldView.prototype.queryOptions = function (options, refresh) {
if (options !== undefined) {
this._query.options = options;
if (refresh === undefined || refresh === true) {
this.refresh();
}
return this;
}
return this._query.options;
};
/**
* Refreshes the view data and diffs between previous and new data to
* determine if any events need to be triggered or DOM binds updated.
*/
OldView.prototype.refresh = function (force) {
if (this._from) {
// Take a copy of the data before updating it, we will use this to
// "diff" between the old and new data and handle DOM bind updates
var oldData = this._data,
oldDataArr,
oldDataItem,
newData,
newDataArr,
query,
primaryKey,
dataItem,
inserted = [],
updated = [],
removed = [],
operated = false,
i;
if (this.debug()) {
console.log('ForerunnerDB.OldView: Refreshing view ' + this._name);
console.log('ForerunnerDB.OldView: Existing data: ' + (typeof(this._data) !== "undefined"));
if (typeof(this._data) !== "undefined") {
console.log('ForerunnerDB.OldView: Current data rows: ' + this._data.find().length);
}
//console.log(OldView.prototype.refresh.caller);
}
// Query the collection and update the data
if (this._query) {
if (this.debug()) {
console.log('ForerunnerDB.OldView: View has query and options, getting subset...');
}
// Run query against collection
//console.log('refresh with query and options', this._query.options);
this._data = this._from.subset(this._query.query, this._query.options);
//console.log(this._data);
} else {
// No query, return whole collection
if (this._query.options) {
if (this.debug()) {
console.log('ForerunnerDB.OldView: View has options, getting subset...');
}
this._data = this._from.subset({}, this._query.options);
} else {
if (this.debug()) {
console.log('ForerunnerDB.OldView: View has no query or options, getting subset...');
}
this._data = this._from.subset({});
}
}
// Check if there was old data
if (!force && oldData) {
if (this.debug()) {
console.log('ForerunnerDB.OldView: Refresh not forced, old data detected...');
}
// Now determine the difference
newData = this._data;
if (oldData.subsetOf() === newData.subsetOf()) {
if (this.debug()) {
console.log('ForerunnerDB.OldView: Old and new data are from same collection...');
}
newDataArr = newData.find();
oldDataArr = oldData.find();
primaryKey = newData._primaryKey;
// The old data and new data were derived from the same parent collection
// so scan the data to determine changes
for (i = 0; i < newDataArr.length; i++) {
dataItem = newDataArr[i];
query = {};
query[primaryKey] = dataItem[primaryKey];
// Check if this item exists in the old data
oldDataItem = oldData.find(query)[0];
if (!oldDataItem) {
// New item detected
inserted.push(dataItem);
} else {
// Check if an update has occurred
if (JSON.stringify(oldDataItem) !== JSON.stringify(dataItem)) {
// Updated / already included item detected
updated.push(dataItem);
}
}
}
// Now loop the old data and check if any records were removed
for (i = 0; i < oldDataArr.length; i++) {
dataItem = oldDataArr[i];
query = {};
query[primaryKey] = dataItem[primaryKey];
// Check if this item exists in the old data
if (!newData.find(query)[0]) {
// Removed item detected
removed.push(dataItem);
}
}
if (this.debug()) {
console.log('ForerunnerDB.OldView: Removed ' + removed.length + ' rows');
console.log('ForerunnerDB.OldView: Inserted ' + inserted.length + ' rows');
console.log('ForerunnerDB.OldView: Updated ' + updated.length + ' rows');
}
// Now we have a diff of the two data sets, we need to get the DOM updated
if (inserted.length) {
this._onInsert(inserted, []);
operated = true;
}
if (updated.length) {
this._onUpdate(updated, []);
operated = true;
}
if (removed.length) {
this._onRemove(removed, []);
operated = true;
}
} else {
// The previous data and the new data are derived from different collections
// and can therefore not be compared, all data is therefore effectively "new"
// so first perform a remove of all existing data then do an insert on all new data
if (this.debug()) {
console.log('ForerunnerDB.OldView: Old and new data are from different collections...');
}
removed = oldData.find();
if (removed.length) {
this._onRemove(removed);
operated = true;
}
inserted = newData.find();
if (inserted.length) {
this._onInsert(inserted);
operated = true;
}
}
} else {
// Force an update as if the view never got created by passing all elements
// to the insert
if (this.debug()) {
console.log('ForerunnerDB.OldView: Forcing data update', newDataArr);
}
this._data = this._from.subset(this._query.query, this._query.options);
newDataArr = this._data.find();
if (this.debug()) {
console.log('ForerunnerDB.OldView: Emitting change event with data', newDataArr);
}
this._onInsert(newDataArr, []);
}
if (this.debug()) { console.log('ForerunnerDB.OldView: Emitting change'); }
this.emit('change');
}
return this;
};
/**
* Returns the number of documents currently in the view.
* @returns {Number}
*/
OldView.prototype.count = function () {
return this._data && this._data._data ? this._data._data.length : 0;
};
/**
* Queries the view data. See Collection.find() for more information.
* @returns {*}
*/
OldView.prototype.find = function () {
if (this._data) {
if (this.debug()) {
console.log('ForerunnerDB.OldView: Finding data in view collection...', this._data);
}
return this._data.find.apply(this._data, arguments);
} else {
return [];
}
};
/**
* Inserts into view data via the view collection. See Collection.insert() for more information.
* @returns {*}
*/
OldView.prototype.insert = function () {
if (this._from) {
// Pass the args through to the from object
return this._from.insert.apply(this._from, arguments);
} else {
return [];
}
};
/**
* Updates documents in the view data via the view collection. See Collection.update() for more information.
* @returns {*}
*/
OldView.prototype.update = function () {
if (this._from) {
// Pass the args through to the from object
return this._from.update.apply(this._from, arguments);
} else {
return [];
}
};
/**
* Removes documents from the view data via the view collection. See Collection.remove() for more information.
* @returns {*}
*/
OldView.prototype.remove = function () {
if (this._from) {
// Pass the args through to the from object
return this._from.remove.apply(this._from, arguments);
} else {
return [];
}
};
OldView.prototype._onSetData = function (newDataArr, oldDataArr) {
this.emit('remove', oldDataArr, []);
this.emit('insert', newDataArr, []);
//this.refresh();
};
OldView.prototype._onInsert = function (successArr, failArr) {
this.emit('insert', successArr, failArr);
//this.refresh();
};
OldView.prototype._onUpdate = function (successArr, failArr) {
this.emit('update', successArr, failArr);
//this.refresh();
};
OldView.prototype._onRemove = function (successArr, failArr) {
this.emit('remove', successArr, failArr);
//this.refresh();
};
OldView.prototype._onChange = function () {
if (this.debug()) { console.log('ForerunnerDB.OldView: Refreshing data'); }
this.refresh();
};
// Extend collection with view init
Collection.prototype.init = function () {
this._oldViews = [];
CollectionInit.apply(this, arguments);
};
/**
* Adds a view to the internal view lookup.
* @param {View} view The view to add.
* @returns {Collection}
* @private
*/
Collection.prototype._addOldView = function (view) {
if (view !== undefined) {
this._oldViews[view._name] = view;
}
return this;
};
/**
* Removes a view from the internal view lookup.
* @param {View} view The view to remove.
* @returns {Collection}
* @private
*/
Collection.prototype._removeOldView = function (view) {
if (view !== undefined) {
delete this._oldViews[view._name];
}
return this;
};
// Extend collection with view init
CollectionGroup.prototype.init = function () {
this._oldViews = [];
CollectionGroupInit.apply(this, arguments);
};
/**
* Adds a view to the internal view lookup.
* @param {View} view The view to add.
* @returns {Collection}
* @private
*/
CollectionGroup.prototype._addOldView = function (view) {
if (view !== undefined) {
this._oldViews[view._name] = view;
}
return this;
};
/**
* Removes a view from the internal view lookup.
* @param {View} view The view to remove.
* @returns {Collection}
* @private
*/
CollectionGroup.prototype._removeOldView = function (view) {
if (view !== undefined) {
delete this._oldViews[view._name];
}
return this;
};
// Extend DB with views init
Core.prototype.init = function () {
this._oldViews = {};
CoreInit.apply(this, arguments);
};
/**
* Gets a view by its name.
* @param {String} viewName The name of the view to retrieve.
* @returns {*}
*/
Core.prototype.oldView = function (viewName) {
if (!this._oldViews[viewName]) {
if (this.debug()) {
console.log('ForerunnerDB.OldView: Creating view ' + viewName);
}
}
this._oldViews[viewName] = this._oldViews[viewName] || new OldView(viewName).db(this);
return this._oldViews[viewName];
};
/**
* Determine if a view with the passed name already exists.
* @param {String} viewName The name of the view to check for.
* @returns {boolean}
*/
Core.prototype.oldViewExists = function (viewName) {
return Boolean(this._oldViews[viewName]);
};
/**
* Returns an array of views the DB currently has.
* @returns {Array} An array of objects containing details of each view
* the database is currently managing.
*/
Core.prototype.oldViews = function () {
var arr = [],
i;
for (i in this._oldViews) {
if (this._oldViews.hasOwnProperty(i)) {
arr.push({
name: i,
count: this._oldViews[i].count()
});
}
}
return arr;
};
Shared.finishModule('OldView');
module.exports = OldView;
},{"./Collection":3,"./CollectionGroup":4,"./Shared":29}],23:[function(_dereq_,module,exports){
"use strict";
var Shared = _dereq_('./Shared'),
Path = _dereq_('./Path');
/**
* The operation class, used to store details about an operation being
* performed by the database.
* @param {String} name The name of the operation.
* @constructor
*/
var Operation = function (name) {
this.pathSolver = new Path();
this.counter = 0;
this.init.apply(this, arguments);
};
Operation.prototype.init = function (name) {
this._data = {
operation: name, // The name of the operation executed such as "find", "update" etc
index: {
potential: [], // Indexes that could have potentially been used
used: false // The index that was picked to use
},
steps: [], // The steps taken to generate the query results,
time: {
startMs: 0,
stopMs: 0,
totalMs: 0,
process: {}
},
flag: {}, // An object with flags that denote certain execution paths
log: [] // Any extra data that might be useful such as warnings or helpful hints
};
};
Shared.addModule('Operation', Operation);
Shared.mixin(Operation.prototype, 'Mixin.ChainReactor');
/**
* Starts the operation timer.
*/
Operation.prototype.start = function () {
this._data.time.startMs = new Date().getTime();
};
/**
* Adds an item to the operation log.
* @param {String} event The item to log.
* @returns {*}
*/
Operation.prototype.log = function (event) {
if (event) {
var lastLogTime = this._data.log.length > 0 ? this._data.log[this._data.log.length - 1].time : 0,
logObj = {
event: event,
time: new Date().getTime(),
delta: 0
};
this._data.log.push(logObj);
if (lastLogTime) {
logObj.delta = logObj.time - lastLogTime;
}
return this;
}
return this._data.log;
};
/**
* Called when starting and ending a timed operation, used to time
* internal calls within an operation's execution.
* @param {String} section An operation name.
* @returns {*}
*/
Operation.prototype.time = function (section) {
if (section !== undefined) {
var process = this._data.time.process,
processObj = process[section] = process[section] || {};
if (!processObj.startMs) {
// Timer started
processObj.startMs = new Date().getTime();
processObj.stepObj = {
name: section
};
this._data.steps.push(processObj.stepObj);
} else {
processObj.stopMs = new Date().getTime();
processObj.totalMs = processObj.stopMs - processObj.startMs;
processObj.stepObj.totalMs = processObj.totalMs;
delete processObj.stepObj;
}
return this;
}
return this._data.time;
};
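/* Illustrative sketch: time() is called twice with the same section name,
* once to open and once to close the timer for that step.
*
*   op.time('lookup');  // starts timing the "lookup" step
*   // ... perform the lookup work ...
*   op.time('lookup');  // stops it and records totalMs against the step
*/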
/**
* Used to set key/value flags during operation execution.
* @param {String} key
* @param {String} val
* @returns {*}
*/
Operation.prototype.flag = function (key, val) {
if (key !== undefined && val !== undefined) {
this._data.flag[key] = val;
} else if (key !== undefined) {
return this._data.flag[key];
} else {
return this._data.flag;
}
};
Operation.prototype.data = function (path, val, noTime) {
if (val !== undefined) {
// Assign value to object path
this.pathSolver.set(this._data, path, val);
return this;
}
return this.pathSolver.get(this._data, path);
};
Operation.prototype.pushData = function (path, val, noTime) {
// Assign value to object path
this.pathSolver.push(this._data, path, val);
};
/**
* Stops the operation timer.
*/
Operation.prototype.stop = function () {
this._data.time.stopMs = new Date().getTime();
this._data.time.totalMs = this._data.time.stopMs - this._data.time.startMs;
};
Shared.finishModule('Operation');
module.exports = Operation;
},{"./Path":26,"./Shared":29}],24:[function(_dereq_,module,exports){
"use strict";
/**
* Provides method overloading: the returned function inspects the arguments it
* receives and dispatches to the matching handler from the passed definition.
* @param {Object|Array} def A map of comma-separated type-signature strings to
* handler functions, or an array of handlers selected by argument count.
* @returns {Function}
* @constructor
*/
var Overload = function (def) {
if (def) {
var self = this,
index,
count,
tmpDef,
defNewKey,
sigIndex,
signatures;
if (!(def instanceof Array)) {
tmpDef = {};
// Def is an object, make sure all prop names are devoid of spaces
for (index in def) {
if (def.hasOwnProperty(index)) {
defNewKey = index.replace(/ /g, '');
// Check if the definition array has a * string in it
if (defNewKey.indexOf('*') === -1) {
// No * found
tmpDef[defNewKey] = def[index];
} else {
// A * was found, generate the different signatures that this
// definition could represent
signatures = this.generateSignaturePermutations(defNewKey);
for (sigIndex = 0; sigIndex < signatures.length; sigIndex++) {
if (!tmpDef[signatures[sigIndex]]) {
tmpDef[signatures[sigIndex]] = def[index];
}
}
}
}
}
def = tmpDef;
}
return function () {
var arr = [],
lookup,
type;
// Check if we are being passed a key/function object or an array of functions
if (def instanceof Array) {
// We were passed an array of functions
count = def.length;
for (index = 0; index < count; index++) {
if (def[index].length === arguments.length) {
return self.callExtend(this, '$main', def, def[index], arguments);
}
}
} else {
// Generate lookup key from arguments
// Copy arguments to an array
for (index = 0; index < arguments.length; index++) {
type = typeof arguments[index];
// Handle detecting arrays
if (type === 'object' && arguments[index] instanceof Array) {
type = 'array';
}
// Add the type to the argument types array
arr.push(type);
}
lookup = arr.join(',');
// Check for an exact lookup match
if (def[lookup]) {
return self.callExtend(this, '$main', def, def[lookup], arguments);
} else {
for (index = arr.length; index >= 0; index--) {
// Get the closest match
lookup = arr.slice(0, index).join(',');
if (def[lookup + ',...']) {
// Matched against arguments + "any other"
return self.callExtend(this, '$main', def, def[lookup + ',...'], arguments);
}
}
}
}
throw('ForerunnerDB.Overload "' + this.name() + '": Overloaded method does not have a matching signature for the passed arguments: ' + JSON.stringify(arr));
};
}
return function () {};
};
/**
* Generates an array of all the different definition signatures that can be
* created from the passed string with a catch-all wildcard *. E.g. it will
* convert the signature: string,*,string to all potentials:
* string,string,string
* string,number,string
* string,object,string
* string,function,string
* string,undefined,string
*
* @param {String} str Signature string with a wildcard in it.
* @returns {Array} An array of signature strings that are generated.
*/
Overload.prototype.generateSignaturePermutations = function (str) {
var signatures = [],
newSignature,
types = ['string', 'object', 'number', 'function', 'undefined'],
index;
if (str.indexOf('*') > -1) {
// There is at least one "any" type, break out into multiple keys
// We could do this at query time with regular expressions but
// would be significantly slower
for (index = 0; index < types.length; index++) {
newSignature = str.replace('*', types[index]);
signatures = signatures.concat(this.generateSignaturePermutations(newSignature));
}
} else {
signatures.push(str);
}
return signatures;
};
Overload.prototype.callExtend = function (context, prop, propContext, func, args) {
var tmp,
ret;
if (context && propContext[prop]) {
tmp = context[prop];
context[prop] = propContext[prop];
ret = func.apply(context, args);
context[prop] = tmp;
return ret;
} else {
return func.apply(context, args);
}
};
module.exports = Overload;
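// Illustrative usage sketch (not part of the library source). Overload keys are
// comma-separated argument-type signatures; a '*' acts as a wildcard that is expanded
// into one key per primitive type by generateSignaturePermutations():
//
//   var greet = new Overload({
//     'string': function (name) { return 'Hello ' + name; },
//     'string,number': function (name, times) { return name + ' x' + times; },
//     '*': function (anything) { return anything; }
//   });
//
//   greet('Bob');      // matches the 'string' signature
//   greet('Bob', 3);   // matches the 'string,number' signature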
},{}],25:[function(_dereq_,module,exports){
"use strict";
// Import external names locally
var Shared,
Core,
CoreInit,
Collection,
DbDocument;
Shared = _dereq_('./Shared');
var Overview = function () {
this.init.apply(this, arguments);
};
Overview.prototype.init = function (name) {
var self = this;
this._name = name;
this._data = new DbDocument('__FDB__dc_data_' + this._name);
this._collData = new Collection();
this._collections = [];
this._collectionDroppedWrap = function () {
self._collectionDropped.apply(self, arguments);
};
};
Shared.addModule('Overview', Overview);
Shared.mixin(Overview.prototype, 'Mixin.Common');
Shared.mixin(Overview.prototype, 'Mixin.ChainReactor');
Shared.mixin(Overview.prototype, 'Mixin.Constants');
Shared.mixin(Overview.prototype, 'Mixin.Triggers');
Shared.mixin(Overview.prototype, 'Mixin.Events');
Collection = _dereq_('./Collection');
DbDocument = _dereq_('./Document');
Core = Shared.modules.Core;
CoreInit = Core.prototype.init;
/**
* Gets / sets the current state.
* @param {String=} val The name of the state to set.
* @returns {*}
*/
Shared.synthesize(Overview.prototype, 'state');
Shared.synthesize(Overview.prototype, 'db');
Shared.synthesize(Overview.prototype, 'name');
Shared.synthesize(Overview.prototype, 'query', function (val) {
var ret = this.$super(val);
if (val !== undefined) {
this._refresh();
}
return ret;
});
Shared.synthesize(Overview.prototype, 'queryOptions', function (val) {
var ret = this.$super(val);
if (val !== undefined) {
this._refresh();
}
return ret;
});
Shared.synthesize(Overview.prototype, 'reduce', function (val) {
var ret = this.$super(val);
if (val !== undefined) {
this._refresh();
}
return ret;
});
Overview.prototype.from = function (collection) {
if (collection !== undefined) {
if (typeof(collection) === 'string') {
collection = this._db.collection(collection);
}
this._addCollection(collection);
return this;
}
return this._collections;
};
Overview.prototype.find = function () {
return this._collData.find.apply(this._collData, arguments);
};
Overview.prototype.count = function () {
return this._collData.count.apply(this._collData, arguments);
};
Overview.prototype._addCollection = function (collection) {
if (this._collections.indexOf(collection) === -1) {
this._collections.push(collection);
collection.chain(this);
collection.on('drop', this._collectionDroppedWrap);
this._refresh();
}
return this;
};
Overview.prototype._removeCollection = function (collection) {
var collectionIndex = this._collections.indexOf(collection);
if (collectionIndex > -1) {
this._collections.splice(collectionIndex, 1);
collection.unChain(this);
collection.off('drop', this._collectionDroppedWrap);
this._refresh();
}
return this;
};
Overview.prototype._collectionDropped = function (collection) {
if (collection) {
// Collection was dropped, remove from overview
this._removeCollection(collection);
}
};
Overview.prototype._refresh = function () {
if (this._state !== 'dropped') {
if (this._collections && this._collections[0]) {
this._collData.primaryKey(this._collections[0].primaryKey());
var tempArr = [],
i;
for (i = 0; i < this._collections.length; i++) {
tempArr = tempArr.concat(this._collections[i].find(this._query, this._queryOptions));
}
this._collData.setData(tempArr);
}
// Now execute the reduce method
if (this._reduce) {
var reducedData = this._reduce();
// Update the document with the newly returned data
this._data.setData(reducedData);
}
}
};
Overview.prototype._chainHandler = function (chainPacket) {
switch (chainPacket.type) {
case 'setData':
case 'insert':
case 'update':
case 'remove':
this._refresh();
break;
default:
break;
}
};
/**
* Gets the module's internal data collection.
* @returns {Collection}
*/
Overview.prototype.data = function () {
return this._data;
};
Overview.prototype.drop = function () {
if (this._state !== 'dropped') {
this._state = 'dropped';
delete this._data;
delete this._collData;
// Remove all collection references
while (this._collections.length) {
this._removeCollection(this._collections[0]);
}
delete this._collections;
if (this._db && this._name) {
delete this._db._overview[this._name];
}
delete this._name;
this.emit('drop', this);
}
return true;
};
// Extend DB to include overviews
Core.prototype.init = function () {
this._overview = {};
CoreInit.apply(this, arguments);
};
Core.prototype.overview = function (overviewName) {
if (overviewName) {
this._overview[overviewName] = this._overview[overviewName] || new Overview(overviewName).db(this);
return this._overview[overviewName];
} else {
// Return an object of collection data
return this._overview;
}
};
Shared.finishModule('Overview');
module.exports = Overview;
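// Illustrative usage sketch (not part of the library source; the db instance is
// hypothetical). An overview aggregates one or more collections into an internal
// collection and optionally reduces them into a single result document:
//
//   var ov = db.overview('euSales');
//   ov.from('sales');                     // collection instance or name
//   ov.query({ region: 'EU' });           // constrain which documents are pulled in
//   ov.reduce(function () {
//     return { total: this.count() };     // runs with the overview as "this"
//   });
//   ov.data();                            // the document holding the reduced result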
},{"./Collection":3,"./Document":7,"./Shared":29}],26:[function(_dereq_,module,exports){
"use strict";
var Shared = _dereq_('./Shared');
/**
* Path object used to resolve object paths and retrieve data from
* objects by using paths.
* @param {String=} path The path to assign.
* @constructor
*/
var Path = function (path) {
this.init.apply(this, arguments);
};
Path.prototype.init = function (path) {
if (path) {
this.path(path);
}
};
Shared.addModule('Path', Path);
Shared.mixin(Path.prototype, 'Mixin.ChainReactor');
/**
* Gets / sets the given path for the Path instance.
* @param {String=} path The path to assign.
*/
Path.prototype.path = function (path) {
if (path !== undefined) {
this._path = this.clean(path);
this._pathParts = this._path.split('.');
return this;
}
return this._path;
};
/**
* Tests if the passed object has the paths that are specified and that
* a value exists in those paths.
* @param {Object} testKeys The object describing the paths to test for.
* @param {Object} testObj The object to test paths against.
* @returns {Boolean} True if the object paths exist.
*/
Path.prototype.hasObjectPaths = function (testKeys, testObj) {
var result = true,
i;
for (i in testKeys) {
if (testKeys.hasOwnProperty(i)) {
if (testObj[i] === undefined) {
return false;
}
if (typeof testKeys[i] === 'object') {
// Recurse object
result = this.hasObjectPaths(testKeys[i], testObj[i]);
// Should we exit early?
if (!result) {
return false;
}
}
}
}
return result;
};
/**
* Counts the total number of key endpoints in the passed object.
* @param {Object} testObj The object to count key endpoints for.
* @returns {Number} The number of endpoints.
*/
Path.prototype.countKeys = function (testObj) {
var totalKeys = 0,
i;
for (i in testObj) {
if (testObj.hasOwnProperty(i)) {
if (testObj[i] !== undefined) {
if (typeof testObj[i] !== 'object') {
totalKeys++;
} else {
totalKeys += this.countKeys(testObj[i]);
}
}
}
}
return totalKeys;
};
/**
* Tests if the passed object has the paths that are specified and that
* a value exists in those paths and if so returns the number matched.
* @param {Object} testKeys The object describing the paths to test for.
* @param {Object} testObj The object to test paths against.
* @returns {Object} Stats on the matched keys
*/
Path.prototype.countObjectPaths = function (testKeys, testObj) {
var matchData,
matchedKeys = {},
matchedKeyCount = 0,
totalKeyCount = 0,
i;
for (i in testObj) {
if (testObj.hasOwnProperty(i)) {
if (typeof testObj[i] === 'object') {
// The test / query object key is an object, recurse
matchData = this.countObjectPaths(testKeys[i], testObj[i]);
matchedKeys[i] = matchData.matchedKeys;
totalKeyCount += matchData.totalKeyCount;
matchedKeyCount += matchData.matchedKeyCount;
} else {
// The test / query object has a property that is not an object so add it as a key
totalKeyCount++;
// Check if the test keys also have this key and it is also not an object
if (testKeys && testKeys[i] && typeof testKeys[i] !== 'object') {
matchedKeys[i] = true;
matchedKeyCount++;
} else {
matchedKeys[i] = false;
}
}
}
}
return {
matchedKeys: matchedKeys,
matchedKeyCount: matchedKeyCount,
totalKeyCount: totalKeyCount
};
};
/**
* Takes a non-recursive object and converts the object hierarchy into
* an array of dot-notation path strings.
* @param {Object} obj The object to parse.
* @param {Boolean=} withValue If true will include a 'value' key in each returned
* entry that represents the value the object path points to.
* @returns {Array} An array of objects describing each path (and value, if requested).
*/
Path.prototype.parse = function (obj, withValue) {
var paths = [],
path = '',
resultData,
i, k;
for (i in obj) {
if (obj.hasOwnProperty(i)) {
// Set the path to the key
path = i;
if (typeof(obj[i]) === 'object') {
if (withValue) {
resultData = this.parse(obj[i], withValue);
for (k = 0; k < resultData.length; k++) {
paths.push({
path: path + '.' + resultData[k].path,
value: resultData[k].value
});
}
} else {
resultData = this.parse(obj[i]);
for (k = 0; k < resultData.length; k++) {
paths.push({
path: path + '.' + resultData[k].path
});
}
}
} else {
if (withValue) {
paths.push({
path: path,
value: obj[i]
});
} else {
paths.push({
path: path
});
}
}
}
}
return paths;
};
/**
* Takes a non-recursive object and converts the object hierarchy into
* an array of path strings that allow you to target all possible paths
* in an object.
*
* @returns {Array}
*/
Path.prototype.parseArr = function (obj, options) {
options = options || {};
return this._parseArr(obj, '', [], options);
};
Path.prototype._parseArr = function (obj, path, paths, options) {
var i,
newPath = '';
path = path || '';
paths = paths || [];
for (i in obj) {
if (obj.hasOwnProperty(i)) {
if (!options.ignore || (options.ignore && !options.ignore.test(i))) {
if (path) {
newPath = path + '.' + i;
} else {
newPath = i;
}
if (typeof(obj[i]) === 'object') {
this._parseArr(obj[i], newPath, paths, options);
} else {
paths.push(newPath);
}
}
}
}
return paths;
};
/**
* Gets the value(s) that the object contains for the currently assigned path string.
* @param {Object} obj The object to evaluate the path against.
* @param {String=} path A path to use instead of the existing one passed in path().
* @returns {Array} An array of values for the given path.
*/
Path.prototype.value = function (obj, path) {
if (obj !== undefined && typeof obj === 'object') {
var pathParts,
arr,
arrCount,
objPart,
objPartParent,
valuesArr = [],
i, k;
if (path !== undefined) {
path = this.clean(path);
pathParts = path.split('.');
}
arr = pathParts || this._pathParts;
arrCount = arr.length;
objPart = obj;
for (i = 0; i < arrCount; i++) {
objPart = objPart[arr[i]];
if (objPartParent instanceof Array) {
// Search inside the array for the next key
for (k = 0; k < objPartParent.length; k++) {
valuesArr = valuesArr.concat(this.value(objPartParent, k + '.' + arr[i]));
}
return valuesArr;
} else {
if (!objPart || typeof(objPart) !== 'object') {
break;
}
}
objPartParent = objPart;
}
return [objPart];
} else {
return [];
}
};
/**
* Sets a value on an object for the specified path.
* @param {Object} obj The object to update.
* @param {String} path The path to update.
* @param {*} val The value to set the object path to.
* @returns {*}
*/
Path.prototype.set = function (obj, path, val) {
if (obj !== undefined && path !== undefined) {
var pathParts,
part;
path = this.clean(path);
pathParts = path.split('.');
part = pathParts.shift();
if (pathParts.length) {
// Generate the path part in the object if it does not already exist
obj[part] = obj[part] || {};
// Recurse
this.set(obj[part], pathParts.join('.'), val);
} else {
// Set the value
obj[part] = val;
}
}
return obj;
};
Path.prototype.get = function (obj, path) {
return this.value(obj, path)[0];
};
/**
* Push a value to an array on an object for the specified path.
* @param {Object} obj The object to update.
* @param {String} path The path to the array to push to.
* @param {*} val The value to push to the array at the object path.
* @returns {*}
*/
Path.prototype.push = function (obj, path, val) {
if (obj !== undefined && path !== undefined) {
var pathParts,
part;
path = this.clean(path);
pathParts = path.split('.');
part = pathParts.shift();
if (pathParts.length) {
// Generate the path part in the object if it does not already exist
obj[part] = obj[part] || {};
// Recurse
this.push(obj[part], pathParts.join('.'), val);
} else {
// Set the value
obj[part] = obj[part] || [];
if (obj[part] instanceof Array) {
obj[part].push(val);
} else {
throw('ForerunnerDB.Path: Cannot push to a path whose endpoint is not an array!');
}
}
}
return obj;
};
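// Illustrative usage sketch (not part of the library source) showing how set(), get()
// and push() resolve dot-notation paths, creating intermediate objects as needed:
//
//   var path = new Path();
//   var obj = {};
//   path.set(obj, 'user.address.city', 'Berlin');   // obj.user.address.city === 'Berlin'
//   path.get(obj, 'user.address.city');             // 'Berlin'
//   path.push(obj, 'user.tags', 'admin');           // obj.user.tags === ['admin']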
/**
* Gets the value(s) that the object contains for the currently assigned path string
* with their associated keys.
* @param {Object} obj The object to evaluate the path against.
* @param {String=} path A path to use instead of the existing one passed in path().
* @returns {Array} An array of values for the given path with the associated key.
*/
Path.prototype.keyValue = function (obj, path) {
var pathParts,
arr,
arrCount,
objPart,
objPartParent,
objPartHash,
i;
if (path !== undefined) {
path = this.clean(path);
pathParts = path.split('.');
}
arr = pathParts || this._pathParts;
arrCount = arr.length;
objPart = obj;
for (i = 0; i < arrCount; i++) {
objPart = objPart[arr[i]];
if (!objPart || typeof(objPart) !== 'object') {
objPartHash = arr[i] + ':' + objPart;
break;
}
objPartParent = objPart;
}
return objPartHash;
};
/**
* Removes leading period (.) from string and returns it.
* @param {String} str The string to clean.
* @returns {*}
*/
Path.prototype.clean = function (str) {
if (str.substr(0, 1) === '.') {
str = str.substr(1, str.length -1);
}
return str;
};
Shared.finishModule('Path');
module.exports = Path;
},{"./Shared":29}],27:[function(_dereq_,module,exports){
"use strict";
// TODO: Add doc comments to this class
// Import external names locally
var Shared = _dereq_('./Shared'),
localforage = _dereq_('localforage'),
Core,
Collection,
CollectionDrop,
CollectionGroup,
CollectionInit,
CoreInit,
Persist,
Overload;
Persist = function () {
this.init.apply(this, arguments);
};
Persist.prototype.init = function (db) {
// Check environment
if (db.isClient()) {
if (window.Storage !== undefined) {
this.mode('localforage');
localforage.config({
driver: [
localforage.INDEXEDDB,
localforage.WEBSQL,
localforage.LOCALSTORAGE
],
name: 'ForerunnerDB',
storeName: 'FDB'
});
}
}
};
Shared.addModule('Persist', Persist);
Shared.mixin(Persist.prototype, 'Mixin.ChainReactor');
Core = Shared.modules.Core;
Collection = _dereq_('./Collection');
CollectionDrop = Collection.prototype.drop;
CollectionGroup = _dereq_('./CollectionGroup');
CollectionInit = Collection.prototype.init;
CoreInit = Core.prototype.init;
Overload = Shared.overload;
Persist.prototype.mode = function (type) {
if (type !== undefined) {
this._mode = type;
return this;
}
return this._mode;
};
Persist.prototype.driver = function (val) {
if (val !== undefined) {
switch (val.toUpperCase()) {
case 'LOCALSTORAGE':
localforage.setDriver(localforage.LOCALSTORAGE);
break;
case 'WEBSQL':
localforage.setDriver(localforage.WEBSQL);
break;
case 'INDEXEDDB':
localforage.setDriver(localforage.INDEXEDDB);
break;
default:
throw('ForerunnerDB.Persist: The persistence driver you have specified is not found. Please use either IndexedDB, WebSQL or LocalStorage!');
}
return this;
}
return localforage.driver();
};
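// Illustrative usage sketch (not part of the library source; the db instance is
// hypothetical). The driver name is matched case-insensitively against the three
// supported localforage drivers:
//
//   db.persist.driver('IndexedDB');    // or 'WebSQL' / 'LocalStorage'
//   db.persist.driver();               // returns the localforage driver in use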
Persist.prototype.save = function (key, data, callback) {
var encode;
encode = function (val, finished) {
if (typeof val === 'object') {
val = 'json::fdb::' + JSON.stringify(val);
} else {
val = 'raw::fdb::' + val;
}
if (finished) {
finished(false, val);
}
};
switch (this.mode()) {
case 'localforage':
encode(data, function (err, data) {
localforage.setItem(key, data).then(function (data) {
callback(false, data);
}, function (err) {
callback(err);
});
});
break;
default:
if (callback) {
callback('No data handler.');
}
break;
}
};
Persist.prototype.load = function (key, callback) {
var parts,
data,
decode;
decode = function (val, finished) {
if (val) {
parts = val.split('::fdb::');
switch (parts[0]) {
case 'json':
data = JSON.parse(parts[1]);
break;
case 'raw':
data = parts[1];
break;
default:
break;
}
if (finished) {
finished(false, data);
}
} else {
finished(false, val);
}
};
switch (this.mode()) {
case 'localforage':
localforage.getItem(key).then(function (val) {
decode(val, callback);
}, function (err) {
callback(err);
});
break;
default:
if (callback) {
callback('No data handler or unrecognised data type.');
}
break;
}
};
Persist.prototype.drop = function (key, callback) {
switch (this.mode()) {
case 'localforage':
localforage.removeItem(key).then(function () {
callback(false);
}, function (err) {
callback(err);
});
break;
default:
if (callback) {
callback('No data handler or unrecognised data type.');
}
break;
}
};
// Extend the Collection prototype with persist methods
Collection.prototype.drop = new Overload({
/**
* Drop collection and persistent storage.
*/
'': function () {
if (this._state !== 'dropped') {
this.drop(true);
}
},
/**
* Drop collection and persistent storage with callback.
* @param {Function} callback Callback method.
*/
'function': function (callback) {
if (this._state !== 'dropped') {
this.drop(true, callback);
}
},
/**
* Drop collection and optionally drop persistent storage.
* @param {Boolean} removePersistent True to drop persistent storage, false to keep it.
*/
'boolean': function (removePersistent) {
if (this._state !== 'dropped') {
// Remove persistent storage
if (removePersistent) {
if (this._name) {
if (this._db) {
// Drop the collection's persistent storage
this._db.persist.drop(this._name);
} else {
throw('ForerunnerDB.Persist: Cannot drop a collection\'s persistent storage when the collection is not attached to a database!');
}
} else {
throw('ForerunnerDB.Persist: Cannot drop a collection\'s persistent storage when no name assigned to collection!');
}
}
// Call the original method
CollectionDrop.apply(this, arguments);
}
},
/**
* Drop collection and optionally drop persistent storage with callback.
* @param {Boolean} removePersistent True to drop persistent storage, false to keep it.
* @param {Function} callback Callback method.
*/
'boolean, function': function (removePersistent, callback) {
if (this._state !== 'dropped') {
// Remove persistent storage
if (removePersistent) {
if (this._name) {
if (this._db) {
// Drop the collection's persistent storage
this._db.persist.drop(this._name, callback);
} else {
if (callback) {
callback('Cannot drop a collection\'s persistent storage when the collection is not attached to a database!');
}
}
} else {
if (callback) {
callback('Cannot drop a collection\'s persistent storage when no name assigned to collection!');
}
}
}
// Call the original method
CollectionDrop.apply(this, arguments);
}
}
});
Collection.prototype.save = function (callback) {
if (this._name) {
if (this._db) {
// Save the collection data
this._db.persist.save(this._name, this._data, callback);
} else {
if (callback) {
callback('Cannot save a collection that is not attached to a database!');
}
}
} else {
if (callback) {
callback('Cannot save a collection with no assigned name!');
}
}
};
Collection.prototype.load = function (callback) {
var self = this;
if (this._name) {
if (this._db) {
// Load the collection data
this._db.persist.load(this._name, function (err, data) {
if (!err) {
if (data) {
self.setData(data);
}
if (callback) {
callback(false);
}
} else {
if (callback) {
callback(err);
}
}
});
} else {
if (callback) {
callback('Cannot load a collection that is not attached to a database!');
}
}
} else {
if (callback) {
callback('Cannot load a collection with no assigned name!');
}
}
};
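// Illustrative usage sketch (not part of the library source; the db and collection
// instances are hypothetical). A named collection attached to a database can be
// persisted and restored individually, or the whole database can be saved/loaded:
//
//   var users = db.collection('users');
//   users.save(function (err) { if (!err) { console.log('users persisted'); } });
//   users.load(function (err) { if (!err) { console.log('users restored'); } });
//
//   db.save(function (err) {});   // persists every collection in the database
//   db.load(function (err) {});   // restores every collection in the database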
// Override the DB init to instantiate the plugin
Core.prototype.init = function () {
this.persist = new Persist(this);
CoreInit.apply(this, arguments);
};
Core.prototype.load = function (callback) {
// Loop the collections in the database
var obj = this._collection,
keys = Object.keys(obj),
keyCount = keys.length,
loadCallback,
index;
loadCallback = function (err) {
if (!err) {
keyCount--;
if (keyCount === 0) {
callback(false);
}
} else {
callback(err);
}
};
for (index in obj) {
if (obj.hasOwnProperty(index)) {
// Call the collection load method
obj[index].load(loadCallback);
}
}
};
Core.prototype.save = function (callback) {
// Loop the collections in the database
var obj = this._collection,
keys = Object.keys(obj),
keyCount = keys.length,
saveCallback,
index;
saveCallback = function (err) {
if (!err) {
keyCount--;
if (keyCount === 0) {
callback(false);
}
} else {
callback(err);
}
};
for (index in obj) {
if (obj.hasOwnProperty(index)) {
// Call the collection save method
obj[index].save(saveCallback);
}
}
};
Shared.finishModule('Persist');
module.exports = Persist;
},{"./Collection":3,"./CollectionGroup":4,"./Shared":29,"localforage":38}],28:[function(_dereq_,module,exports){
"use strict";
var Shared = _dereq_('./Shared');
var ReactorIO = function (reactorIn, reactorOut, reactorProcess) {
if (reactorIn && reactorOut && reactorProcess) {
this._reactorIn = reactorIn;
this._reactorOut = reactorOut;
this._chainHandler = reactorProcess;
if (!reactorIn.chain || !reactorOut.chainReceive) {
throw('ForerunnerDB.ReactorIO: ReactorIO requires passed in and out objects to implement the ChainReactor mixin!');
}
// Register the reactorIO with the input
reactorIn.chain(this);
// Register the output with the reactorIO
this.chain(reactorOut);
} else {
throw('ForerunnerDB.ReactorIO: ReactorIO requires in, out and process arguments to instantiate!');
}
};
Shared.addModule('ReactorIO', ReactorIO);
ReactorIO.prototype.drop = function () {
if (this._state !== 'dropped') {
this._state = 'dropped';
// Remove links
if (this._reactorIn) {
this._reactorIn.unChain(this);
}
if (this._reactorOut) {
this.unChain(this._reactorOut);
}
delete this._reactorIn;
delete this._reactorOut;
delete this._chainHandler;
this.emit('drop', this);
}
return true;
};
/**
* Gets / sets the current state.
* @param {String=} val The name of the state to set.
* @returns {*}
*/
Shared.synthesize(ReactorIO.prototype, 'state');
Shared.mixin(ReactorIO.prototype, 'Mixin.ChainReactor');
Shared.mixin(ReactorIO.prototype, 'Mixin.Events');
Shared.finishModule('ReactorIO');
module.exports = ReactorIO;
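// Illustrative usage sketch (not part of the library source; sourceCollection and
// targetView are hypothetical ChainReactor participants). A ReactorIO node sits
// between two ChainReactor objects and decides whether to forward or swallow each
// chain packet (returning true stops further propagation):
//
//   var io = new ReactorIO(sourceCollection, targetView, function (chainPacket) {
//     if (chainPacket.type === 'insert') {
//       this.chainSend('insert', chainPacket.data);   // forward (possibly modified)
//       return true;                                  // handled, stop propagation
//     }
//     return false;                                   // let the packet flow onwards
//   });
//   // io.drop() detaches the node from both ends when no longer needed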
},{"./Shared":29}],29:[function(_dereq_,module,exports){
"use strict";
var Shared = {
version: '1.3.29',
modules: {},
_synth: {},
/**
* Adds a module to ForerunnerDB.
* @param {String} name The name of the module.
* @param {Function} module The module class.
*/
addModule: function (name, module) {
this.modules[name] = module;
this.emit('moduleLoad', [name, module]);
},
/**
* Called by the module once all processing has been completed. Used to determine
* if the module is ready for use by other modules.
* @param {String} name The name of the module.
*/
finishModule: function (name) {
if (this.modules[name]) {
this.modules[name]._fdbFinished = true;
this.emit('moduleFinished', [name, this.modules[name]]);
} else {
throw('ForerunnerDB.Shared: finishModule called on a module that has not been registered with addModule(): ' + name);
}
},
/**
* Will call your callback method when the specified module has loaded. If the module
* is already loaded the callback is called immediately.
* @param {String} name The name of the module.
* @param {Function} callback The callback method to call when the module is loaded.
*/
moduleFinished: function (name, callback) {
if (this.modules[name] && this.modules[name]._fdbFinished) {
callback(name, this.modules[name]);
} else {
this.on('moduleFinished', callback);
}
},
/**
* Determines if a module has been added to ForerunnerDB or not.
* @param {String} name The name of the module.
* @returns {Boolean} True if the module exists or false if not.
*/
moduleExists: function (name) {
return Boolean(this.modules[name]);
},
/**
* Adds the properties and methods defined in the mixin to the passed object.
* @param {Object} obj The target object to add mixin key/values to.
* @param {String} mixinName The name of the mixin to add to the object.
*/
mixin: function (obj, mixinName) {
var system = this.mixins[mixinName];
if (system) {
for (var i in system) {
if (system.hasOwnProperty(i)) {
obj[i] = system[i];
}
}
} else {
throw('ForerunnerDB.Shared: Cannot find mixin named: ' + mixinName);
}
},
/**
* Generates a generic getter/setter method for the passed method name.
* @param {Object} obj The object to add the getter/setter to.
* @param {String} name The name of the getter/setter to generate.
* @param {Function=} extend A method to call before executing the getter/setter.
* The existing getter/setter can be accessed from the extend method via the
* $super e.g. this.$super();
*/
synthesize: function (obj, name, extend) {
this._synth[name] = this._synth[name] || function (val) {
if (val !== undefined) {
this['_' + name] = val;
return this;
}
return this['_' + name];
};
if (extend) {
var self = this;
obj[name] = function () {
var tmp = this.$super,
ret;
this.$super = self._synth[name];
ret = extend.apply(this, arguments);
this.$super = tmp;
return ret;
};
} else {
obj[name] = this._synth[name];
}
},
/**
* Allows a method to be overloaded.
* @param arr
* @returns {Function}
* @constructor
*/
overload: _dereq_('./Overload'),
/**
* Define the mixins that other modules can use as required.
*/
mixins: {
'Mixin.Common': _dereq_('./Mixin.Common'),
'Mixin.Events': _dereq_('./Mixin.Events'),
'Mixin.ChainReactor': _dereq_('./Mixin.ChainReactor'),
'Mixin.CRUD': _dereq_('./Mixin.CRUD'),
'Mixin.Constants': _dereq_('./Mixin.Constants'),
'Mixin.Triggers': _dereq_('./Mixin.Triggers'),
'Mixin.Sorting': _dereq_('./Mixin.Sorting'),
'Mixin.Matching': _dereq_('./Mixin.Matching')
}
};
// Add event handling to shared
Shared.mixin(Shared, 'Mixin.Events');
module.exports = Shared;
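// Illustrative usage sketch (not part of the library source) for Shared.synthesize(),
// which generates a combined getter/setter backed by an underscore-prefixed property:
//
//   function Widget () {}
//   Shared.synthesize(Widget.prototype, 'name');
//
//   var w = new Widget();
//   w.name('header');   // setter: stores the value on w._name and returns w
//   w.name();           // getter: returns 'header'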
},{"./Mixin.CRUD":13,"./Mixin.ChainReactor":14,"./Mixin.Common":15,"./Mixin.Constants":16,"./Mixin.Events":17,"./Mixin.Matching":18,"./Mixin.Sorting":19,"./Mixin.Triggers":20,"./Overload":24}],30:[function(_dereq_,module,exports){
"use strict";
// Import external names locally
var Shared,
Core,
Collection,
CollectionGroup,
CollectionInit,
CoreInit,
ReactorIO,
ActiveBucket;
Shared = _dereq_('./Shared');
/**
* The view constructor.
* @param {String} name The name of the view.
* @param {Object=} query The query used to constrain the view's data.
* @param {Object=} options The query options (e.g. $orderBy) for the view.
* @constructor
*/
var View = function (name, query, options) {
this.init.apply(this, arguments);
};
View.prototype.init = function (name, query, options) {
var self = this;
this._name = name;
this._listeners = {};
this._querySettings = {};
this._debug = {};
this.query(query, false);
this.queryOptions(options, false);
this._collectionDroppedWrap = function () {
self._collectionDropped.apply(self, arguments);
};
this._privateData = new Collection('__FDB__view_privateData_' + this._name);
};
Shared.addModule('View', View);
Shared.mixin(View.prototype, 'Mixin.Common');
Shared.mixin(View.prototype, 'Mixin.ChainReactor');
Shared.mixin(View.prototype, 'Mixin.Constants');
Shared.mixin(View.prototype, 'Mixin.Triggers');
Collection = _dereq_('./Collection');
CollectionGroup = _dereq_('./CollectionGroup');
ActiveBucket = _dereq_('./ActiveBucket');
ReactorIO = _dereq_('./ReactorIO');
CollectionInit = Collection.prototype.init;
Core = Shared.modules.Core;
CoreInit = Core.prototype.init;
/**
* Gets / sets the current state.
* @param {String=} val The name of the state to set.
* @returns {*}
*/
Shared.synthesize(View.prototype, 'state');
Shared.synthesize(View.prototype, 'name');
/**
* Executes an insert against the view's underlying data-source.
*/
View.prototype.insert = function () {
this._from.insert.apply(this._from, arguments);
};
/**
* Executes an update against the view's underlying data-source.
*/
View.prototype.update = function () {
this._from.update.apply(this._from, arguments);
};
/**
* Executes an updateById against the view's underlying data-source.
*/
View.prototype.updateById = function () {
this._from.updateById.apply(this._from, arguments);
};
/**
* Executes a remove against the view's underlying data-source.
*/
View.prototype.remove = function () {
this._from.remove.apply(this._from, arguments);
};
/**
* Queries the view data. See Collection.find() for more information.
* @returns {*}
*/
View.prototype.find = function (query, options) {
return this.publicData().find(query, options);
};
/**
* Gets the module's internal data collection.
* @returns {Collection}
*/
View.prototype.data = function () {
return this._privateData;
};
/**
* Sets the collection from which the view will assemble its data.
* @param {Collection} collection The collection to use to assemble view data.
* @returns {View}
*/
View.prototype.from = function (collection) {
var self = this;
if (collection !== undefined) {
// Check if we have an existing from
if (this._from) {
// Remove the listener to the drop event
this._from.off('drop', this._collectionDroppedWrap);
delete this._from;
}
if (typeof(collection) === 'string') {
collection = this._db.collection(collection);
}
this._from = collection;
this._from.on('drop', this._collectionDroppedWrap);
// Create a new reactor IO graph node that intercepts chain packets from the
// view's "from" collection and determines how they should be interpreted by
// this view. If the view does not have a query then this reactor IO will
// simply pass along the chain packet without modifying it.
this._io = new ReactorIO(collection, this, function (chainPacket) {
var data,
diff,
query,
filteredData,
doSend,
pk,
i;
// Check if we have a constraining query
if (self._querySettings.query) {
if (chainPacket.type === 'insert') {
data = chainPacket.data;
// Check if the data matches our query
if (data instanceof Array) {
filteredData = [];
for (i = 0; i < data.length; i++) {
if (self._privateData._match(data[i], self._querySettings.query, 'and', {})) {
filteredData.push(data[i]);
doSend = true;
}
}
} else {
if (self._privateData._match(data, self._querySettings.query, 'and', {})) {
filteredData = data;
doSend = true;
}
}
if (doSend) {
this.chainSend('insert', filteredData);
}
return true;
}
if (chainPacket.type === 'update') {
// Do a DB diff between this view's data and the underlying collection it reads from
// to see if something has changed
diff = self._privateData.diff(self._from.subset(self._querySettings.query, self._querySettings.options));
if (diff.insert.length || diff.remove.length) {
// Now send out new chain packets for each operation
if (diff.insert.length) {
this.chainSend('insert', diff.insert);
}
if (diff.update.length) {
pk = self._privateData.primaryKey();
for (i = 0; i < diff.update.length; i++) {
query = {};
query[pk] = diff.update[i][pk];
this.chainSend('update', {
query: query,
update: diff.update[i]
});
}
}
if (diff.remove.length) {
pk = self._privateData.primaryKey();
var $or = [],
removeQuery = {
query: {
$or: $or
}
};
for (i = 0; i < diff.remove.length; i++) {
$or.push({_id: diff.remove[i][pk]});
}
this.chainSend('remove', removeQuery);
}
// Return true to stop further propagation of the chain packet
return true;
} else {
// Returning false informs the chain reactor to continue propagation
// of the chain packet down the graph tree
return false;
}
}
}
// Returning false informs the chain reactor to continue propagation
// of the chain packet down the graph tree
return false;
});
var collData = collection.find(this._querySettings.query, this._querySettings.options);
this._transformPrimaryKey(collection.primaryKey());
this._transformSetData(collData);
this._privateData.primaryKey(collection.primaryKey());
this._privateData.setData(collData);
if (this._querySettings.options && this._querySettings.options.$orderBy) {
this.rebuildActiveBucket(this._querySettings.options.$orderBy);
} else {
this.rebuildActiveBucket();
}
}
return this;
};
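// Illustrative usage sketch (not part of the library source; the db and collection
// names are hypothetical). A view is bound to a source collection and keeps its
// internal collection in sync via the ReactorIO node created above:
//
//   var activeUsers = db.view('activeUsers');
//   activeUsers
//     .from('users')                              // collection instance or name
//     .query({ active: true })                    // constraining query
//     .queryOptions({ $orderBy: { name: 1 } });   // ordering via the active bucket
//
//   activeUsers.find();                           // filtered, ordered documents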
View.prototype._collectionDropped = function (collection) {
if (collection) {
// Collection was dropped, remove from view
delete this._from;
}
};
View.prototype.ensureIndex = function () {
return this._privateData.ensureIndex.apply(this._privateData, arguments);
};
View.prototype._chainHandler = function (chainPacket) {
var //self = this,
arr,
count,
index,
insertIndex,
//tempData,
//dataIsArray,
updates,
//finalUpdates,
primaryKey,
tQuery,
item,
currentIndex,
i;
switch (chainPacket.type) {
case 'setData':
if (this.debug()) {
console.log('ForerunnerDB.View: Setting data on view "' + this.name() + '" in underlying (internal) view collection "' + this._privateData.name() + '"');
}
// Get the new data from our underlying data source sorted as we want
var collData = this._from.find(this._querySettings.query, this._querySettings.options);
// Modify transform data
this._transformSetData(collData);
this._privateData.setData(collData);
break;
case 'insert':
if (this.debug()) {
console.log('ForerunnerDB.View: Inserting some data on view "' + this.name() + '" in underlying (internal) view collection "' + this._privateData.name() + '"');
}
// Decouple the data to ensure we are working with our own copy
chainPacket.data = this.decouple(chainPacket.data);
// Make sure we are working with an array
if (!(chainPacket.data instanceof Array)) {
chainPacket.data = [chainPacket.data];
}
if (this._querySettings.options && this._querySettings.options.$orderBy) {
// Loop the insert data and find each item's index
arr = chainPacket.data;
count = arr.length;
for (index = 0; index < count; index++) {
insertIndex = this._activeBucket.insert(arr[index]);
// Modify transform data
this._transformInsert(chainPacket.data, insertIndex);
this._privateData._insertHandle(chainPacket.data, insertIndex);
}
} else {
// Set the insert index to the passed index, or if none, the end of the view data array
insertIndex = this._privateData._data.length;
// Modify transform data
this._transformInsert(chainPacket.data, insertIndex);
this._privateData._insertHandle(chainPacket.data, insertIndex);
}
break;
case 'update':
if (this.debug()) {
console.log('ForerunnerDB.View: Updating some data on view "' + this.name() + '" in underlying (internal) view collection "' + this._privateData.name() + '"');
}
primaryKey = this._privateData.primaryKey();
// Do the update
updates = this._privateData.update(
chainPacket.data.query,
chainPacket.data.update,
chainPacket.data.options
);
if (this._querySettings.options && this._querySettings.options.$orderBy) {
// TODO: This would be a good place to improve performance by somehow
// TODO: inspecting the change that occurred when update was performed
// TODO: above and determining if it affected the order clause keys
// TODO: and if not, skipping the active bucket updates here
// Loop the updated items and work out their new sort locations
count = updates.length;
for (index = 0; index < count; index++) {
item = updates[index];
// Remove the item from the active bucket (via its id)
this._activeBucket.remove(item);
// Get the current location of the item
currentIndex = this._privateData._data.indexOf(item);
// Add the item back in to the active bucket
insertIndex = this._activeBucket.insert(item);
if (currentIndex !== insertIndex) {
// Move the updated item to the new index
this._privateData._updateSpliceMove(this._privateData._data, currentIndex, insertIndex);
}
}
}
if (this._transformEnabled && this._transformIn) {
primaryKey = this._publicData.primaryKey();
for (i = 0; i < updates.length; i++) {
tQuery = {};
item = updates[i];
tQuery[primaryKey] = item[primaryKey];
this._transformUpdate(tQuery, item);
}
}
break;
case 'remove':
if (this.debug()) {
console.log('ForerunnerDB.View: Removing some data on view "' + this.name() + '" in underlying (internal) view collection "' + this._privateData.name() + '"');
}
// Modify transform data
this._transformRemove(chainPacket.data.query, chainPacket.options);
this._privateData.remove(chainPacket.data.query, chainPacket.options);
break;
default:
break;
}
};
View.prototype.on = function () {
this._privateData.on.apply(this._privateData, arguments);
};
View.prototype.off = function () {
this._privateData.off.apply(this._privateData, arguments);
};
View.prototype.emit = function () {
this._privateData.emit.apply(this._privateData, arguments);
};
/**
* Find the distinct values for a specified field across a single collection and
* returns the results in an array.
* @param {String} key The field path to return distinct values for e.g. "person.name".
* @param {Object=} query The query to use to filter the documents used to return values from.
* @param {Object=} options The query options to use when running the query.
* @returns {Array}
*/
View.prototype.distinct = function (key, query, options) {
return this._privateData.distinct.apply(this._privateData, arguments);
};
/**
* Gets the primary key for this view from the assigned collection.
* @returns {String}
*/
View.prototype.primaryKey = function () {
return this._privateData.primaryKey();
};
/**
* Drops a view and all its stored data from the database.
* @returns {boolean} True on success, false on failure.
*/
View.prototype.drop = function () {
if (this._state !== 'dropped') {
if (this._from) {
this._from.off('drop', this._collectionDroppedWrap);
this._from._removeView(this);
if (this.debug() || (this._db && this._db.debug())) {
console.log('ForerunnerDB.View: Dropping view ' + this._name);
}
this._state = 'dropped';
// Clear io and chains
if (this._io) {
this._io.drop();
}
// Drop the view's internal collection
if (this._privateData) {
this._privateData.drop();
}
if (this._db && this._name) {
delete this._db._view[this._name];
}
this.emit('drop', this);
delete this._chain;
delete this._from;
delete this._privateData;
delete this._io;
delete this._listeners;
delete this._querySettings;
delete this._db;
return true;
}
} else {
return true;
}
return false;
};
/**
* Gets / sets the DB the view is bound against. Automatically set
* when the db.view(viewName) method is called.
* @param db
* @returns {*}
*/
View.prototype.db = function (db) {
if (db !== undefined) {
this._db = db;
this.privateData().db(db);
this.publicData().db(db);
return this;
}
return this._db;
};
/**
* Gets / sets the query that the view uses to build its data set.
* @param {Object=} query The query object to set.
* @param {Object=} options An options object.
* @param {Boolean=} refresh Whether to refresh the view data after
* this operation. Defaults to true.
* @returns {*}
*/
View.prototype.queryData = function (query, options, refresh) {
if (query !== undefined) {
this._querySettings.query = query;
}
if (options !== undefined) {
this._querySettings.options = options;
}
if (query !== undefined || options !== undefined) {
if (refresh === undefined || refresh === true) {
this.refresh();
}
return this;
}
return this._querySettings;
};
/**
* Add data to the existing query.
* @param {Object} obj The data whose keys will be added to the existing
* query object.
* @param {Boolean} overwrite Whether or not to overwrite data that already
* exists in the query object. Defaults to true.
* @param {Boolean=} refresh Whether or not to refresh the view data set
* once the operation is complete. Defaults to true.
*/
View.prototype.queryAdd = function (obj, overwrite, refresh) {
this._querySettings.query = this._querySettings.query || {};
var query = this._querySettings.query,
i;
if (obj !== undefined) {
// Loop object properties and add to existing query
for (i in obj) {
if (obj.hasOwnProperty(i)) {
if (query[i] === undefined || (query[i] !== undefined && overwrite !== false)) {
query[i] = obj[i];
}
}
}
}
if (refresh === undefined || refresh === true) {
this.refresh();
}
};
/**
* Remove data from the existing query.
* @param {Object} obj The data whose keys will be removed from the existing
* query object.
* @param {Boolean=} refresh Whether or not to refresh the view data set
* once the operation is complete. Defaults to true.
*/
View.prototype.queryRemove = function (obj, refresh) {
var query = this._querySettings.query,
i;
if (obj !== undefined) {
// Loop object properties and add to existing query
for (i in obj) {
if (obj.hasOwnProperty(i)) {
delete query[i];
}
}
}
if (refresh === undefined || refresh === true) {
this.refresh();
}
};
/**
* Gets / sets the query being used to generate the view data.
* @param {Object=} query The query to set.
* @param {Boolean=} refresh Whether to refresh the view data after
* this operation. Defaults to true.
* @returns {*}
*/
View.prototype.query = function (query, refresh) {
if (query !== undefined) {
this._querySettings.query = query;
if (refresh === undefined || refresh === true) {
this.refresh();
}
return this;
}
return this._querySettings.query;
};
/**
* Gets / sets the orderBy clause in the query options for the view.
* @param {Object=} val The order object.
* @returns {*}
*/
View.prototype.orderBy = function (val) {
if (val !== undefined) {
var queryOptions = this.queryOptions() || {};
queryOptions.$orderBy = val;
this.queryOptions(queryOptions);
return this;
}
return (this.queryOptions() || {}).$orderBy;
};
/**
* Gets / sets the query options used when applying sorting etc to the
* view data set.
* @param {Object=} options An options object.
* @param {Boolean=} refresh Whether to refresh the view data after
* this operation. Defaults to true.
* @returns {*}
*/
View.prototype.queryOptions = function (options, refresh) {
if (options !== undefined) {
this._querySettings.options = options;
if (options.$decouple === undefined) { options.$decouple = true; }
if (refresh === undefined || refresh === true) {
this.refresh();
} else {
this.rebuildActiveBucket(options.$orderBy);
}
return this;
}
return this._querySettings.options;
};
View.prototype.rebuildActiveBucket = function (orderBy) {
if (orderBy) {
var arr = this._privateData._data,
arrCount = arr.length;
// Build a new active bucket
this._activeBucket = new ActiveBucket(orderBy);
this._activeBucket.primaryKey(this._privateData.primaryKey());
// Loop the current view data and add each item
for (var i = 0; i < arrCount; i++) {
this._activeBucket.insert(arr[i]);
}
} else {
// Remove any existing active bucket
delete this._activeBucket;
}
};
/**
* Refreshes the view data such as ordering etc.
*/
View.prototype.refresh = function () {
if (this._from) {
var pubData = this.publicData();
// Re-grab all the data for the view from the collection
this._privateData.remove();
pubData.remove();
this._privateData.insert(this._from.find(this._querySettings.query, this._querySettings.options));
/*if (pubData._linked) {
// Update data and observers
//var transformedData = this._privateData.find();
// TODO: Shouldn't this data get passed into a transformIn first?
// TODO: This breaks linking because its passing decoupled data and overwriting non-decoupled data
// TODO: Is this even required anymore? After commenting it all seems to work
// TODO: Might be worth setting up a test to check transforms and linking then remove this if working?
//jQuery.observable(pubData._data).refresh(transformedData);
}*/
}
if (this._querySettings.options && this._querySettings.options.$orderBy) {
this.rebuildActiveBucket(this._querySettings.options.$orderBy);
} else {
this.rebuildActiveBucket();
}
return this;
};
/**
* Returns the number of documents currently in the view.
* @returns {Number}
*/
View.prototype.count = function () {
return this._privateData && this._privateData._data ? this._privateData._data.length : 0;
};
// Call underlying
View.prototype.subset = function () {
return this.publicData().subset.apply(this._privateData, arguments);
};
/**
* Takes the passed data and uses it to set transform methods and globally
* enable or disable the transform system for the view.
* @param {Object} obj The new transform system settings "enabled", "dataIn" and "dataOut":
* {
* "enabled": true,
* "dataIn": function (data) { return data; },
* "dataOut": function (data) { return data; }
* }
* @returns {*}
*/
View.prototype.transform = function (obj) {
if (obj !== undefined) {
if (typeof obj === "object") {
if (obj.enabled !== undefined) {
this._transformEnabled = obj.enabled;
}
if (obj.dataIn !== undefined) {
this._transformIn = obj.dataIn;
}
if (obj.dataOut !== undefined) {
this._transformOut = obj.dataOut;
}
} else {
this._transformEnabled = obj !== false;
}
// Update the transformed data object
this._transformPrimaryKey(this.privateData().primaryKey());
this._transformSetData(this.privateData().find());
return this;
}
return {
enabled: this._transformEnabled,
dataIn: this._transformIn,
dataOut: this._transformOut
};
};
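// Illustrative usage sketch (not part of the library source). The exact shape expected
// by dataIn/dataOut is assumed here: each receives a document and returns the version
// to be used as public / private data respectively:
//
//   view.transform({
//     enabled: true,
//     dataIn: function (doc) { doc.total = doc.price * doc.qty; return doc; },
//     dataOut: function (doc) { delete doc.total; return doc; }
//   });
//
//   view.publicData().find();   // transformed documents
//   view.privateData().find();  // untouched source documents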
/**
* Returns the non-transformed data the view holds as a collection
* reference.
* @return {Collection} The non-transformed collection reference.
*/
View.prototype.privateData = function () {
return this._privateData;
};
/**
* Returns a data object representing the public data this view
* contains. This can change depending on if transforms are being
* applied to the view or not.
*
* If no transforms are applied then the public data will be the
* same as the private data the view holds. If transforms are
* applied then the public data will contain the transformed version
* of the private data.
*
* The public data collection is also used by data binding, so only
* changes to the publicData will show in a data-bound element.
*/
View.prototype.publicData = function () {
if (this._transformEnabled) {
return this._publicData;
} else {
return this._privateData;
}
};
/**
* Updates the public data object to match data from the private data object
* by running private data through the dataIn method provided in
* the transform() call.
* @private
*/
View.prototype._transformSetData = function (data) {
if (this._transformEnabled) {
// Clear existing data
this._publicData = new Collection('__FDB__view_publicData_' + this._name);
this._publicData.db(this._privateData._db);
this._publicData.transform({
enabled: true,
dataIn: this._transformIn,
dataOut: this._transformOut
});
this._publicData.setData(data);
}
};
View.prototype._transformInsert = function (data, index) {
if (this._transformEnabled && this._publicData) {
this._publicData.insert(data, index);
}
};
View.prototype._transformUpdate = function (query, update, options) {
if (this._transformEnabled && this._publicData) {
this._publicData.update(query, update, options);
}
};
View.prototype._transformRemove = function (query, options) {
if (this._transformEnabled && this._publicData) {
this._publicData.remove(query, options);
}
};
View.prototype._transformPrimaryKey = function (key) {
if (this._transformEnabled && this._publicData) {
this._publicData.primaryKey(key);
}
};
// Extend collection with view init
Collection.prototype.init = function () {
this._view = [];
CollectionInit.apply(this, arguments);
};
/**
* Creates a view and assigns the collection as its data source.
* @param {String} name The name of the new view.
* @param {Object} query The query to apply to the new view.
* @param {Object} options The options object to apply to the view.
* @returns {*}
*/
Collection.prototype.view = function (name, query, options) {
if (this._db && this._db._view) {
if (!this._db._view[name]) {
var view = new View(name, query, options)
.db(this._db)
.from(this);
this._view = this._view || [];
this._view.push(view);
return view;
} else {
throw('ForerunnerDB.Collection "' + this.name() + '": Cannot create a view using this collection because a view with this name already exists: ' + name);
}
}
};
/**
* Adds a view to the internal view lookup.
* @param {View} view The view to add.
* @returns {Collection}
* @private
*/
Collection.prototype._addView = CollectionGroup.prototype._addView = function (view) {
if (view !== undefined) {
this._view.push(view);
}
return this;
};
/**
* Removes a view from the internal view lookup.
* @param {View} view The view to remove.
* @returns {Collection}
* @private
*/
Collection.prototype._removeView = CollectionGroup.prototype._removeView = function (view) {
if (view !== undefined) {
var index = this._view.indexOf(view);
if (index > -1) {
this._view.splice(index, 1);
}
}
return this;
};
// Extend DB with views init
Core.prototype.init = function () {
this._view = {};
CoreInit.apply(this, arguments);
};
/**
* Gets a view by its name.
* @param {String} viewName The name of the view to retrieve.
* @returns {*}
*/
Core.prototype.view = function (viewName) {
if (!this._view[viewName]) {
if (this.debug() || (this._db && this._db.debug())) {
console.log('Core.View: Creating view ' + viewName);
}
}
this._view[viewName] = this._view[viewName] || new View(viewName).db(this);
return this._view[viewName];
};
/**
* Determine if a view with the passed name already exists.
* @param {String} viewName The name of the view to check for.
* @returns {boolean}
*/
Core.prototype.viewExists = function (viewName) {
return Boolean(this._view[viewName]);
};
/**
* Returns an array of views the DB currently has.
* @returns {Array} An array of objects containing details of each view
* the database is currently managing.
*/
Core.prototype.views = function () {
var arr = [],
i;
for (i in this._view) {
if (this._view.hasOwnProperty(i)) {
arr.push({
name: i,
count: this._view[i].count()
});
}
}
return arr;
};
Shared.finishModule('View');
module.exports = View;
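// Illustrative usage sketch (not part of the library source; the db instance is
// hypothetical) for the Core-level view helpers added above:
//
//   db.viewExists('activeUsers');   // false until the view has been requested
//   db.view('activeUsers');         // creates (or returns) the view bound to this db
//   db.views();                     // e.g. [{ name: 'activeUsers', count: 0 }]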
},{"./ActiveBucket":2,"./Collection":3,"./CollectionGroup":4,"./ReactorIO":28,"./Shared":29}],31:[function(_dereq_,module,exports){
// shim for using process in browser
var process = module.exports = {};
var queue = [];
var draining = false;
function drainQueue() {
if (draining) {
return;
}
draining = true;
var currentQueue;
var len = queue.length;
while(len) {
currentQueue = queue;
queue = [];
var i = -1;
while (++i < len) {
currentQueue[i]();
}
len = queue.length;
}
draining = false;
}
process.nextTick = function (fun) {
queue.push(fun);
if (!draining) {
setTimeout(drainQueue, 0);
}
};
process.title = 'browser';
process.browser = true;
process.env = {};
process.argv = [];
process.version = ''; // empty string to avoid regexp issues
process.versions = {};
function noop() {}
process.on = noop;
process.addListener = noop;
process.once = noop;
process.off = noop;
process.removeListener = noop;
process.removeAllListeners = noop;
process.emit = noop;
process.binding = function (name) {
throw new Error('process.binding is not supported');
};
// TODO(shtylman)
process.cwd = function () { return '/' };
process.chdir = function (dir) {
throw new Error('process.chdir is not supported');
};
process.umask = function() { return 0; };
},{}],32:[function(_dereq_,module,exports){
'use strict';
var asap = _dereq_('asap')
module.exports = Promise
function Promise(fn) {
if (typeof this !== 'object') throw new TypeError('Promises must be constructed via new')
if (typeof fn !== 'function') throw new TypeError('not a function')
var state = null
var value = null
var deferreds = []
var self = this
this.then = function(onFulfilled, onRejected) {
return new Promise(function(resolve, reject) {
handle(new Handler(onFulfilled, onRejected, resolve, reject))
})
}
function handle(deferred) {
if (state === null) {
deferreds.push(deferred)
return
}
asap(function() {
var cb = state ? deferred.onFulfilled : deferred.onRejected
if (cb === null) {
(state ? deferred.resolve : deferred.reject)(value)
return
}
var ret
try {
ret = cb(value)
}
catch (e) {
deferred.reject(e)
return
}
deferred.resolve(ret)
})
}
function resolve(newValue) {
try { //Promise Resolution Procedure: https://github.com/promises-aplus/promises-spec#the-promise-resolution-procedure
if (newValue === self) throw new TypeError('A promise cannot be resolved with itself.')
if (newValue && (typeof newValue === 'object' || typeof newValue === 'function')) {
var then = newValue.then
if (typeof then === 'function') {
doResolve(then.bind(newValue), resolve, reject)
return
}
}
state = true
value = newValue
finale()
} catch (e) { reject(e) }
}
function reject(newValue) {
state = false
value = newValue
finale()
}
function finale() {
for (var i = 0, len = deferreds.length; i < len; i++)
handle(deferreds[i])
deferreds = null
}
doResolve(fn, resolve, reject)
}
function Handler(onFulfilled, onRejected, resolve, reject){
this.onFulfilled = typeof onFulfilled === 'function' ? onFulfilled : null
this.onRejected = typeof onRejected === 'function' ? onRejected : null
this.resolve = resolve
this.reject = reject
}
/**
* Take a potentially misbehaving resolver function and make sure
* onFulfilled and onRejected are only called once.
*
* Makes no guarantees about asynchrony.
*/
function doResolve(fn, onFulfilled, onRejected) {
var done = false;
try {
fn(function (value) {
if (done) return
done = true
onFulfilled(value)
}, function (reason) {
if (done) return
done = true
onRejected(reason)
})
} catch (ex) {
if (done) return
done = true
onRejected(ex)
}
}
},{"asap":34}],33:[function(_dereq_,module,exports){
'use strict';
//This file contains then/promise specific extensions to the core promise API
var Promise = _dereq_('./core.js')
var asap = _dereq_('asap')
module.exports = Promise
/* Static Functions */
function ValuePromise(value) {
this.then = function (onFulfilled) {
if (typeof onFulfilled !== 'function') return this
return new Promise(function (resolve, reject) {
asap(function () {
try {
resolve(onFulfilled(value))
} catch (ex) {
reject(ex);
}
})
})
}
}
ValuePromise.prototype = Object.create(Promise.prototype)
var TRUE = new ValuePromise(true)
var FALSE = new ValuePromise(false)
var NULL = new ValuePromise(null)
var UNDEFINED = new ValuePromise(undefined)
var ZERO = new ValuePromise(0)
var EMPTYSTRING = new ValuePromise('')
Promise.resolve = function (value) {
if (value instanceof Promise) return value
if (value === null) return NULL
if (value === undefined) return UNDEFINED
if (value === true) return TRUE
if (value === false) return FALSE
if (value === 0) return ZERO
if (value === '') return EMPTYSTRING
if (typeof value === 'object' || typeof value === 'function') {
try {
var then = value.then
if (typeof then === 'function') {
return new Promise(then.bind(value))
}
} catch (ex) {
return new Promise(function (resolve, reject) {
reject(ex)
})
}
}
return new ValuePromise(value)
}
Promise.from = Promise.cast = function (value) {
var err = new Error('Promise.from and Promise.cast are deprecated, use Promise.resolve instead')
err.name = 'Warning'
console.warn(err.stack)
return Promise.resolve(value)
}
Promise.denodeify = function (fn, argumentCount) {
argumentCount = argumentCount || Infinity
return function () {
var self = this
var args = Array.prototype.slice.call(arguments)
return new Promise(function (resolve, reject) {
while (args.length && args.length > argumentCount) {
args.pop()
}
args.push(function (err, res) {
if (err) reject(err)
else resolve(res)
})
fn.apply(self, args)
})
}
}
Promise.nodeify = function (fn) {
return function () {
var args = Array.prototype.slice.call(arguments)
var callback = typeof args[args.length - 1] === 'function' ? args.pop() : null
try {
return fn.apply(this, arguments).nodeify(callback)
} catch (ex) {
if (callback === null || typeof callback == 'undefined') {
return new Promise(function (resolve, reject) { reject(ex) })
} else {
asap(function () {
callback(ex)
})
}
}
}
}
Promise.all = function () {
var calledWithArray = arguments.length === 1 && Array.isArray(arguments[0])
var args = Array.prototype.slice.call(calledWithArray ? arguments[0] : arguments)
if (!calledWithArray) {
var err = new Error('Promise.all should be called with a single array, calling it with multiple arguments is deprecated')
err.name = 'Warning'
console.warn(err.stack)
}
return new Promise(function (resolve, reject) {
if (args.length === 0) return resolve([])
var remaining = args.length
function res(i, val) {
try {
if (val && (typeof val === 'object' || typeof val === 'function')) {
var then = val.then
if (typeof then === 'function') {
then.call(val, function (val) { res(i, val) }, reject)
return
}
}
args[i] = val
if (--remaining === 0) {
resolve(args);
}
} catch (ex) {
reject(ex)
}
}
for (var i = 0; i < args.length; i++) {
res(i, args[i])
}
})
}
Promise.reject = function (value) {
return new Promise(function (resolve, reject) {
reject(value);
});
}
Promise.race = function (values) {
return new Promise(function (resolve, reject) {
values.forEach(function(value){
Promise.resolve(value).then(resolve, reject);
})
});
}
/* Prototype Methods */
Promise.prototype.done = function (onFulfilled, onRejected) {
var self = arguments.length ? this.then.apply(this, arguments) : this
self.then(null, function (err) {
asap(function () {
throw err
})
})
}
Promise.prototype.nodeify = function (callback) {
if (typeof callback != 'function') return this
this.then(function (value) {
asap(function () {
callback(null, value)
})
}, function (err) {
asap(function () {
callback(err)
})
})
}
Promise.prototype['catch'] = function (onRejected) {
return this.then(null, onRejected);
}
},{"./core.js":32,"asap":34}],34:[function(_dereq_,module,exports){
(function (process){
// Use the fastest possible means to execute a task in a future turn
// of the event loop.
// linked list of tasks (single, with head node)
var head = {task: void 0, next: null};
var tail = head;
var flushing = false;
var requestFlush = void 0;
var isNodeJS = false;
function flush() {
/* jshint loopfunc: true */
while (head.next) {
head = head.next;
var task = head.task;
head.task = void 0;
var domain = head.domain;
if (domain) {
head.domain = void 0;
domain.enter();
}
try {
task();
} catch (e) {
if (isNodeJS) {
// In node, uncaught exceptions are considered fatal errors.
// Re-throw them synchronously to interrupt flushing!
          // Ensure continuation if the uncaught exception is suppressed by
          // listening for "uncaughtException" events (as the domains module does).
// Continue in next event to avoid tick recursion.
if (domain) {
domain.exit();
}
setTimeout(flush, 0);
if (domain) {
domain.enter();
}
throw e;
} else {
// In browsers, uncaught exceptions are not fatal.
// Re-throw them asynchronously to avoid slow-downs.
setTimeout(function() {
throw e;
}, 0);
}
}
if (domain) {
domain.exit();
}
}
flushing = false;
}
if (typeof process !== "undefined" && process.nextTick) {
// Node.js before 0.9. Note that some fake-Node environments, like the
// Mocha test runner, introduce a `process` global without a `nextTick`.
isNodeJS = true;
requestFlush = function () {
process.nextTick(flush);
};
} else if (typeof setImmediate === "function") {
// In IE10, Node.js 0.9+, or https://github.com/NobleJS/setImmediate
if (typeof window !== "undefined") {
requestFlush = setImmediate.bind(window, flush);
} else {
requestFlush = function () {
setImmediate(flush);
};
}
} else if (typeof MessageChannel !== "undefined") {
// modern browsers
// http://www.nonblocking.io/2011/06/windownexttick.html
var channel = new MessageChannel();
channel.port1.onmessage = flush;
requestFlush = function () {
channel.port2.postMessage(0);
};
} else {
// old browsers
requestFlush = function () {
setTimeout(flush, 0);
};
}
function asap(task) {
tail = tail.next = {
task: task,
domain: isNodeJS && process.domain,
next: null
};
if (!flushing) {
flushing = true;
requestFlush();
}
};
module.exports = asap;
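// Illustrative usage sketch (not part of the original bundle): asap(fn)
// schedules fn to run in a future turn of the event loop, using the fastest
// mechanism available in the current environment.
//
//   asap(function () {
//     console.log('runs after the current call stack unwinds')
//   })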
}).call(this,_dereq_('_process'))
},{"_process":31}],35:[function(_dereq_,module,exports){
// Some code originally from async_storage.js in
// [Gaia](https://github.com/mozilla-b2g/gaia).
(function() {
'use strict';
// Originally found in https://github.com/mozilla-b2g/gaia/blob/e8f624e4cc9ea945727278039b3bc9bcb9f8667a/shared/js/async_storage.js
// Promises!
var Promise = (typeof module !== 'undefined' && module.exports) ?
_dereq_('promise') : this.Promise;
// Initialize IndexedDB; fall back to vendor-prefixed versions if needed.
var indexedDB = indexedDB || this.indexedDB || this.webkitIndexedDB ||
this.mozIndexedDB || this.OIndexedDB ||
this.msIndexedDB;
// If IndexedDB isn't available, we get outta here!
if (!indexedDB) {
return;
}
// Open the IndexedDB database (automatically creates one if one didn't
// previously exist), using any options set in the config.
function _initStorage(options) {
var self = this;
var dbInfo = {
db: null
};
if (options) {
for (var i in options) {
dbInfo[i] = options[i];
}
}
return new Promise(function(resolve, reject) {
var openreq = indexedDB.open(dbInfo.name, dbInfo.version);
openreq.onerror = function() {
reject(openreq.error);
};
openreq.onupgradeneeded = function() {
// First time setup: create an empty object store
openreq.result.createObjectStore(dbInfo.storeName);
};
openreq.onsuccess = function() {
dbInfo.db = openreq.result;
self._dbInfo = dbInfo;
resolve();
};
});
}
function getItem(key, callback) {
var self = this;
// Cast the key to a string, as that's all we can set as a key.
if (typeof key !== 'string') {
window.console.warn(key +
' used as a key, but it is not a string.');
key = String(key);
}
var promise = new Promise(function(resolve, reject) {
self.ready().then(function() {
var dbInfo = self._dbInfo;
var store = dbInfo.db.transaction(dbInfo.storeName, 'readonly')
.objectStore(dbInfo.storeName);
var req = store.get(key);
req.onsuccess = function() {
var value = req.result;
if (value === undefined) {
value = null;
}
resolve(value);
};
req.onerror = function() {
reject(req.error);
};
})['catch'](reject);
});
executeDeferedCallback(promise, callback);
return promise;
}
// Iterate over all items stored in database.
function iterate(iterator, callback) {
var self = this;
var promise = new Promise(function(resolve, reject) {
self.ready().then(function() {
var dbInfo = self._dbInfo;
var store = dbInfo.db.transaction(dbInfo.storeName, 'readonly')
.objectStore(dbInfo.storeName);
var req = store.openCursor();
var iterationNumber = 1;
req.onsuccess = function() {
var cursor = req.result;
if (cursor) {
var result = iterator(cursor.value, cursor.key, iterationNumber++);
if (result !== void(0)) {
resolve(result);
} else {
cursor['continue']();
}
} else {
resolve();
}
};
req.onerror = function() {
reject(req.error);
};
})['catch'](reject);
});
executeDeferedCallback(promise, callback);
return promise;
}
function setItem(key, value, callback) {
var self = this;
// Cast the key to a string, as that's all we can set as a key.
if (typeof key !== 'string') {
window.console.warn(key +
' used as a key, but it is not a string.');
key = String(key);
}
var promise = new Promise(function(resolve, reject) {
self.ready().then(function() {
var dbInfo = self._dbInfo;
var transaction = dbInfo.db.transaction(dbInfo.storeName, 'readwrite');
var store = transaction.objectStore(dbInfo.storeName);
// The reason we don't _save_ null is because IE 10 does
// not support saving the `null` type in IndexedDB. How
// ironic, given the bug below!
// See: https://github.com/mozilla/localForage/issues/161
if (value === null) {
value = undefined;
}
var req = store.put(value, key);
transaction.oncomplete = function() {
// Cast to undefined so the value passed to
// callback/promise is the same as what one would get out
// of `getItem()` later. This leads to some weirdness
// (setItem('foo', undefined) will return `null`), but
// it's not my fault localStorage is our baseline and that
// it's weird.
if (value === undefined) {
value = null;
}
resolve(value);
};
transaction.onabort = transaction.onerror = function() {
reject(req.error);
};
})['catch'](reject);
});
executeDeferedCallback(promise, callback);
return promise;
}
function removeItem(key, callback) {
var self = this;
// Cast the key to a string, as that's all we can set as a key.
if (typeof key !== 'string') {
window.console.warn(key +
' used as a key, but it is not a string.');
key = String(key);
}
var promise = new Promise(function(resolve, reject) {
self.ready().then(function() {
var dbInfo = self._dbInfo;
var transaction = dbInfo.db.transaction(dbInfo.storeName, 'readwrite');
var store = transaction.objectStore(dbInfo.storeName);
// We use a Grunt task to make this safe for IE and some
// versions of Android (including those used by Cordova).
                // Normally IE won't like `.delete()` and will insist on
// using `['delete']()`, but we have a build step that
// fixes this for us now.
var req = store['delete'](key);
transaction.oncomplete = function() {
resolve();
};
transaction.onerror = function() {
reject(req.error);
};
// The request will be aborted if we've exceeded our storage
// space. In this case, we will reject with a specific
// "QuotaExceededError".
transaction.onabort = function(event) {
var error = event.target.error;
if (error === 'QuotaExceededError') {
reject(error);
}
};
})['catch'](reject);
});
executeDeferedCallback(promise, callback);
return promise;
}
function clear(callback) {
var self = this;
var promise = new Promise(function(resolve, reject) {
self.ready().then(function() {
var dbInfo = self._dbInfo;
var transaction = dbInfo.db.transaction(dbInfo.storeName, 'readwrite');
var store = transaction.objectStore(dbInfo.storeName);
var req = store.clear();
transaction.oncomplete = function() {
resolve();
};
transaction.onabort = transaction.onerror = function() {
reject(req.error);
};
})['catch'](reject);
});
executeDeferedCallback(promise, callback);
return promise;
}
function length(callback) {
var self = this;
var promise = new Promise(function(resolve, reject) {
self.ready().then(function() {
var dbInfo = self._dbInfo;
var store = dbInfo.db.transaction(dbInfo.storeName, 'readonly')
.objectStore(dbInfo.storeName);
var req = store.count();
req.onsuccess = function() {
resolve(req.result);
};
req.onerror = function() {
reject(req.error);
};
})['catch'](reject);
});
executeCallback(promise, callback);
return promise;
}
function key(n, callback) {
var self = this;
var promise = new Promise(function(resolve, reject) {
if (n < 0) {
resolve(null);
return;
}
self.ready().then(function() {
var dbInfo = self._dbInfo;
var store = dbInfo.db.transaction(dbInfo.storeName, 'readonly')
.objectStore(dbInfo.storeName);
var advanced = false;
var req = store.openCursor();
req.onsuccess = function() {
var cursor = req.result;
if (!cursor) {
// this means there weren't enough keys
resolve(null);
return;
}
if (n === 0) {
// We have the first key, return it if that's what they
// wanted.
resolve(cursor.key);
} else {
if (!advanced) {
// Otherwise, ask the cursor to skip ahead n
// records.
advanced = true;
cursor.advance(n);
} else {
// When we get here, we've got the nth key.
resolve(cursor.key);
}
}
};
req.onerror = function() {
reject(req.error);
};
})['catch'](reject);
});
executeCallback(promise, callback);
return promise;
}
function keys(callback) {
var self = this;
var promise = new Promise(function(resolve, reject) {
self.ready().then(function() {
var dbInfo = self._dbInfo;
var store = dbInfo.db.transaction(dbInfo.storeName, 'readonly')
.objectStore(dbInfo.storeName);
var req = store.openCursor();
var keys = [];
req.onsuccess = function() {
var cursor = req.result;
if (!cursor) {
resolve(keys);
return;
}
keys.push(cursor.key);
cursor['continue']();
};
req.onerror = function() {
reject(req.error);
};
})['catch'](reject);
});
executeCallback(promise, callback);
return promise;
}
function executeCallback(promise, callback) {
if (callback) {
promise.then(function(result) {
callback(null, result);
}, function(error) {
callback(error);
});
}
}
function executeDeferedCallback(promise, callback) {
if (callback) {
promise.then(function(result) {
deferCallback(callback, result);
}, function(error) {
callback(error);
});
}
}
// Under Chrome the callback is called before the changes (save, clear)
    // are actually made. So we use a defer function that waits for the
    // call stack to be empty.
// For more info : https://github.com/mozilla/localForage/issues/175
// Pull request : https://github.com/mozilla/localForage/pull/178
function deferCallback(callback, result) {
if (callback) {
return setTimeout(function() {
return callback(null, result);
}, 0);
}
}
var asyncStorage = {
_driver: 'asyncStorage',
_initStorage: _initStorage,
iterate: iterate,
getItem: getItem,
setItem: setItem,
removeItem: removeItem,
clear: clear,
length: length,
key: key,
keys: keys
};
if (typeof module !== 'undefined' && module.exports) {
module.exports = asyncStorage;
} else if (typeof define === 'function' && define.amd) {
define('asyncStorage', function() {
return asyncStorage;
});
} else {
this.asyncStorage = asyncStorage;
}
}).call(window);
},{"promise":33}],36:[function(_dereq_,module,exports){
// If IndexedDB isn't available, we'll fall back to localStorage.
// Note that this will have considerable performance and storage
// side-effects (all data will be serialized on save and only data that
// can be converted to a string via `JSON.stringify()` will be saved).
(function() {
'use strict';
// Promises!
var Promise = (typeof module !== 'undefined' && module.exports) ?
_dereq_('promise') : this.Promise;
var globalObject = this;
var serializer = null;
var localStorage = null;
// If the app is running inside a Google Chrome packaged webapp, or some
// other context where localStorage isn't available, we don't use
// localStorage. This feature detection is preferred over the old
// `if (window.chrome && window.chrome.runtime)` code.
// See: https://github.com/mozilla/localForage/issues/68
try {
// If localStorage isn't available, we get outta here!
// This should be inside a try catch
if (!this.localStorage || !('setItem' in this.localStorage)) {
return;
}
// Initialize localStorage and create a variable to use throughout
// the code.
localStorage = this.localStorage;
} catch (e) {
return;
}
var ModuleType = {
DEFINE: 1,
EXPORT: 2,
WINDOW: 3
};
// Attaching to window (i.e. no module loader) is the assumed,
// simple default.
var moduleType = ModuleType.WINDOW;
// Find out what kind of module setup we have; if none, we'll just attach
// localForage to the main window.
if (typeof module !== 'undefined' && module.exports) {
moduleType = ModuleType.EXPORT;
} else if (typeof define === 'function' && define.amd) {
moduleType = ModuleType.DEFINE;
}
// Config the localStorage backend, using options set in the config.
function _initStorage(options) {
var self = this;
var dbInfo = {};
if (options) {
for (var i in options) {
dbInfo[i] = options[i];
}
}
dbInfo.keyPrefix = dbInfo.name + '/';
self._dbInfo = dbInfo;
var serializerPromise = new Promise(function(resolve/*, reject*/) {
// We allow localForage to be declared as a module or as a
// library available without AMD/require.js.
if (moduleType === ModuleType.DEFINE) {
_dereq_(['localforageSerializer'], resolve);
} else if (moduleType === ModuleType.EXPORT) {
// Making it browserify friendly
resolve(_dereq_('./../utils/serializer'));
} else {
resolve(globalObject.localforageSerializer);
}
});
return serializerPromise.then(function(lib) {
serializer = lib;
return Promise.resolve();
});
}
// Remove all keys from the datastore, effectively destroying all data in
// the app's key/value store!
function clear(callback) {
var self = this;
var promise = self.ready().then(function() {
var keyPrefix = self._dbInfo.keyPrefix;
for (var i = localStorage.length - 1; i >= 0; i--) {
var key = localStorage.key(i);
if (key.indexOf(keyPrefix) === 0) {
localStorage.removeItem(key);
}
}
});
executeCallback(promise, callback);
return promise;
}
// Retrieve an item from the store. Unlike the original async_storage
// library in Gaia, we don't modify return values at all. If a key's value
// is `undefined`, we pass that value to the callback function.
function getItem(key, callback) {
var self = this;
// Cast the key to a string, as that's all we can set as a key.
if (typeof key !== 'string') {
window.console.warn(key +
' used as a key, but it is not a string.');
key = String(key);
}
var promise = self.ready().then(function() {
var dbInfo = self._dbInfo;
var result = localStorage.getItem(dbInfo.keyPrefix + key);
// If a result was found, parse it from the serialized
// string into a JS object. If result isn't truthy, the key
// is likely undefined and we'll pass it straight to the
// callback.
if (result) {
result = serializer.deserialize(result);
}
return result;
});
executeCallback(promise, callback);
return promise;
}
// Iterate over all items in the store.
function iterate(iterator, callback) {
var self = this;
var promise = self.ready().then(function() {
var keyPrefix = self._dbInfo.keyPrefix;
var keyPrefixLength = keyPrefix.length;
var length = localStorage.length;
for (var i = 0; i < length; i++) {
var key = localStorage.key(i);
var value = localStorage.getItem(key);
// If a result was found, parse it from the serialized
// string into a JS object. If result isn't truthy, the
// key is likely undefined and we'll pass it straight
// to the iterator.
if (value) {
value = serializer.deserialize(value);
}
value = iterator(value, key.substring(keyPrefixLength), i + 1);
if (value !== void(0)) {
return value;
}
}
});
executeCallback(promise, callback);
return promise;
}
// Same as localStorage's key() method, except takes a callback.
function key(n, callback) {
var self = this;
var promise = self.ready().then(function() {
var dbInfo = self._dbInfo;
var result;
try {
result = localStorage.key(n);
} catch (error) {
result = null;
}
// Remove the prefix from the key, if a key is found.
if (result) {
result = result.substring(dbInfo.keyPrefix.length);
}
return result;
});
executeCallback(promise, callback);
return promise;
}
function keys(callback) {
var self = this;
var promise = self.ready().then(function() {
var dbInfo = self._dbInfo;
var length = localStorage.length;
var keys = [];
for (var i = 0; i < length; i++) {
if (localStorage.key(i).indexOf(dbInfo.keyPrefix) === 0) {
keys.push(localStorage.key(i).substring(dbInfo.keyPrefix.length));
}
}
return keys;
});
executeCallback(promise, callback);
return promise;
}
// Supply the number of keys in the datastore to the callback function.
function length(callback) {
var self = this;
var promise = self.keys().then(function(keys) {
return keys.length;
});
executeCallback(promise, callback);
return promise;
}
// Remove an item from the store, nice and simple.
function removeItem(key, callback) {
var self = this;
// Cast the key to a string, as that's all we can set as a key.
if (typeof key !== 'string') {
window.console.warn(key +
' used as a key, but it is not a string.');
key = String(key);
}
var promise = self.ready().then(function() {
var dbInfo = self._dbInfo;
localStorage.removeItem(dbInfo.keyPrefix + key);
});
executeCallback(promise, callback);
return promise;
}
// Set a key's value and run an optional callback once the value is set.
// Unlike Gaia's implementation, the callback function is passed the value,
// in case you want to operate on that value only after you're sure it
// saved, or something like that.
function setItem(key, value, callback) {
var self = this;
// Cast the key to a string, as that's all we can set as a key.
if (typeof key !== 'string') {
window.console.warn(key +
' used as a key, but it is not a string.');
key = String(key);
}
var promise = self.ready().then(function() {
// Convert undefined values to null.
// https://github.com/mozilla/localForage/pull/42
if (value === undefined) {
value = null;
}
// Save the original value to pass to the callback.
var originalValue = value;
return new Promise(function(resolve, reject) {
serializer.serialize(value, function(value, error) {
if (error) {
reject(error);
} else {
try {
var dbInfo = self._dbInfo;
localStorage.setItem(dbInfo.keyPrefix + key, value);
resolve(originalValue);
} catch (e) {
// localStorage capacity exceeded.
// TODO: Make this a specific error/event.
if (e.name === 'QuotaExceededError' ||
e.name === 'NS_ERROR_DOM_QUOTA_REACHED') {
reject(e);
}
reject(e);
}
}
});
});
});
executeCallback(promise, callback);
return promise;
}
function executeCallback(promise, callback) {
if (callback) {
promise.then(function(result) {
callback(null, result);
}, function(error) {
callback(error);
});
}
}
var localStorageWrapper = {
_driver: 'localStorageWrapper',
_initStorage: _initStorage,
// Default API, from Gaia/localStorage.
iterate: iterate,
getItem: getItem,
setItem: setItem,
removeItem: removeItem,
clear: clear,
length: length,
key: key,
keys: keys
};
if (moduleType === ModuleType.EXPORT) {
module.exports = localStorageWrapper;
} else if (moduleType === ModuleType.DEFINE) {
define('localStorageWrapper', function() {
return localStorageWrapper;
});
} else {
this.localStorageWrapper = localStorageWrapper;
}
}).call(window);
},{"./../utils/serializer":39,"promise":33}],37:[function(_dereq_,module,exports){
/*
* Includes code from:
*
* base64-arraybuffer
* https://github.com/niklasvh/base64-arraybuffer
*
* Copyright (c) 2012 Niklas von Hertzen
* Licensed under the MIT license.
*/
(function() {
'use strict';
// Promises!
var Promise = (typeof module !== 'undefined' && module.exports) ?
_dereq_('promise') : this.Promise;
var globalObject = this;
var serializer = null;
var openDatabase = this.openDatabase;
// If WebSQL methods aren't available, we can stop now.
if (!openDatabase) {
return;
}
var ModuleType = {
DEFINE: 1,
EXPORT: 2,
WINDOW: 3
};
// Attaching to window (i.e. no module loader) is the assumed,
// simple default.
var moduleType = ModuleType.WINDOW;
// Find out what kind of module setup we have; if none, we'll just attach
// localForage to the main window.
if (typeof module !== 'undefined' && module.exports) {
moduleType = ModuleType.EXPORT;
} else if (typeof define === 'function' && define.amd) {
moduleType = ModuleType.DEFINE;
}
// Open the WebSQL database (automatically creates one if one didn't
// previously exist), using any options set in the config.
function _initStorage(options) {
var self = this;
var dbInfo = {
db: null
};
if (options) {
for (var i in options) {
dbInfo[i] = typeof(options[i]) !== 'string' ?
options[i].toString() : options[i];
}
}
var serializerPromise = new Promise(function(resolve/*, reject*/) {
// We allow localForage to be declared as a module or as a
// library available without AMD/require.js.
if (moduleType === ModuleType.DEFINE) {
_dereq_(['localforageSerializer'], resolve);
} else if (moduleType === ModuleType.EXPORT) {
// Making it browserify friendly
resolve(_dereq_('./../utils/serializer'));
} else {
resolve(globalObject.localforageSerializer);
}
});
var dbInfoPromise = new Promise(function(resolve, reject) {
// Open the database; the openDatabase API will automatically
// create it for us if it doesn't exist.
try {
dbInfo.db = openDatabase(dbInfo.name, String(dbInfo.version),
dbInfo.description, dbInfo.size);
} catch (e) {
return self.setDriver(self.LOCALSTORAGE).then(function() {
return self._initStorage(options);
}).then(resolve)['catch'](reject);
}
// Create our key/value table if it doesn't exist.
dbInfo.db.transaction(function(t) {
t.executeSql('CREATE TABLE IF NOT EXISTS ' + dbInfo.storeName +
' (id INTEGER PRIMARY KEY, key unique, value)', [],
function() {
self._dbInfo = dbInfo;
resolve();
}, function(t, error) {
reject(error);
});
});
});
return serializerPromise.then(function(lib) {
serializer = lib;
return dbInfoPromise;
});
}
function getItem(key, callback) {
var self = this;
// Cast the key to a string, as that's all we can set as a key.
if (typeof key !== 'string') {
window.console.warn(key +
' used as a key, but it is not a string.');
key = String(key);
}
var promise = new Promise(function(resolve, reject) {
self.ready().then(function() {
var dbInfo = self._dbInfo;
dbInfo.db.transaction(function(t) {
t.executeSql('SELECT * FROM ' + dbInfo.storeName +
' WHERE key = ? LIMIT 1', [key],
function(t, results) {
var result = results.rows.length ?
results.rows.item(0).value : null;
// Check to see if this is serialized content we need to
// unpack.
if (result) {
result = serializer.deserialize(result);
}
resolve(result);
}, function(t, error) {
reject(error);
});
});
})['catch'](reject);
});
executeCallback(promise, callback);
return promise;
}
function iterate(iterator, callback) {
var self = this;
var promise = new Promise(function(resolve, reject) {
self.ready().then(function() {
var dbInfo = self._dbInfo;
dbInfo.db.transaction(function(t) {
t.executeSql('SELECT * FROM ' + dbInfo.storeName, [],
function(t, results) {
var rows = results.rows;
var length = rows.length;
for (var i = 0; i < length; i++) {
var item = rows.item(i);
var result = item.value;
// Check to see if this is serialized content
// we need to unpack.
if (result) {
result = serializer.deserialize(result);
}
result = iterator(result, item.key, i + 1);
// void(0) prevents problems with redefinition
// of `undefined`.
if (result !== void(0)) {
resolve(result);
return;
}
}
resolve();
}, function(t, error) {
reject(error);
});
});
})['catch'](reject);
});
executeCallback(promise, callback);
return promise;
}
function setItem(key, value, callback) {
var self = this;
// Cast the key to a string, as that's all we can set as a key.
if (typeof key !== 'string') {
window.console.warn(key +
' used as a key, but it is not a string.');
key = String(key);
}
var promise = new Promise(function(resolve, reject) {
self.ready().then(function() {
// The localStorage API doesn't return undefined values in an
// "expected" way, so undefined is always cast to null in all
// drivers. See: https://github.com/mozilla/localForage/pull/42
if (value === undefined) {
value = null;
}
// Save the original value to pass to the callback.
var originalValue = value;
serializer.serialize(value, function(value, error) {
if (error) {
reject(error);
} else {
var dbInfo = self._dbInfo;
dbInfo.db.transaction(function(t) {
t.executeSql('INSERT OR REPLACE INTO ' +
dbInfo.storeName +
' (key, value) VALUES (?, ?)',
[key, value], function() {
resolve(originalValue);
}, function(t, error) {
reject(error);
});
}, function(sqlError) { // The transaction failed; check
// to see if it's a quota error.
if (sqlError.code === sqlError.QUOTA_ERR) {
// We reject the callback outright for now, but
// it's worth trying to re-run the transaction.
// Even if the user accepts the prompt to use
                            // more storage on Safari, this error handler
                            // will still be invoked.
//
// TODO: Try to re-run the transaction.
reject(sqlError);
}
});
}
});
})['catch'](reject);
});
executeCallback(promise, callback);
return promise;
}
function removeItem(key, callback) {
var self = this;
// Cast the key to a string, as that's all we can set as a key.
if (typeof key !== 'string') {
window.console.warn(key +
' used as a key, but it is not a string.');
key = String(key);
}
var promise = new Promise(function(resolve, reject) {
self.ready().then(function() {
var dbInfo = self._dbInfo;
dbInfo.db.transaction(function(t) {
t.executeSql('DELETE FROM ' + dbInfo.storeName +
' WHERE key = ?', [key], function() {
resolve();
}, function(t, error) {
reject(error);
});
});
})['catch'](reject);
});
executeCallback(promise, callback);
return promise;
}
// Deletes every item in the table.
// TODO: Find out if this resets the AUTO_INCREMENT number.
function clear(callback) {
var self = this;
var promise = new Promise(function(resolve, reject) {
self.ready().then(function() {
var dbInfo = self._dbInfo;
dbInfo.db.transaction(function(t) {
t.executeSql('DELETE FROM ' + dbInfo.storeName, [],
function() {
resolve();
}, function(t, error) {
reject(error);
});
});
})['catch'](reject);
});
executeCallback(promise, callback);
return promise;
}
// Does a simple `COUNT(key)` to get the number of items stored in
// localForage.
function length(callback) {
var self = this;
var promise = new Promise(function(resolve, reject) {
self.ready().then(function() {
var dbInfo = self._dbInfo;
dbInfo.db.transaction(function(t) {
// Ahhh, SQL makes this one soooooo easy.
t.executeSql('SELECT COUNT(key) as c FROM ' +
dbInfo.storeName, [], function(t, results) {
var result = results.rows.item(0).c;
resolve(result);
}, function(t, error) {
reject(error);
});
});
})['catch'](reject);
});
executeCallback(promise, callback);
return promise;
}
// Return the key located at key index X; essentially gets the key from a
// `WHERE id = ?`. This is the most efficient way I can think to implement
// this rarely-used (in my experience) part of the API, but it can seem
// inconsistent, because we do `INSERT OR REPLACE INTO` on `setItem()`, so
// the ID of each key will change every time it's updated. Perhaps a stored
// procedure for the `setItem()` SQL would solve this problem?
// TODO: Don't change ID on `setItem()`.
function key(n, callback) {
var self = this;
var promise = new Promise(function(resolve, reject) {
self.ready().then(function() {
var dbInfo = self._dbInfo;
dbInfo.db.transaction(function(t) {
t.executeSql('SELECT key FROM ' + dbInfo.storeName +
' WHERE id = ? LIMIT 1', [n + 1],
function(t, results) {
var result = results.rows.length ?
results.rows.item(0).key : null;
resolve(result);
}, function(t, error) {
reject(error);
});
});
})['catch'](reject);
});
executeCallback(promise, callback);
return promise;
}
function keys(callback) {
var self = this;
var promise = new Promise(function(resolve, reject) {
self.ready().then(function() {
var dbInfo = self._dbInfo;
dbInfo.db.transaction(function(t) {
t.executeSql('SELECT key FROM ' + dbInfo.storeName, [],
function(t, results) {
var keys = [];
for (var i = 0; i < results.rows.length; i++) {
keys.push(results.rows.item(i).key);
}
resolve(keys);
}, function(t, error) {
reject(error);
});
});
})['catch'](reject);
});
executeCallback(promise, callback);
return promise;
}
function executeCallback(promise, callback) {
if (callback) {
promise.then(function(result) {
callback(null, result);
}, function(error) {
callback(error);
});
}
}
var webSQLStorage = {
_driver: 'webSQLStorage',
_initStorage: _initStorage,
iterate: iterate,
getItem: getItem,
setItem: setItem,
removeItem: removeItem,
clear: clear,
length: length,
key: key,
keys: keys
};
if (moduleType === ModuleType.DEFINE) {
define('webSQLStorage', function() {
return webSQLStorage;
});
} else if (moduleType === ModuleType.EXPORT) {
module.exports = webSQLStorage;
} else {
this.webSQLStorage = webSQLStorage;
}
}).call(window);
},{"./../utils/serializer":39,"promise":33}],38:[function(_dereq_,module,exports){
(function() {
'use strict';
// Promises!
var Promise = (typeof module !== 'undefined' && module.exports) ?
_dereq_('promise') : this.Promise;
// Custom drivers are stored here when `defineDriver()` is called.
// They are shared across all instances of localForage.
var CustomDrivers = {};
var DriverType = {
INDEXEDDB: 'asyncStorage',
LOCALSTORAGE: 'localStorageWrapper',
WEBSQL: 'webSQLStorage'
};
var DefaultDriverOrder = [
DriverType.INDEXEDDB,
DriverType.WEBSQL,
DriverType.LOCALSTORAGE
];
var LibraryMethods = [
'clear',
'getItem',
'iterate',
'key',
'keys',
'length',
'removeItem',
'setItem'
];
var ModuleType = {
DEFINE: 1,
EXPORT: 2,
WINDOW: 3
};
var DefaultConfig = {
description: '',
driver: DefaultDriverOrder.slice(),
name: 'localforage',
// Default DB size is _JUST UNDER_ 5MB, as it's the highest size
// we can use without a prompt.
size: 4980736,
storeName: 'keyvaluepairs',
version: 1.0
};
// Attaching to window (i.e. no module loader) is the assumed,
// simple default.
var moduleType = ModuleType.WINDOW;
// Find out what kind of module setup we have; if none, we'll just attach
// localForage to the main window.
if (typeof module !== 'undefined' && module.exports) {
moduleType = ModuleType.EXPORT;
} else if (typeof define === 'function' && define.amd) {
moduleType = ModuleType.DEFINE;
}
// Check to see if IndexedDB is available and if it is the latest
// implementation; it's our preferred backend library. We use "_spec_test"
// as the name of the database because it's not the one we'll operate on,
// but it's useful to make sure its using the right spec.
// See: https://github.com/mozilla/localForage/issues/128
var driverSupport = (function(self) {
// Initialize IndexedDB; fall back to vendor-prefixed versions
// if needed.
var indexedDB = indexedDB || self.indexedDB || self.webkitIndexedDB ||
self.mozIndexedDB || self.OIndexedDB ||
self.msIndexedDB;
var result = {};
result[DriverType.WEBSQL] = !!self.openDatabase;
result[DriverType.INDEXEDDB] = !!(function() {
// We mimic PouchDB here; just UA test for Safari (which, as of
// iOS 8/Yosemite, doesn't properly support IndexedDB).
// IndexedDB support is broken and different from Blink's.
// This is faster than the test case (and it's sync), so we just
// do this. *SIGH*
// http://bl.ocks.org/nolanlawson/raw/c83e9039edf2278047e9/
//
// We test for openDatabase because IE Mobile identifies itself
// as Safari. Oh the lulz...
if (typeof self.openDatabase !== 'undefined' && self.navigator &&
self.navigator.userAgent &&
/Safari/.test(self.navigator.userAgent) &&
!/Chrome/.test(self.navigator.userAgent)) {
return false;
}
try {
return indexedDB &&
typeof indexedDB.open === 'function' &&
// Some Samsung/HTC Android 4.0-4.3 devices
// have older IndexedDB specs; if this isn't available
// their IndexedDB is too old for us to use.
// (Replaces the onupgradeneeded test.)
typeof self.IDBKeyRange !== 'undefined';
} catch (e) {
return false;
}
})();
result[DriverType.LOCALSTORAGE] = !!(function() {
try {
return (self.localStorage &&
('setItem' in self.localStorage) &&
(self.localStorage.setItem));
} catch (e) {
return false;
}
})();
return result;
})(this);
var isArray = Array.isArray || function(arg) {
return Object.prototype.toString.call(arg) === '[object Array]';
};
function callWhenReady(localForageInstance, libraryMethod) {
localForageInstance[libraryMethod] = function() {
var _args = arguments;
return localForageInstance.ready().then(function() {
return localForageInstance[libraryMethod].apply(localForageInstance, _args);
});
};
}
function extend() {
for (var i = 1; i < arguments.length; i++) {
var arg = arguments[i];
if (arg) {
for (var key in arg) {
if (arg.hasOwnProperty(key)) {
if (isArray(arg[key])) {
arguments[0][key] = arg[key].slice();
} else {
arguments[0][key] = arg[key];
}
}
}
}
}
return arguments[0];
}
function isLibraryDriver(driverName) {
for (var driver in DriverType) {
if (DriverType.hasOwnProperty(driver) &&
DriverType[driver] === driverName) {
return true;
}
}
return false;
}
var globalObject = this;
function LocalForage(options) {
this._config = extend({}, DefaultConfig, options);
this._driverSet = null;
this._ready = false;
this._dbInfo = null;
// Add a stub for each driver API method that delays the call to the
// corresponding driver method until localForage is ready. These stubs
// will be replaced by the driver methods as soon as the driver is
// loaded, so there is no performance impact.
for (var i = 0; i < LibraryMethods.length; i++) {
callWhenReady(this, LibraryMethods[i]);
}
this.setDriver(this._config.driver);
}
LocalForage.prototype.INDEXEDDB = DriverType.INDEXEDDB;
LocalForage.prototype.LOCALSTORAGE = DriverType.LOCALSTORAGE;
LocalForage.prototype.WEBSQL = DriverType.WEBSQL;
// Set any config values for localForage; can be called anytime before
// the first API call (e.g. `getItem`, `setItem`).
// We loop through options so we don't overwrite existing config
// values.
LocalForage.prototype.config = function(options) {
// If the options argument is an object, we use it to set values.
// Otherwise, we return either a specified config value or all
// config values.
if (typeof(options) === 'object') {
// If localforage is ready and fully initialized, we can't set
// any new configuration values. Instead, we return an error.
if (this._ready) {
return new Error("Can't call config() after localforage " +
'has been used.');
}
for (var i in options) {
if (i === 'storeName') {
options[i] = options[i].replace(/\W/g, '_');
}
this._config[i] = options[i];
}
// after all config options are set and
// the driver option is used, try setting it
if ('driver' in options && options.driver) {
this.setDriver(this._config.driver);
}
return true;
} else if (typeof(options) === 'string') {
return this._config[options];
} else {
return this._config;
}
};
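    // Illustrative usage sketch (not part of the original bundle): config() is
    // called before any data API call to adjust the store settings. The option
    // values below are just example choices.
    //
    //   localforage.config({
    //       name: 'myApp',
    //       storeName: 'settings'
    //   });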
// Used to define a custom driver, shared across all instances of
// localForage.
LocalForage.prototype.defineDriver = function(driverObject, callback,
errorCallback) {
var defineDriver = new Promise(function(resolve, reject) {
try {
var driverName = driverObject._driver;
var complianceError = new Error(
'Custom driver not compliant; see ' +
'https://mozilla.github.io/localForage/#definedriver'
);
var namingError = new Error(
'Custom driver name already in use: ' + driverObject._driver
);
// A driver name should be defined and not overlap with the
// library-defined, default drivers.
if (!driverObject._driver) {
reject(complianceError);
return;
}
if (isLibraryDriver(driverObject._driver)) {
reject(namingError);
return;
}
var customDriverMethods = LibraryMethods.concat('_initStorage');
for (var i = 0; i < customDriverMethods.length; i++) {
var customDriverMethod = customDriverMethods[i];
if (!customDriverMethod ||
!driverObject[customDriverMethod] ||
typeof driverObject[customDriverMethod] !== 'function') {
reject(complianceError);
return;
}
}
var supportPromise = Promise.resolve(true);
if ('_support' in driverObject) {
if (driverObject._support && typeof driverObject._support === 'function') {
supportPromise = driverObject._support();
} else {
supportPromise = Promise.resolve(!!driverObject._support);
}
}
supportPromise.then(function(supportResult) {
driverSupport[driverName] = supportResult;
CustomDrivers[driverName] = driverObject;
resolve();
}, reject);
} catch (e) {
reject(e);
}
});
defineDriver.then(callback, errorCallback);
return defineDriver;
};
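    // Illustrative usage sketch (not part of the original bundle): a custom
    // driver is an object exposing `_driver`, `_initStorage` and every method
    // in LibraryMethods; the stub below is hypothetical.
    //
    //   localforage.defineDriver({
    //       _driver: 'myCustomDriver',
    //       _initStorage: function(options) { /* set up backing store */ },
    //       getItem: function(key, callback) { /* ... */ },
    //       setItem: function(key, value, callback) { /* ... */ },
    //       removeItem: function(key, callback) { /* ... */ },
    //       clear: function(callback) { /* ... */ },
    //       length: function(callback) { /* ... */ },
    //       key: function(n, callback) { /* ... */ },
    //       keys: function(callback) { /* ... */ },
    //       iterate: function(iterator, callback) { /* ... */ }
    //   }).then(function() {
    //       return localforage.setDriver('myCustomDriver');
    //   });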
LocalForage.prototype.driver = function() {
return this._driver || null;
};
LocalForage.prototype.ready = function(callback) {
var self = this;
var ready = new Promise(function(resolve, reject) {
self._driverSet.then(function() {
if (self._ready === null) {
self._ready = self._initStorage(self._config);
}
self._ready.then(resolve, reject);
})['catch'](reject);
});
ready.then(callback, callback);
return ready;
};
LocalForage.prototype.setDriver = function(drivers, callback,
errorCallback) {
var self = this;
if (typeof drivers === 'string') {
drivers = [drivers];
}
this._driverSet = new Promise(function(resolve, reject) {
var driverName = self._getFirstSupportedDriver(drivers);
var error = new Error('No available storage method found.');
if (!driverName) {
self._driverSet = Promise.reject(error);
reject(error);
return;
}
self._dbInfo = null;
self._ready = null;
if (isLibraryDriver(driverName)) {
// We allow localForage to be declared as a module or as a
// library available without AMD/require.js.
if (moduleType === ModuleType.DEFINE) {
_dereq_([driverName], function(lib) {
self._extend(lib);
resolve();
});
return;
} else if (moduleType === ModuleType.EXPORT) {
// Making it browserify friendly
var driver;
switch (driverName) {
case self.INDEXEDDB:
driver = _dereq_('./drivers/indexeddb');
break;
case self.LOCALSTORAGE:
driver = _dereq_('./drivers/localstorage');
break;
case self.WEBSQL:
driver = _dereq_('./drivers/websql');
}
self._extend(driver);
} else {
self._extend(globalObject[driverName]);
}
} else if (CustomDrivers[driverName]) {
self._extend(CustomDrivers[driverName]);
} else {
self._driverSet = Promise.reject(error);
reject(error);
return;
}
resolve();
});
function setDriverToConfig() {
self._config.driver = self.driver();
}
this._driverSet.then(setDriverToConfig, setDriverToConfig);
this._driverSet.then(callback, errorCallback);
return this._driverSet;
};
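    // Illustrative usage sketch (not part of the original bundle): setDriver()
    // accepts a single driver name or an ordered list of preferred drivers.
    //
    //   localforage.setDriver([localforage.INDEXEDDB, localforage.LOCALSTORAGE])
    //       .then(function() {
    //           return localforage.setItem('greeting', 'hello');
    //       });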
LocalForage.prototype.supports = function(driverName) {
return !!driverSupport[driverName];
};
LocalForage.prototype._extend = function(libraryMethodsAndProperties) {
extend(this, libraryMethodsAndProperties);
};
// Used to determine which driver we should use as the backend for this
// instance of localForage.
LocalForage.prototype._getFirstSupportedDriver = function(drivers) {
if (drivers && isArray(drivers)) {
for (var i = 0; i < drivers.length; i++) {
var driver = drivers[i];
if (this.supports(driver)) {
return driver;
}
}
}
return null;
};
LocalForage.prototype.createInstance = function(options) {
return new LocalForage(options);
};
// The actual localForage object that we expose as a module or via a
// global. It's extended by pulling in one of our other libraries.
var localForage = new LocalForage();
// We allow localForage to be declared as a module or as a library
// available without AMD/require.js.
if (moduleType === ModuleType.DEFINE) {
define('localforage', function() {
return localForage;
});
} else if (moduleType === ModuleType.EXPORT) {
module.exports = localForage;
} else {
this.localforage = localForage;
}
}).call(window);
},{"./drivers/indexeddb":35,"./drivers/localstorage":36,"./drivers/websql":37,"promise":33}],39:[function(_dereq_,module,exports){
(function() {
'use strict';
// Sadly, the best way to save binary data in WebSQL/localStorage is serializing
// it to Base64, so this is how we store it to prevent very strange errors with less
// verbose ways of binary <-> string data storage.
var BASE_CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
var SERIALIZED_MARKER = '__lfsc__:';
var SERIALIZED_MARKER_LENGTH = SERIALIZED_MARKER.length;
// OMG the serializations!
var TYPE_ARRAYBUFFER = 'arbf';
var TYPE_BLOB = 'blob';
var TYPE_INT8ARRAY = 'si08';
var TYPE_UINT8ARRAY = 'ui08';
var TYPE_UINT8CLAMPEDARRAY = 'uic8';
var TYPE_INT16ARRAY = 'si16';
var TYPE_INT32ARRAY = 'si32';
var TYPE_UINT16ARRAY = 'ur16';
var TYPE_UINT32ARRAY = 'ui32';
var TYPE_FLOAT32ARRAY = 'fl32';
var TYPE_FLOAT64ARRAY = 'fl64';
var TYPE_SERIALIZED_MARKER_LENGTH = SERIALIZED_MARKER_LENGTH +
TYPE_ARRAYBUFFER.length;
// Serialize a value, afterwards executing a callback (which usually
// instructs the `setItem()` callback/promise to be executed). This is how
// we store binary data with localStorage.
function serialize(value, callback) {
var valueString = '';
if (value) {
valueString = value.toString();
}
// Cannot use `value instanceof ArrayBuffer` or such here, as these
// checks fail when running the tests using casper.js...
//
// TODO: See why those tests fail and use a better solution.
if (value && (value.toString() === '[object ArrayBuffer]' ||
value.buffer &&
value.buffer.toString() === '[object ArrayBuffer]')) {
// Convert binary arrays to a string and prefix the string with
// a special marker.
var buffer;
var marker = SERIALIZED_MARKER;
if (value instanceof ArrayBuffer) {
buffer = value;
marker += TYPE_ARRAYBUFFER;
} else {
buffer = value.buffer;
if (valueString === '[object Int8Array]') {
marker += TYPE_INT8ARRAY;
} else if (valueString === '[object Uint8Array]') {
marker += TYPE_UINT8ARRAY;
} else if (valueString === '[object Uint8ClampedArray]') {
marker += TYPE_UINT8CLAMPEDARRAY;
} else if (valueString === '[object Int16Array]') {
marker += TYPE_INT16ARRAY;
} else if (valueString === '[object Uint16Array]') {
marker += TYPE_UINT16ARRAY;
} else if (valueString === '[object Int32Array]') {
marker += TYPE_INT32ARRAY;
} else if (valueString === '[object Uint32Array]') {
marker += TYPE_UINT32ARRAY;
} else if (valueString === '[object Float32Array]') {
marker += TYPE_FLOAT32ARRAY;
} else if (valueString === '[object Float64Array]') {
marker += TYPE_FLOAT64ARRAY;
} else {
callback(new Error('Failed to get type for BinaryArray'));
}
}
callback(marker + bufferToString(buffer));
} else if (valueString === '[object Blob]') {
            // Convert the blob to a binary array and then to a string.
var fileReader = new FileReader();
fileReader.onload = function() {
var str = bufferToString(this.result);
callback(SERIALIZED_MARKER + TYPE_BLOB + str);
};
fileReader.readAsArrayBuffer(value);
} else {
try {
callback(JSON.stringify(value));
} catch (e) {
window.console.error("Couldn't convert value into a JSON " +
'string: ', value);
callback(null, e);
}
}
}
// Deserialize data we've inserted into a value column/field. We place
// special markers into our strings to mark them as encoded; this isn't
// as nice as a meta field, but it's the only sane thing we can do whilst
// keeping localStorage support intact.
//
// Oftentimes this will just deserialize JSON content, but if we have a
// special marker (SERIALIZED_MARKER, defined above), we will extract
// some kind of arraybuffer/binary data/typed array out of the string.
function deserialize(value) {
// If we haven't marked this string as being specially serialized (i.e.
// something other than serialized JSON), we can just return it and be
// done with it.
if (value.substring(0,
SERIALIZED_MARKER_LENGTH) !== SERIALIZED_MARKER) {
return JSON.parse(value);
}
// The following code deals with deserializing some kind of Blob or
// TypedArray. First we separate out the type of data we're dealing
// with from the data itself.
var serializedString = value.substring(TYPE_SERIALIZED_MARKER_LENGTH);
var type = value.substring(SERIALIZED_MARKER_LENGTH,
TYPE_SERIALIZED_MARKER_LENGTH);
var buffer = stringToBuffer(serializedString);
// Return the right type based on the code/type set during
// serialization.
switch (type) {
case TYPE_ARRAYBUFFER:
return buffer;
case TYPE_BLOB:
return new Blob([buffer]);
case TYPE_INT8ARRAY:
return new Int8Array(buffer);
case TYPE_UINT8ARRAY:
return new Uint8Array(buffer);
case TYPE_UINT8CLAMPEDARRAY:
return new Uint8ClampedArray(buffer);
case TYPE_INT16ARRAY:
return new Int16Array(buffer);
case TYPE_UINT16ARRAY:
return new Uint16Array(buffer);
case TYPE_INT32ARRAY:
return new Int32Array(buffer);
case TYPE_UINT32ARRAY:
return new Uint32Array(buffer);
case TYPE_FLOAT32ARRAY:
return new Float32Array(buffer);
case TYPE_FLOAT64ARRAY:
return new Float64Array(buffer);
default:
                throw new Error('Unknown type: ' + type);
}
}
function stringToBuffer(serializedString) {
        // Fill the string into an ArrayBuffer.
var bufferLength = serializedString.length * 0.75;
var len = serializedString.length;
var i;
var p = 0;
var encoded1, encoded2, encoded3, encoded4;
if (serializedString[serializedString.length - 1] === '=') {
bufferLength--;
if (serializedString[serializedString.length - 2] === '=') {
bufferLength--;
}
}
var buffer = new ArrayBuffer(bufferLength);
var bytes = new Uint8Array(buffer);
for (i = 0; i < len; i+=4) {
encoded1 = BASE_CHARS.indexOf(serializedString[i]);
encoded2 = BASE_CHARS.indexOf(serializedString[i+1]);
encoded3 = BASE_CHARS.indexOf(serializedString[i+2]);
encoded4 = BASE_CHARS.indexOf(serializedString[i+3]);
/*jslint bitwise: true */
bytes[p++] = (encoded1 << 2) | (encoded2 >> 4);
bytes[p++] = ((encoded2 & 15) << 4) | (encoded3 >> 2);
bytes[p++] = ((encoded3 & 3) << 6) | (encoded4 & 63);
}
return buffer;
}
// Converts a buffer to a string to store, serialized, in the backend
// storage library.
function bufferToString(buffer) {
// base64-arraybuffer
var bytes = new Uint8Array(buffer);
var base64String = '';
var i;
for (i = 0; i < bytes.length; i += 3) {
/*jslint bitwise: true */
base64String += BASE_CHARS[bytes[i] >> 2];
base64String += BASE_CHARS[((bytes[i] & 3) << 4) | (bytes[i + 1] >> 4)];
base64String += BASE_CHARS[((bytes[i + 1] & 15) << 2) | (bytes[i + 2] >> 6)];
base64String += BASE_CHARS[bytes[i + 2] & 63];
}
if ((bytes.length % 3) === 2) {
base64String = base64String.substring(0, base64String.length - 1) + '=';
} else if (bytes.length % 3 === 1) {
base64String = base64String.substring(0, base64String.length - 2) + '==';
}
return base64String;
}
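    // Illustrative usage sketch (not part of the original bundle): round-tripping
    // a typed array through the serializer. serialize() passes the encoded string
    // to its callback; deserialize() turns it back into a typed array.
    //
    //   var original = new Uint8Array([1, 2, 3]);
    //   serialize(original, function(encoded, error) {
    //       if (!error) {
    //           var restored = deserialize(encoded); // Uint8Array [1, 2, 3]
    //       }
    //   });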
var localforageSerializer = {
serialize: serialize,
deserialize: deserialize,
stringToBuffer: stringToBuffer,
bufferToString: bufferToString
};
if (typeof module !== 'undefined' && module.exports) {
module.exports = localforageSerializer;
} else if (typeof define === 'function' && define.amd) {
define('localforageSerializer', function() {
return localforageSerializer;
});
} else {
this.localforageSerializer = localforageSerializer;
}
}).call(window);
},{}]},{},[1]);<|fim▁end|>
| |
<|file_name|>feature_block.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test block processing."""
import copy
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script, get_legacy_sigopcount_block
from test_framework.key import CECKey
from test_framework.messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
MAX_BLOCK_BASE_SIZE,
uint256_from_compact,
uint256_from_str,
)
from test_framework.mininode import P2PDataStore
from test_framework.script import (
CScript,
MAX_SCRIPT_ELEMENT_SIZE,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_DROP,
OP_FALSE,
OP_HASH160,
OP_IF,
OP_INVALIDOPCODE,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SignatureHash,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
MAX_BLOCK_SIGOPS = 20000
# Use this class for tests that require behavior other than normal "mininode" behavior.
# For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
def initialize(self, base_block):
self.vtx = copy.deepcopy(base_block.vtx)
self.hashMerkleRoot = self.calc_merkle_root()
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
r += struct.pack("<BQ", 255, len(self.vtx))
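        # A 0xff prefix followed by an 8-byte little-endian count is the widest
        # CompactSize form; using it for a small vtx count is what makes this
        # varint deliberately bloated (non-canonical but still decodable).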
for tx in self.vtx:
if with_witness:
r += tx.serialize_with_witness()
else:
r += tx.serialize_without_witness()
return r
def normal_serialize(self):
return super().serialize()
class FullBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [[]]
def run_test(self):
node = self.nodes[0] # convenience reference to the node
self.bootstrap_p2p() # Add one p2p connection to the node
self.block_heights = {}
self.coinbase_key = CECKey()
self.coinbase_key.set_secretbytes(b"horsebattery")
self.coinbase_pubkey = self.coinbase_key.get_pubkey()
self.tip = None
self.blocks = {}
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
self.spendable_outputs = []
# Create a new block
b0 = self.next_block(0)
self.save_spendable_output()
self.sync_blocks([b0])
# Allow the block to mature
blocks = []
for i in range(99):
blocks.append(self.next_block(5000 + i))
self.save_spendable_output()
self.sync_blocks(blocks)
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(33):
out.append(self.get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
b1 = self.next_block(1, spend=out[0])
self.save_spendable_output()
b2 = self.next_block(2, spend=out[1])
self.save_spendable_output()
self.sync_blocks([b1, b2])
# Fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
self.log.info("Don't reorg to a chain of the same length")
self.move_tip(1)
b3 = self.next_block(3, spend=out[1])
txout_b3 = b3.vtx[1]
self.sync_blocks([b3], False)
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
self.log.info("Reorg to a longer chain")
b4 = self.next_block(4, spend=out[2])
self.sync_blocks([b4])
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
self.move_tip(2)
b5 = self.next_block(5, spend=out[2])
self.save_spendable_output()
self.sync_blocks([b5], False)
self.log.info("Reorg back to the original chain")
b6 = self.next_block(6, spend=out[3])
self.sync_blocks([b6], True)
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain with a double spend, even if it is longer")
self.move_tip(5)
b7 = self.next_block(7, spend=out[2])
self.sync_blocks([b7], False)
b8 = self.next_block(8, spend=out[4])
self.sync_blocks([b8], False, reconnect=True)
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block where the miner creates too much coinbase reward")
self.move_tip(6)
b9 = self.next_block(9, spend=out[4], additional_coinbase_value=1)
self.sync_blocks([b9], False, 16, b'bad-cb-amount', reconnect=True)
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer")
self.move_tip(5)
b10 = self.next_block(10, spend=out[3])
self.sync_blocks([b10], False)
b11 = self.next_block(11, spend=out[4], additional_coinbase_value=1)
self.sync_blocks([b11], False, 16, b'bad-cb-amount', reconnect=True)
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer (on a forked chain)")
self.move_tip(5)
b12 = self.next_block(12, spend=out[3])
self.save_spendable_output()
b13 = self.next_block(13, spend=out[4])
self.save_spendable_output()
b14 = self.next_block(14, spend=out[5], additional_coinbase_value=1)
self.sync_blocks([b12, b13, b14], False, 16, b'bad-cb-amount', reconnect=True)
# New tip should be b13.
assert_equal(node.getbestblockhash(), b13.hash)
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
self.log.info("Accept a block with lots of checksigs")
lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
self.move_tip(13)
b15 = self.next_block(15, spend=out[5], script=lots_of_checksigs)
self.save_spendable_output()
self.sync_blocks([b15], True)
self.log.info("Reject a block with too many checksigs")
too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
b16 = self.next_block(16, spend=out[6], script=too_many_checksigs)
self.sync_blocks([b16], False, 16, b'bad-blk-sigops', reconnect=True)
# Attempt to spend a transaction created on a different fork
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with a spend from a re-org'ed out tx")
self.move_tip(15)
b17 = self.next_block(17, spend=txout_b3)
self.sync_blocks([b17], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# Attempt to spend a transaction created on a different fork (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b18 (b3.vtx[1]) -> b19 (6)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with a spend from a re-org'ed out tx (on a forked chain)")
self.move_tip(13)
b18 = self.next_block(18, spend=txout_b3)
self.sync_blocks([b18], False)
b19 = self.next_block(19, spend=out[6])
self.sync_blocks([b19], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# Attempt to spend a coinbase at depth too low
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block spending an immature coinbase.")
self.move_tip(15)
b20 = self.next_block(20, spend=out[7])
self.sync_blocks([b20], False, 16, b'bad-txns-premature-spend-of-coinbase')
# Attempt to spend a coinbase at depth too low (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block spending an immature coinbase (on a forked chain)")
self.move_tip(13)
b21 = self.next_block(21, spend=out[6])
self.sync_blocks([b21], False)
b22 = self.next_block(22, spend=out[5])
self.sync_blocks([b22], False, 16, b'bad-txns-premature-spend-of-coinbase')
        # Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure it's accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Accept a block of size MAX_BLOCK_BASE_SIZE")
self.move_tip(15)
b23 = self.next_block(23, spend=out[6])
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
b23 = self.update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE)
self.sync_blocks([b23], True)
self.save_spendable_output()
self.log.info("Reject a block of size MAX_BLOCK_BASE_SIZE + 1")
self.move_tip(15)
b24 = self.next_block(24, spend=out[6])
script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69
script_output = CScript([b'\x00' * (script_length + 1)])
tx.vout = [CTxOut(0, script_output)]
b24 = self.update_block(24, [tx])
assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE + 1)
self.sync_blocks([b24], False, 16, b'bad-blk-length', reconnect=True)
b25 = self.next_block(25, spend=out[7])
self.sync_blocks([b25], False)
# Create blocks with a coinbase input script size out of range
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with coinbase input script size out of range")
self.move_tip(15)
b26 = self.next_block(26, spend=out[6])
b26.vtx[0].vin[0].scriptSig = b'\x00'
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
# transactions, and updates the required state.
b26 = self.update_block(26, [])
self.sync_blocks([b26], False, 16, b'bad-cb-length', reconnect=True)
# Extend the b26 chain to make sure bitcoind isn't accepting b26
b27 = self.next_block(27, spend=out[7])
self.sync_blocks([b27], False)
# Now try a too-large-coinbase script
self.move_tip(15)
b28 = self.next_block(28, spend=out[6])
b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
b28.vtx[0].rehash()
b28 = self.update_block(28, [])
self.sync_blocks([b28], False, 16, b'bad-cb-length', reconnect=True)
# Extend the b28 chain to make sure bitcoind isn't accepting b28
b29 = self.next_block(29, spend=out[7])
self.sync_blocks([b29], False)
# b30 has a max-sized coinbase scriptSig.
self.move_tip(23)
b30 = self.next_block(30)
b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
b30.vtx[0].rehash()
b30 = self.update_block(30, [])
self.sync_blocks([b30], True)
self.save_spendable_output()
# b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
#
# genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b36 (11)
# \-> b34 (10)
# \-> b32 (9)
#
# MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
self.log.info("Accept a block with the max number of OP_CHECKMULTISIG sigops")
lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19)
b31 = self.next_block(31, spend=out[8], script=lots_of_multisigs)
assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
self.sync_blocks([b31], True)
self.save_spendable_output()
# this goes over the limit because the coinbase has one sigop
self.log.info("Reject a block with too many OP_CHECKMULTISIG sigops")
too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
b32 = self.next_block(32, spend=out[9], script=too_many_multisigs)
assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
self.sync_blocks([b32], False, 16, b'bad-blk-sigops', reconnect=True)
# CHECKMULTISIGVERIFY
self.log.info("Accept a block with the max number of OP_CHECKMULTISIGVERIFY sigops")
self.move_tip(31)
lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19)
b33 = self.next_block(33, spend=out[9], script=lots_of_multisigs)
self.sync_blocks([b33], True)
self.save_spendable_output()
self.log.info("Reject a block with too many OP_CHECKMULTISIGVERIFY sigops")
too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
b34 = self.next_block(34, spend=out[10], script=too_many_multisigs)
self.sync_blocks([b34], False, 16, b'bad-blk-sigops', reconnect=True)
# CHECKSIGVERIFY
self.log.info("Accept a block with the max number of OP_CHECKSIGVERIFY sigops")
self.move_tip(33)
lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
b35 = self.next_block(35, spend=out[10], script=lots_of_checksigs)
self.sync_blocks([b35], True)
self.save_spendable_output()
self.log.info("Reject a block with too many OP_CHECKSIGVERIFY sigops")
too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
b36 = self.next_block(36, spend=out[11], script=too_many_checksigs)
self.sync_blocks([b36], False, 16, b'bad-blk-sigops', reconnect=True)
# Check spending of a transaction in a block which failed to connect
#
# b6 (3)
# b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b37 (11)
# \-> b38 (11/37)
#
# save 37's spendable output, but then double-spend out11 to invalidate the block
self.log.info("Reject a block spending transaction from a block which failed to connect")
self.move_tip(35)
b37 = self.next_block(37, spend=out[11])
txout_b37 = b37.vtx[1]
tx = self.create_and_sign_transaction(out[11], 0)
b37 = self.update_block(37, [tx])
self.sync_blocks([b37], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
self.move_tip(35)
b38 = self.next_block(38, spend=txout_b37)
self.sync_blocks([b38], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# Check P2SH SigOp counting
#
#
# 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
# \-> b40 (12)
#
# b39 - create some P2SH outputs that will require 6 sigops to spend:
#
# redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
# p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
#
self.log.info("Check P2SH SIGOPS are correctly counted")
self.move_tip(35)
b39 = self.next_block(39)
b39_outputs = 0
b39_sigops_per_output = 6
# Build the redeem script, hash it, use hash to create the p2sh script
redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG])
redeem_script_hash = hash160(redeem_script)
p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
# Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
# This must be signed because it is spending a coinbase
spend = out[11]
tx = self.create_tx(spend, 0, 1, p2sh_script)
tx.vout.append(CTxOut(spend.vout[0].nValue - 1, CScript([OP_TRUE])))
self.sign_tx(tx, spend)
tx.rehash()
b39 = self.update_block(39, [tx])
b39_outputs += 1
# Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
tx_new = None
tx_last = tx
total_size = len(b39.serialize())
while(total_size < MAX_BLOCK_BASE_SIZE):
tx_new = self.create_tx(tx_last, 1, 1, p2sh_script)
tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
tx_new.rehash()
total_size += len(tx_new.serialize())
if total_size >= MAX_BLOCK_BASE_SIZE:
break
b39.vtx.append(tx_new) # add tx to block
tx_last = tx_new
b39_outputs += 1
b39 = self.update_block(39, [])
self.sync_blocks([b39], True)
self.save_spendable_output()
# Test sigops in P2SH redeem scripts
#
# b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops.
# The first tx has one sigop and then at the end we add 2 more to put us just over the max.
#
# b41 does the same, less one, so it has the maximum sigops permitted.
#
self.log.info("Reject a block with too many P2SH sigops")
self.move_tip(39)
b40 = self.next_block(40, spend=out[12])
sigops = get_legacy_sigopcount_block(b40)
numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
assert_equal(numTxes <= b39_outputs, True)
lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
new_txs = []
for i in range(1, numTxes + 1):
tx = CTransaction()
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
tx.vin.append(CTxIn(lastOutpoint, b''))
# second input is corresponding P2SH output from b39
tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
# Note: must pass the redeem_script (not p2sh_script) to the signature hash function
(sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
scriptSig = CScript([sig, redeem_script])
tx.vin[1].scriptSig = scriptSig
tx.rehash()
new_txs.append(tx)
lastOutpoint = COutPoint(tx.sha256, 0)
b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
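# the trailing +1 pushes the block exactly one sigop over MAX_BLOCK_SIGOPS once the
# final OP_CHECKSIG-padded output below is appended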
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
tx.rehash()
new_txs.append(tx)
self.update_block(40, new_txs)
self.sync_blocks([b40], False, 16, b'bad-blk-sigops', reconnect=True)
# same as b40, but one less sigop
self.log.info("Accept a block with the max number of P2SH sigops")
self.move_tip(39)
b41 = self.next_block(41, spend=None)
self.update_block(41, b40.vtx[1:-1])
b41_sigops_to_fill = b40_sigops_to_fill - 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
tx.rehash()
self.update_block(41, [tx])
self.sync_blocks([b41], True)
# Fork off of b39 to create a constant base again
#
# b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
# \-> b41 (12)
#
self.move_tip(39)
b42 = self.next_block(42, spend=out[12])
self.save_spendable_output()
b43 = self.next_block(43, spend=out[13])
self.save_spendable_output()
self.sync_blocks([b42, b43], True)
# Test a number of really invalid scenarios
#
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
# \-> ??? (15)
# The next few blocks are going to be created "by hand" since they'll do funky things, such as having
# the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works.
self.log.info("Build block 44 manually")
height = self.block_heights[self.tip.sha256] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
b44 = CBlock()
b44.nTime = self.tip.nTime + 1
b44.hashPrevBlock = self.tip.sha256
b44.nBits = 0x207fffff
b44.vtx.append(coinbase)
b44.hashMerkleRoot = b44.calc_merkle_root()
b44.solve()
self.tip = b44
self.block_heights[b44.sha256] = height
self.blocks[44] = b44
self.sync_blocks([b44], True)
self.log.info("Reject a block with a non-coinbase as the first tx")
non_coinbase = self.create_tx(out[15], 0, 1)
b45 = CBlock()
b45.nTime = self.tip.nTime + 1
b45.hashPrevBlock = self.tip.sha256
b45.nBits = 0x207fffff
b45.vtx.append(non_coinbase)
b45.hashMerkleRoot = b45.calc_merkle_root()
b45.calc_sha256()
b45.solve()
self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256] + 1
self.tip = b45
self.blocks[45] = b45
self.sync_blocks([b45], False, 16, b'bad-cb-missing', reconnect=True)
self.log.info("Reject a block with no transactions")
self.move_tip(44)
b46 = CBlock()
b46.nTime = b44.nTime + 1
b46.hashPrevBlock = b44.sha256
b46.nBits = 0x207fffff
b46.vtx = []
b46.hashMerkleRoot = 0
b46.solve()
self.block_heights[b46.sha256] = self.block_heights[b44.sha256] + 1
self.tip = b46
assert 46 not in self.blocks
self.blocks[46] = b46
self.sync_blocks([b46], False, 16, b'bad-blk-length', reconnect=True)
self.log.info("Reject a block with invalid work")
self.move_tip(44)
b47 = self.next_block(47, solve=False)
target = uint256_from_compact(b47.nBits)
while b47.sha256 < target:
b47.nNonce += 1
b47.rehash()
self.sync_blocks([b47], False, request_block=False)
self.log.info("Reject a block with a timestamp >2 hours in the future")
self.move_tip(44)
b48 = self.next_block(48, solve=False)
b48.nTime = int(time.time()) + 60 * 60 * 3
b48.solve()
self.sync_blocks([b48], False, request_block=False)
self.log.info("Reject a block with invalid merkle hash")
self.move_tip(44)
b49 = self.next_block(49)
b49.hashMerkleRoot += 1
b49.solve()
self.sync_blocks([b49], False, 16, b'bad-txnmrklroot', reconnect=True)
self.log.info("Reject a block with incorrect POW limit")
self.move_tip(44)
b50 = self.next_block(50)
b50.nBits = b50.nBits - 1
b50.solve()
self.sync_blocks([b50], False, request_block=False, reconnect=True)
self.log.info("Reject a block with two coinbase transactions")
self.move_tip(44)
b51 = self.next_block(51)
cb2 = create_coinbase(51, self.coinbase_pubkey)
b51 = self.update_block(51, [cb2])
self.sync_blocks([b51], False, 16, b'bad-cb-multiple', reconnect=True)
self.log.info("Reject a block with duplicate transactions")
# Note: txns have to be in the right position in the merkle tree to trigger this error
self.move_tip(44)
b52 = self.next_block(52, spend=out[15])
tx = self.create_tx(b52.vtx[1], 0, 1)
b52 = self.update_block(52, [tx, tx])
self.sync_blocks([b52], False, 16, b'bad-txns-duplicate', reconnect=True)
# Test block timestamps
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
# \-> b54 (15)
#
self.move_tip(43)
b53 = self.next_block(53, spend=out[14])
self.sync_blocks([b53], False)
self.save_spendable_output()
self.log.info("Reject a block with timestamp before MedianTimePast")
b54 = self.next_block(54, spend=out[15])
b54.nTime = b35.nTime - 1
b54.solve()
self.sync_blocks([b54], False, request_block=False)
# valid timestamp
self.move_tip(53)
b55 = self.next_block(55, spend=out[15])
b55.nTime = b35.nTime
self.update_block(55, [])
self.sync_blocks([b55], True)
self.save_spendable_output()
# Test Merkle tree malleability
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
# \-> b57 (16)
# \-> b56p2 (16)
# \-> b56 (16)
#
# Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without
# affecting the merkle root of a block, while still invalidating it.
# See: src/consensus/merkle.h
#
# b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx.
# Result: OK
#
# b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle
# root but duplicate transactions.
# Result: Fails
#
# b57p2 has six transactions in its merkle tree:
# - coinbase, tx, tx1, tx2, tx3, tx4
# Merkle root calculation will duplicate as necessary.
# Result: OK.
#
# b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches
# duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates
# that the error was caught early, avoiding a DOS vulnerability.)
# b57 - a good block with 2 txs, don't submit until end
self.move_tip(55)
b57 = self.next_block(57)
tx = self.create_and_sign_transaction(out[16], 1)
tx1 = self.create_tx(tx, 0, 1)
b57 = self.update_block(57, [tx, tx1])
# b56 - copy b57, add a duplicate tx
self.log.info("Reject a block with a duplicate transaction in the Merkle Tree (but with a valid Merkle Root)")
self.move_tip(55)
b56 = copy.deepcopy(b57)
self.blocks[56] = b56
assert_equal(len(b56.vtx), 3)
b56 = self.update_block(56, [tx1])
assert_equal(b56.hash, b57.hash)
self.sync_blocks([b56], False, 16, b'bad-txns-duplicate', reconnect=True)
# b57p2 - a good block with 6 tx'es, don't submit until end
self.move_tip(55)
b57p2 = self.next_block("57p2")
tx = self.create_and_sign_transaction(out[16], 1)
tx1 = self.create_tx(tx, 0, 1)
tx2 = self.create_tx(tx1, 0, 1)
tx3 = self.create_tx(tx2, 0, 1)
tx4 = self.create_tx(tx3, 0, 1)
b57p2 = self.update_block("57p2", [tx, tx1, tx2, tx3, tx4])
# b56p2 - copy b57p2, duplicate two non-consecutive tx's
self.log.info("Reject a block with two duplicate transactions in the Merkle Tree (but with a valid Merkle Root)")
self.move_tip(55)
b56p2 = copy.deepcopy(b57p2)
self.blocks["b56p2"] = b56p2
assert_equal(b56p2.hash, b57p2.hash)
assert_equal(len(b56p2.vtx), 6)
b56p2 = self.update_block("b56p2", [tx3, tx4])
self.sync_blocks([b56p2], False, 16, b'bad-txns-duplicate', reconnect=True)
self.move_tip("57p2")
self.sync_blocks([b57p2], True)
self.move_tip(57)
self.sync_blocks([b57], False) # The tip is not updated because 57p2 seen first
self.save_spendable_output()
# Test a few invalid tx types
#
# -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> ??? (17)
#
# tx with prevout.n out of range
self.log.info("Reject a block with a transaction with prevout.n out of range")
self.move_tip(57)
b58 = self.next_block(58, spend=out[17])
tx = CTransaction()
assert(len(out[17].vout) < 42)
tx.vin.append(CTxIn(COutPoint(out[17].sha256, 42), CScript([OP_TRUE]), 0xffffffff))
tx.vout.append(CTxOut(0, b""))
tx.calc_sha256()
b58 = self.update_block(58, [tx])
self.sync_blocks([b58], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# tx with output value > input value
self.log.info("Reject a block with a transaction with outputs > inputs")
self.move_tip(57)
b59 = self.next_block(59)
tx = self.create_and_sign_transaction(out[17], 51 * COIN)
b59 = self.update_block(59, [tx])
self.sync_blocks([b59], False, 16, b'bad-txns-in-belowout', reconnect=True)
# reset to good chain
self.move_tip(57)
b60 = self.next_block(60, spend=out[17])
self.sync_blocks([b60], True)
self.save_spendable_output()
# Test BIP30
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b61 (18)
#
# Blocks are not allowed to contain a transaction whose id matches that of an earlier,
# not-fully-spent transaction in the same chain. To test, make identical coinbases;
# the second one should be rejected.
#
self.log.info("Reject a block with a transaction with a duplicate hash of a previous transaction (BIP30)")
self.move_tip(60)
b61 = self.next_block(61, spend=out[18])
b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig # Equalize the coinbases
b61.vtx[0].rehash()
b61 = self.update_block(61, [])
assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
self.sync_blocks([b61], False, 16, b'bad-txns-BIP30', reconnect=True)
# Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b62 (18)
#
self.log.info("Reject a block with a transaction with a nonfinal locktime")
self.move_tip(60)
b62 = self.next_block(62)
tx = CTransaction()
tx.nLockTime = 0xffffffff # this locktime is non-final
tx.vin.append(CTxIn(COutPoint(out[18].sha256, 0))) # don't set nSequence
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
assert(tx.vin[0].nSequence < 0xffffffff)
tx.calc_sha256()
b62 = self.update_block(62, [tx])
self.sync_blocks([b62], False, 16, b'bad-txns-nonfinal')
# Test a non-final coinbase is also rejected
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b63 (-)
#
self.log.info("Reject a block with a coinbase transaction with a nonfinal locktime")
self.move_tip(60)
b63 = self.next_block(63)
b63.vtx[0].nLockTime = 0xffffffff
b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
b63.vtx[0].rehash()
b63 = self.update_block(63, [])
self.sync_blocks([b63], False, 16, b'bad-txns-nonfinal')
# This checks that a block with a bloated VARINT between the block_header and the array of tx such that
# the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint,
# does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
# care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
#
# What matters is that the receiving node should not reject the bloated block, and then reject the canonical
# block on the basis that it's the same as an already-rejected block (which would be a consensus failure.)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
# \
# b64a (18)
# b64a is a bloated block (non-canonical varint)
# b64 is a good block (same as b64a but w/ canonical varint)
#
self.log.info("Accept a valid block even if a bloated version of the block has previously been sent")
self.move_tip(60)
regular_block = self.next_block("64a", spend=out[18])
# make it a "broken_block," with non-canonical serialization
b64a = CBrokenBlock(regular_block)
b64a.initialize(regular_block)
self.blocks["64a"] = b64a
self.tip = b64a
tx = CTransaction()
# use canonical serialization to calculate size
script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
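# sizing against the canonical serialization means the canonically re-encoded b64 is
# exactly MAX_BLOCK_BASE_SIZE, while the bloated varint makes b64a 8 bytes larger on
# the wire (see the asserts below)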
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
b64a = self.update_block("64a", [tx])
assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
self.sync_blocks([b64a], False, 1, b'error parsing message')
# bitcoind doesn't disconnect us for sending a bloated block, but if we subsequently
# resend the header message, it won't send us the getdata message again. Just
# disconnect and reconnect and then call sync_blocks.
# TODO: improve this test to be less dependent on P2P DOS behaviour.
node.disconnect_p2ps()
self.reconnect_p2p()
self.move_tip(60)
b64 = CBlock(b64a)
b64.vtx = copy.deepcopy(b64a.vtx)
assert_equal(b64.hash, b64a.hash)
assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
self.blocks[64] = b64
b64 = self.update_block(64, [])
self.sync_blocks([b64], True)
self.save_spendable_output()
# Spend an output created in the block itself
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
#
self.log.info("Accept a block with a transaction spending an output created in the same block")
self.move_tip(64)
b65 = self.next_block(65)
tx1 = self.create_and_sign_transaction(out[19], out[19].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 0)
b65 = self.update_block(65, [tx1, tx2])
self.sync_blocks([b65], True)
self.save_spendable_output()
# Attempt to spend an output created later in the same block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b66 (20)
self.log.info("Reject a block with a transaction spending an output created later in the same block")
self.move_tip(65)
b66 = self.next_block(66)
tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 1)
b66 = self.update_block(66, [tx2, tx1])
self.sync_blocks([b66], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# Attempt to double-spend a transaction created in a block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b67 (20)
#
#
self.log.info("Reject a block with a transaction double spending a transaction creted in the same block")
self.move_tip(65)
b67 = self.next_block(67)
tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 1)
tx3 = self.create_and_sign_transaction(tx1, 2)
b67 = self.update_block(67, [tx1, tx2, tx3])
self.sync_blocks([b67], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# More tests of block subsidy
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b68 (20)
#
# b68 - coinbase with an extra 10 satoshis,
# creates a tx that has 9 satoshis from out[20] go to fees
# this fails because the coinbase is trying to claim 1 satoshi too much in fees
#
# b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
# this succeeds
#
self.log.info("Reject a block trying to claim too much subsidy in the coinbase transaction")
self.move_tip(65)
b68 = self.next_block(68, additional_coinbase_value=10)
tx = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue - 9)
b68 = self.update_block(68, [tx])
self.sync_blocks([b68], False, 16, b'bad-cb-amount', reconnect=True)
self.log.info("Accept a block claiming the correct subsidy in the coinbase transaction")
self.move_tip(65)
b69 = self.next_block(69, additional_coinbase_value=10)
tx = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue - 10)
self.update_block(69, [tx])
self.sync_blocks([b69], True)
self.save_spendable_output()
# Test spending the outpoint of a non-existent transaction
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b70 (21)
#
self.log.info("Reject a block containing a transaction spending from a non-existent input")
self.move_tip(69)
b70 = self.next_block(70, spend=out[21])
bogus_tx = CTransaction()
bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
tx.vout.append(CTxOut(1, b""))
b70 = self.update_block(70, [tx])
self.sync_blocks([b70], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b71 (21)
#
# b72 is a good block.
# b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b72.
self.log.info("Reject a block containing a duplicate transaction but with the same Merkle root (Merkle tree malleability")
self.move_tip(69)
b72 = self.next_block(72)
tx1 = self.create_and_sign_transaction(out[21], 2)
tx2 = self.create_and_sign_transaction(tx1, 1)
b72 = self.update_block(72, [tx1, tx2]) # now tip is 72
b71 = copy.deepcopy(b72)
b71.vtx.append(tx2) # add duplicate tx2
self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69
self.blocks[71] = b71
assert_equal(len(b71.vtx), 4)
assert_equal(len(b72.vtx), 3)
assert_equal(b72.sha256, b71.sha256)
self.move_tip(71)
self.sync_blocks([b71], False, 16, b'bad-txns-duplicate', reconnect=True)
self.move_tip(72)
self.sync_blocks([b72], True)
self.save_spendable_output()
# Test some invalid scripts and MAX_BLOCK_SIGOPS
#
# -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b** (22)
#
# b73 - tx with excessive sigops that are placed after an excessively large script element.
# The purpose of the test is to make sure those sigops are counted.
#
# script is a bytearray of size 20,526
#
# bytearray[0-19,998] : OP_CHECKSIG
# bytearray[19,999] : OP_PUSHDATA4
# bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
# bytearray[20,004-20,525]: unread data (script_element)
# bytearray[20,526] : OP_CHECKSIG (this puts us over the limit)
self.log.info("Reject a block containing too many sigops after a large script element")
self.move_tip(72)
b73 = self.next_block(73)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = int("4e", 16) # OP_PUSHDATA4
element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
a[MAX_BLOCK_SIGOPS] = element_size % 256
a[MAX_BLOCK_SIGOPS + 1] = element_size // 256
a[MAX_BLOCK_SIGOPS + 2] = 0
a[MAX_BLOCK_SIGOPS + 3] = 0
tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
b73 = self.update_block(73, [tx])
assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS + 1)
self.sync_blocks([b73], False, 16, b'bad-blk-sigops', reconnect=True)
# b74/75 - if we push an invalid script element, all previous sigops are counted,
# but sigops after the element are not counted.
#
# The invalid script element is that the push_data indicates that
# there will be a large amount of data (0xffffff bytes), but we only
# provide a much smaller number. These bytes are CHECKSIGS so they would
# cause b75 to fail for excessive sigops, if those bytes were counted.
#
# b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
# b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
self.log.info("Check sigops are counted correctly after an invalid script element")
self.move_tip(72)
b74 = self.next_block(74)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS] = 0x4e
a[MAX_BLOCK_SIGOPS + 1] = 0xfe
a[MAX_BLOCK_SIGOPS + 2] = 0xff
a[MAX_BLOCK_SIGOPS + 3] = 0xff
a[MAX_BLOCK_SIGOPS + 4] = 0xff
tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
b74 = self.update_block(74, [tx])
self.sync_blocks([b74], False, 16, b'bad-blk-sigops', reconnect=True)
self.move_tip(72)
b75 = self.next_block(75)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = 0x4e
a[MAX_BLOCK_SIGOPS] = 0xff
a[MAX_BLOCK_SIGOPS + 1] = 0xff
a[MAX_BLOCK_SIGOPS + 2] = 0xff
a[MAX_BLOCK_SIGOPS + 3] = 0xff
tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
b75 = self.update_block(75, [tx])
self.sync_blocks([b75], True)
self.save_spendable_output()
# Check that if we push an element filled with CHECKSIGs, they are not counted
self.move_tip(75)
b76 = self.next_block(76)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
tx = self.create_and_sign_transaction(out[23], 1, CScript(a))
b76 = self.update_block(76, [tx])
self.sync_blocks([b76], True)
self.save_spendable_output()
# Test transaction resurrection
#
# -> b77 (24) -> b78 (25) -> b79 (26)
# \-> b80 (25) -> b81 (26) -> b82 (27)
#
# b78 creates a tx, which is spent in b79. After b82, both should be in mempool
#
# The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the
# rather obscure reason that the Python signature code does not distinguish between
# Low-S and High-S values (whereas the bitcoin code has custom code which does so);
# as a result of which, the odds are 50% that the python code will use the right
# value and the transaction will be accepted into the mempool. Until we modify the
# test framework to support low-S signing, we are out of luck.
#
# To get around this issue, we construct transactions which are not signed and which
# spend to OP_TRUE. If the standard-ness rules change, this test would need to be
# updated. (Perhaps to spend to a P2SH OP_TRUE script)
self.log.info("Test transaction resurrection during a re-org")
self.move_tip(76)
b77 = self.next_block(77)
tx77 = self.create_and_sign_transaction(out[24], 10 * COIN)
b77 = self.update_block(77, [tx77])
self.sync_blocks([b77], True)
self.save_spendable_output()
b78 = self.next_block(78)
tx78 = self.create_tx(tx77, 0, 9 * COIN)
b78 = self.update_block(78, [tx78])
self.sync_blocks([b78], True)
b79 = self.next_block(79)
tx79 = self.create_tx(tx78, 0, 8 * COIN)
b79 = self.update_block(79, [tx79])
self.sync_blocks([b79], True)
# mempool should be empty
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.move_tip(77)
b80 = self.next_block(80, spend=out[25])
self.sync_blocks([b80], False, request_block=False)
self.save_spendable_output()
b81 = self.next_block(81, spend=out[26])
self.sync_blocks([b81], False, request_block=False) # other chain is same length
self.save_spendable_output()
b82 = self.next_block(82, spend=out[27])
self.sync_blocks([b82], True) # now this chain is longer, triggers re-org
self.save_spendable_output()
# now check that tx78 and tx79 have been put back into the peer's mempool
mempool = self.nodes[0].getrawmempool()
assert_equal(len(mempool), 2)
assert(tx78.hash in mempool)
assert(tx79.hash in mempool)
# Test invalid opcodes in dead execution paths.
#
# -> b81 (26) -> b82 (27) -> b83 (28)
#
self.log.info("Accept a block with invalid opcodes in dead execution paths")
b83 = self.next_block(83)
op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
script = CScript(op_codes)
tx1 = self.create_and_sign_transaction(out[28], out[28].vout[0].nValue, script)
tx2 = self.create_and_sign_transaction(tx1, 0, CScript([OP_TRUE]))
tx2.vin[0].scriptSig = CScript([OP_FALSE])
tx2.rehash()
b83 = self.update_block(83, [tx1, tx2])
self.sync_blocks([b83], True)
self.save_spendable_output()
# Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
#
# -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31)
# \-> b85 (29) -> b86 (30) \-> b89a (32)
#
self.log.info("Test re-orging blocks with OP_RETURN in them")
b84 = self.next_block(84)
tx1 = self.create_tx(out[29], 0, 0, CScript([OP_RETURN]))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.calc_sha256()
self.sign_tx(tx1, out[29])
tx1.rehash()
tx2 = self.create_tx(tx1, 1, 0, CScript([OP_RETURN]))
tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx3 = self.create_tx(tx1, 2, 0, CScript([OP_RETURN]))
tx3.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx4 = self.create_tx(tx1, 3, 0, CScript([OP_TRUE]))
tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx5 = self.create_tx(tx1, 4, 0, CScript([OP_RETURN]))
b84 = self.update_block(84, [tx1, tx2, tx3, tx4, tx5])
self.sync_blocks([b84], True)
self.save_spendable_output()
self.move_tip(83)
b85 = self.next_block(85, spend=out[29])
self.sync_blocks([b85], False) # other chain is same length
b86 = self.next_block(86, spend=out[30])
self.sync_blocks([b86], True)
self.move_tip(84)
b87 = self.next_block(87, spend=out[30])
self.sync_blocks([b87], False) # other chain is same length
self.save_spendable_output()
b88 = self.next_block(88, spend=out[31])
self.sync_blocks([b88], True)
self.save_spendable_output()
# trying to spend the OP_RETURN output is rejected
b89a = self.next_block("89a", spend=out[32])
tx = self.create_tx(tx1, 0, 0, CScript([OP_TRUE]))
b89a = self.update_block("89a", [tx])
self.sync_blocks([b89a], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
self.log.info("Test a re-org of one week's worth of blocks (1088 blocks)")
self.move_tip(88)
LARGE_REORG_SIZE = 1088
blocks = []
spend = out[32]
for i in range(89, LARGE_REORG_SIZE + 89):
b = self.next_block(i, spend)
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
b = self.update_block(i, [tx])
assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE)
blocks.append(b)
self.save_spendable_output()
spend = self.get_spendable_output()
self.sync_blocks(blocks, True, timeout=180)
chain1_tip = i
# now create alt chain of same length
self.move_tip(88)
blocks2 = []
for i in range(89, LARGE_REORG_SIZE + 89):
blocks2.append(self.next_block("alt" + str(i)))
self.sync_blocks(blocks2, False, request_block=False)
# extend alt chain to trigger re-org
block = self.next_block("alt" + str(chain1_tip + 1))
self.sync_blocks([block], True, timeout=180)
# ... and re-org back to the first chain
self.move_tip(chain1_tip)
block = self.next_block(chain1_tip + 1)
self.sync_blocks([block], False, request_block=False)
block = self.next_block(chain1_tip + 2)
self.sync_blocks([block], True, timeout=180)
# Helper methods
################
def add_transactions_to_block(self, block, tx_list):
[tx.rehash() for tx in tx_list]<|fim▁hole|> return create_tx_with_script(spend_tx, n, amount=value, script_pub_key=script)
# sign a transaction, using the key we know about
# this signs input 0 in tx, which is assumed to be spending output n in spend_tx
def sign_tx(self, tx, spend_tx):
scriptPubKey = bytearray(spend_tx.vout[0].scriptPubKey)
if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend
tx.vin[0].scriptSig = CScript()
return
(sighash, err) = SignatureHash(spend_tx.vout[0].scriptPubKey, tx, 0, SIGHASH_ALL)
tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
def create_and_sign_transaction(self, spend_tx, value, script=CScript([OP_TRUE])):
tx = self.create_tx(spend_tx, 0, value, script)
self.sign_tx(tx, spend_tx)
tx.rehash()
return tx
def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
if self.tip is None:
base_block_hash = self.genesis_hash
block_time = int(time.time()) + 1
else:
base_block_hash = self.tip.sha256
block_time = self.tip.nTime + 1
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
coinbase.vout[0].nValue += additional_coinbase_value
coinbase.rehash()
if spend is None:
block = create_block(base_block_hash, coinbase, block_time)
else:
coinbase.vout[0].nValue += spend.vout[0].nValue - 1 # all but one satoshi to fees
coinbase.rehash()
block = create_block(base_block_hash, coinbase, block_time)
tx = self.create_tx(spend, 0, 1, script) # spend 1 satoshi
self.sign_tx(tx, spend)
self.add_transactions_to_block(block, [tx])
block.hashMerkleRoot = block.calc_merkle_root()
if solve:
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
assert number not in self.blocks
self.blocks[number] = block
return block
# save the current tip so it can be spent by a later block
def save_spendable_output(self):
self.log.debug("saving spendable output %s" % self.tip.vtx[0])
self.spendable_outputs.append(self.tip)
# get an output that we previously marked as spendable
def get_spendable_output(self):
self.log.debug("getting spendable output %s" % self.spendable_outputs[0].vtx[0])
return self.spendable_outputs.pop(0).vtx[0]
# move the tip back to a previous block
def move_tip(self, number):
self.tip = self.blocks[number]
# adds transactions to the block and updates state
def update_block(self, block_number, new_transactions):
block = self.blocks[block_number]
self.add_transactions_to_block(block, new_transactions)
old_sha256 = block.sha256
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Update the internal state just like in next_block
self.tip = block
if block.sha256 != old_sha256:
self.block_heights[block.sha256] = self.block_heights[old_sha256]
del self.block_heights[old_sha256]
self.blocks[block_number] = block
return block
def bootstrap_p2p(self):
"""Add a P2P connection to the node.
Helper to connect and wait for version handshake."""
self.nodes[0].add_p2p_connection(P2PDataStore())
# We need to wait for the initial getheaders from the peer before we
# start populating our blockstore. If we don't, then we may run ahead
# to the next subtest before we receive the getheaders. We'd then send
# an INV for the next block and receive two getheaders - one for the
# IBD and one for the INV. We'd respond to both and could get
# unexpectedly disconnected if the DoS score for that error is 50.
self.nodes[0].p2p.wait_for_getheaders(timeout=5)
def reconnect_p2p(self):
"""Tear down and bootstrap the P2P connection to the node.
The node gets disconnected several times in this test. This helper
method reconnects the p2p and restarts the network thread."""
self.nodes[0].disconnect_p2ps()
self.bootstrap_p2p()
def sync_blocks(self, blocks, success=True, reject_code=None, reject_reason=None, request_block=True, reconnect=False, timeout=60):
"""Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
Call with success = False if the tip shouldn't advance to the most recent block."""
self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_code=reject_code, reject_reason=reject_reason, request_block=request_block, timeout=timeout)
if reconnect:
self.reconnect_p2p()
if __name__ == '__main__':
FullBlockTest().main()<|fim▁end|>
|
block.vtx.extend(tx_list)
# this is a little handier to use than the version in blocktools.py
def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])):
|
<|file_name|>system_tables.go<|end_file_name|><|fim▁begin|>/*
Copyright 2022 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package physical
import (
"strings"
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/sqltypes"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/evalengine"
)
func (rp *Route) findSysInfoRoutingPredicatesGen4(predicates []sqlparser.Expr, reservedVars *sqlparser.ReservedVars) error {
for _, pred := range predicates {
isTableSchema, bvName, out, err := extractInfoSchemaRoutingPredicate(pred, reservedVars)
if err != nil {
return err
}
if out == nil {
// we didn't find a predicate to use for routing, continue to look for next predicate
continue
}
if isTableSchema {
rp.SysTableTableSchema = append(rp.SysTableTableSchema, out)
} else {
if rp.SysTableTableName == nil {
rp.SysTableTableName = map[string]evalengine.Expr{}
}
rp.SysTableTableName[bvName] = out
}
}
return nil
}
func extractInfoSchemaRoutingPredicate(in sqlparser.Expr, reservedVars *sqlparser.ReservedVars) (bool, string, evalengine.Expr, error) {
switch cmp := in.(type) {
case *sqlparser.ComparisonExpr:
if cmp.Operator == sqlparser.EqualOp {
isSchemaName, col, other, replaceOther := findOtherComparator(cmp)
if col != nil && shouldRewrite(other) {
evalExpr, err := evalengine.Translate(other, &notImplementedSchemaInfoConverter{})
if err != nil {
if strings.Contains(err.Error(), evalengine.ErrTranslateExprNotSupported) {
// This just means we can't rewrite this particular expression,
// not that we have to exit altogether
return false, "", nil, nil
}
return false, "", nil, err
}
var name string
if isSchemaName {
name = sqltypes.BvSchemaName
} else {
name = reservedVars.ReserveColName(col.(*sqlparser.ColName))
}
replaceOther(sqlparser.NewArgument(name))
return isSchemaName, name, evalExpr, nil
}
}
}
return false, "", nil, nil
}
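// findOtherComparator reports whether the matched column is a schema column (as opposed to a
// table-name column), and returns that column expression, the expression on the other side of
// the equality, and a callback that replaces that other side with a bindvar argument.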
func findOtherComparator(cmp *sqlparser.ComparisonExpr) (bool, sqlparser.Expr, sqlparser.Expr, func(arg sqlparser.Argument)) {
if schema, table := isTableSchemaOrName(cmp.Left); schema || table {
return schema, cmp.Left, cmp.Right, func(arg sqlparser.Argument) {
cmp.Right = arg
}
}
if schema, table := isTableSchemaOrName(cmp.Right); schema || table {
return schema, cmp.Right, cmp.Left, func(arg sqlparser.Argument) {
cmp.Left = arg
}
}
return false, nil, nil, nil
}
func shouldRewrite(e sqlparser.Expr) bool {
switch node := e.(type) {
case *sqlparser.FuncExpr:
// we should not rewrite database() calls against information_schema
return !(node.Name.EqualString("database") || node.Name.EqualString("schema"))
}
return true
}
func isTableSchemaOrName(e sqlparser.Expr) (isTableSchema bool, isTableName bool) {
col, ok := e.(*sqlparser.ColName)
if !ok {
return false, false<|fim▁hole|>}
func isDbNameCol(col *sqlparser.ColName) bool {
return col.Name.EqualString("table_schema") || col.Name.EqualString("constraint_schema") || col.Name.EqualString("schema_name") || col.Name.EqualString("routine_schema")
}
func isTableNameCol(col *sqlparser.ColName) bool {
return col.Name.EqualString("table_name")
}
type notImplementedSchemaInfoConverter struct{}
func (f *notImplementedSchemaInfoConverter) ColumnLookup(*sqlparser.ColName) (int, error) {
return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Comparing table schema name with a column name not yet supported")
}
func (f *notImplementedSchemaInfoConverter) CollationForExpr(sqlparser.Expr) collations.ID {
return collations.Unknown
}
func (f *notImplementedSchemaInfoConverter) DefaultCollation() collations.ID {
return collations.Default()
}<|fim▁end|>
|
}
return isDbNameCol(col), isTableNameCol(col)
|
<|file_name|>index.d.ts<|end_file_name|><|fim▁begin|>// Type definitions for unused-files-webpack-plugin 3.4
// Project: https://github.com/tomchentw/unused-files-webpack-plugin
// Definitions by: Vladimir Grenaderov <https://github.com/VladimirGrenaderov>
// Max Boguslavskiy <https://github.com/maxbogus>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.3
import { Plugin } from 'webpack';
export interface Options {
patterns?: string[];
failOnUnused: boolean;
globOptions?: {
ignore?: string | string[];
};
ignore?: string | string[];
cwd?: string;
}
<|fim▁hole|>}<|fim▁end|>
|
export class UnusedFilesWebpackPlugin extends Plugin {
constructor(options: Options);
|
<|file_name|>factory.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2017 DST Controls
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""<|fim▁hole|>from future.builtins import *
from future.utils import iteritems
def create(factory, thing, session, webapi=None):
"""
Return an object created with factory
:param webapi:
:param factory:
:param params:
:param session:
:return:
"""
payload = dict(map(lambda k_v: (k_v[0].lower(), k_v[1]), iteritems(thing)))
# added to avoid creating Value objects if the value was considered bad values
# but we don't need this since we don't want the library to cull bad values that
# the pi web api gave us.
#
# if 'good' in payload:
# if not payload['good']:
# return None
payload.update({'session': session, 'webapi': webapi})
thing = factory.create(**payload)
return thing
class Factory(object):
def __init__(self, type_):
self.type = type_
def create(self, **kwargs):
return self.type(**kwargs)<|fim▁end|>
|
osisoftpy.factory
~~~~~~~~~~~~
"""
from __future__ import (absolute_import, division, unicode_literals)
|
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
class Entry(models.Model):
title = models.CharField(max_length=200)
date = models.DateTimeField()
class Meta:
ordering = ('date',)
def __unicode__(self):
return self.title
def get_absolute_url(self):
return "/blog/%s/" % self.pk
class Article(models.Model):
title = models.CharField(max_length=200)
entry = models.ForeignKey(Entry)
<|fim▁hole|><|fim▁end|>
|
def __unicode__(self):
return self.title
|
<|file_name|>test_dolfin_linear_solver.py<|end_file_name|><|fim▁begin|># Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import pytest
from numpy import isclose
from dolfin import (assemble, dx, Function, FunctionSpace, grad, inner, solve, TestFunction, TrialFunction,
UnitSquareMesh)
from rbnics.backends import LinearSolver as FactoryLinearSolver
from rbnics.backends.dolfin import LinearSolver as DolfinLinearSolver
from test_dolfin_utils import RandomDolfinFunction
LinearSolver = None
AllLinearSolver = {"dolfin": DolfinLinearSolver, "factory": FactoryLinearSolver}
class Data(object):
def __init__(self, Th, callback_type):
# Create mesh and define function space
mesh = UnitSquareMesh(Th, Th)
self.V = FunctionSpace(mesh, "Lagrange", 1)
# Define variational problem
u = TrialFunction(self.V)
v = TestFunction(self.V)
self.a = inner(grad(u), grad(v)) * dx + inner(u, v) * dx
self.f = lambda g: g * v * dx
# Define callback function depending on callback type
assert callback_type in ("form callbacks", "tensor callbacks")
if callback_type == "form callbacks":
def callback(arg):
return arg
elif callback_type == "tensor callbacks":
def callback(arg):
return assemble(arg)
self.callback_type = callback_type
self.callback = callback
def generate_random(self):
# Generate random rhs
g = RandomDolfinFunction(self.V)<|fim▁hole|> def evaluate_builtin(self, a, f):
a = self.callback(a)
f = self.callback(f)
result_builtin = Function(self.V)
if self.callback_type == "form callbacks":
solve(a == f, result_builtin, solver_parameters={"linear_solver": "mumps"})
elif self.callback_type == "tensor callbacks":
solve(a, result_builtin.vector(), f, "mumps")
return result_builtin
def evaluate_backend(self, a, f):
a = self.callback(a)
f = self.callback(f)
result_backend = Function(self.V)
solver = LinearSolver(a, result_backend, f)
solver.set_parameters({
"linear_solver": "mumps"
})
solver.solve()
return result_backend
def assert_backend(self, a, f, result_backend):
result_builtin = self.evaluate_builtin(a, f)
error = Function(self.V)
error.vector().add_local(+ result_backend.vector().get_local())
error.vector().add_local(- result_builtin.vector().get_local())
error.vector().apply("add")
relative_error = error.vector().norm("l2") / result_builtin.vector().norm("l2")
assert isclose(relative_error, 0., atol=1e-12)
@pytest.mark.parametrize("Th", [2**i for i in range(3, 9)])
@pytest.mark.parametrize("callback_type", ["form callbacks", "tensor callbacks"])
@pytest.mark.parametrize("test_type", ["builtin"] + list(AllLinearSolver.keys()))
def test_dolfin_linear_solver(Th, callback_type, test_type, benchmark):
data = Data(Th, callback_type)
print("Th = " + str(Th) + ", Nh = " + str(data.V.dim()))
if test_type == "builtin":
print("Testing " + test_type + ", callback_type = " + callback_type)
benchmark(data.evaluate_builtin, setup=data.generate_random)
else:
print("Testing " + test_type + " backend" + ", callback_type = " + callback_type)
global LinearSolver
LinearSolver = AllLinearSolver[test_type]
benchmark(data.evaluate_backend, setup=data.generate_random, teardown=data.assert_backend)<|fim▁end|>
|
# Return
return (self.a, self.f(g))
|
<|file_name|>lint-ctypes.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(improper_ctypes)]
#![feature(rustc_private)]
#![allow(private_in_public)]
extern crate libc;
use std::marker::PhantomData;
trait Mirror { type It: ?Sized; }
impl<T: ?Sized> Mirror for T { type It = Self; }
#[repr(C)]<|fim▁hole|>pub struct StructWithProjection(*mut <StructWithProjection as Mirror>::It);
#[repr(C)]
pub struct StructWithProjectionAndLifetime<'a>(
&'a mut <StructWithProjectionAndLifetime<'a> as Mirror>::It
);
pub type I32Pair = (i32, i32);
#[repr(C)]
pub struct ZeroSize;
pub type RustFn = fn();
pub type RustBadRet = extern fn() -> Box<u32>;
pub type CVoidRet = ();
pub struct Foo;
#[repr(transparent)]
pub struct TransparentI128(i128);
#[repr(transparent)]
pub struct TransparentStr(&'static str);
#[repr(transparent)]
pub struct TransparentBadFn(RustBadRet);
#[repr(transparent)]
pub struct TransparentInt(u32);
#[repr(transparent)]
pub struct TransparentRef<'a>(&'a TransparentInt);
#[repr(transparent)]
pub struct TransparentLifetime<'a>(*const u8, PhantomData<&'a ()>);
#[repr(transparent)]
pub struct TransparentUnit<U>(f32, PhantomData<U>);
#[repr(transparent)]
pub struct TransparentCustomZst(i32, ZeroSize);
#[repr(C)]
pub struct ZeroSizeWithPhantomData(::std::marker::PhantomData<i32>);
extern {
pub fn ptr_type1(size: *const Foo); //~ ERROR: uses type `Foo`
pub fn ptr_type2(size: *const Foo); //~ ERROR: uses type `Foo`
pub fn slice_type(p: &[u32]); //~ ERROR: uses type `[u32]`
pub fn str_type(p: &str); //~ ERROR: uses type `str`
pub fn box_type(p: Box<u32>); //~ ERROR uses type `std::boxed::Box<u32>`
pub fn char_type(p: char); //~ ERROR uses type `char`
pub fn i128_type(p: i128); //~ ERROR uses type `i128`
pub fn u128_type(p: u128); //~ ERROR uses type `u128`
pub fn trait_type(p: &Clone); //~ ERROR uses type `dyn std::clone::Clone`
pub fn tuple_type(p: (i32, i32)); //~ ERROR uses type `(i32, i32)`
pub fn tuple_type2(p: I32Pair); //~ ERROR uses type `(i32, i32)`
pub fn zero_size(p: ZeroSize); //~ ERROR struct has no fields
pub fn zero_size_phantom(p: ZeroSizeWithPhantomData); //~ ERROR composed only of PhantomData
pub fn zero_size_phantom_toplevel()
-> ::std::marker::PhantomData<bool>; //~ ERROR: composed only of PhantomData
pub fn fn_type(p: RustFn); //~ ERROR function pointer has Rust-specific
pub fn fn_type2(p: fn()); //~ ERROR function pointer has Rust-specific
pub fn fn_contained(p: RustBadRet); //~ ERROR: uses type `std::boxed::Box<u32>`
pub fn transparent_i128(p: TransparentI128); //~ ERROR: uses type `i128`
pub fn transparent_str(p: TransparentStr); //~ ERROR: uses type `str`
pub fn transparent_fn(p: TransparentBadFn); //~ ERROR: uses type `std::boxed::Box<u32>`
pub fn good3(fptr: Option<extern fn()>);
pub fn good4(aptr: &[u8; 4 as usize]);
pub fn good5(s: StructWithProjection);
pub fn good6(s: StructWithProjectionAndLifetime);
pub fn good7(fptr: extern fn() -> ());
pub fn good8(fptr: extern fn() -> !);
pub fn good9() -> ();
pub fn good10() -> CVoidRet;
pub fn good11(size: isize);
pub fn good12(size: usize);
pub fn good13(n: TransparentInt);
pub fn good14(p: TransparentRef);
pub fn good15(p: TransparentLifetime);
pub fn good16(p: TransparentUnit<ZeroSize>);
pub fn good17(p: TransparentCustomZst);
#[allow(improper_ctypes)]
pub fn good18(_: &String);
}
#[allow(improper_ctypes)]
extern {
pub fn good19(_: &String);
}
#[cfg(not(target_arch = "wasm32"))]
extern {
pub fn good1(size: *const libc::c_int);
pub fn good2(size: *const libc::c_uint);
}
fn main() {
}<|fim▁end|>
| |
<|file_name|>TestDOMUtil.java<|end_file_name|><|fim▁begin|>package org.auscope.portal.core.util;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import javax.xml.XMLConstants;
import javax.xml.namespace.NamespaceContext;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.TransformerException;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpression;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;
import org.auscope.portal.core.test.PortalTestClass;
import org.junit.Assert;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.xml.sax.SAXException;
/**
* Unit tests for DOMUtil
*
* @author Josh Vote
*
*/
public class TestDOMUtil extends PortalTestClass {
/**
* Simple test to ensure that the 2 DOM util methods are reversible
* @throws SAXException
* @throws IOException
* @throws ParserConfigurationException
* @throws TransformerException
*/
@Test
public void testReversibleTransformation() throws ParserConfigurationException, IOException, SAXException, TransformerException {
final String originalXmlString = ResourceUtil
.loadResourceAsString("org/auscope/portal/core/test/xml/TestXML_NoPrettyPrint.xml");
final Document doc = DOMUtil.buildDomFromString(originalXmlString);
final String newXmlString = DOMUtil.buildStringFromDom(doc, false);
Assert.assertEquals(originalXmlString, newXmlString);
}
/**
* Namespace for use with src/test/resources/TestXML_NoPrettyPrint.xml
*
* @author vot002
*
*/
public class SimpleXMLNamespace implements NamespaceContext {
private Map<String, String> map;
public SimpleXMLNamespace() {
map = new HashMap<>();
map.put("test", "http://test.namespace");
map.put("test2", "http://test2.namespace");
}
/**
* This method returns the uri for all prefixes needed.
*
* @param prefix
* @return uri
*/
@Override
public String getNamespaceURI(final String prefix) {
if (prefix == null)
throw new IllegalArgumentException("No prefix provided!");
if (map.containsKey(prefix))
return map.get(prefix);
else
return XMLConstants.NULL_NS_URI;
}
@Override
public String getPrefix(final String namespaceURI) {
// Not needed in this context.
return null;
}
@Override
public Iterator<String> getPrefixes(final String namespaceURI) {
// Not needed in this context.
return null;
}
}
/**
* Simple test to ensure that the DOM object is namespace aware
* @throws XPathExpressionException
* @throws IOException
* @throws SAXException
* @throws ParserConfigurationException
*/
@Test
public void testDOMObjectNamespace() throws XPathExpressionException, IOException, ParserConfigurationException, SAXException {
//Build our DOM
final String originalXmlString = ResourceUtil
.loadResourceAsString("org/auscope/portal/core/test/xml/TestXML_NoPrettyPrint.xml");
final Document doc = DOMUtil.buildDomFromString(originalXmlString);
//Build our queries (namespace aware)
final XPathFactory factory = XPathFactory.newDefaultInstance();
final XPath xPath = factory.newXPath();<|fim▁hole|> final XPathExpression getChild1Expr = xPath.compile("test:root/test2:child1");
final XPathExpression getChild2Expr = xPath.compile("test:root/test2:child2");
final XPathExpression failingExpr = xPath.compile("root/child1");
Node testNode = (Node) getChild1Expr.evaluate(doc, XPathConstants.NODE);
Assert.assertNotNull(testNode);
Assert.assertEquals("child1Value", testNode.getTextContent());
testNode = (Node) getChild2Expr.evaluate(doc, XPathConstants.NODE);
Assert.assertNotNull(testNode);
Assert.assertEquals("child2Value", testNode.getTextContent());
//This should fail (no namespace specified)
testNode = (Node) failingExpr.evaluate(doc, XPathConstants.NODE);
Assert.assertNull(testNode);
}
}<|fim▁end|>
|
xPath.setNamespaceContext(new SimpleXMLNamespace());
|
<|file_name|>scheme.go<|end_file_name|><|fim▁begin|>package scheme
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1"
servicecertsignerv1alpha1 "github.com/openshift/api/servicecertsigner/v1alpha1"
)
<|fim▁hole|> utilruntime.Must(operatorv1alpha1.Install(ConfigScheme))
utilruntime.Must(servicecertsignerv1alpha1.Install(ConfigScheme))
}<|fim▁end|>
|
var ConfigScheme = runtime.NewScheme()
func init() {
|
<|file_name|>0003_replace_null_tweetchunk_tz_country.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):<|fim▁hole|> """Write your forwards methods here."""
# Replace all null values with blanks
orm.TweetChunk.objects.filter(tz_country__isnull=True).update(tz_country='')
def backwards(self, orm):
"""Write your backwards methods here."""
# Nothing to do -- blanks are still ok in the previous version
models = {
u'map.maptimeframe': {
'Meta': {'object_name': 'MapTimeFrame'},
'analysis_time': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'calculated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'chunks_added': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'missing_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'node_cache_hits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'node_cache_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'nodes_added': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'tweet_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'map.treenode': {
'Meta': {'object_name': 'TreeNode', 'index_together': "[['parent', 'word']]"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['map.TreeNode']"}),
'word': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
u'map.tweetchunk': {
'Meta': {'object_name': 'TweetChunk'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['map.TreeNode']"}),
'tweet': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['twitter_stream.Tweet']"}),
'tz_country': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
u'map.tz_country': {
'Meta': {'object_name': 'Tz_Country'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_time_zone': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
u'twitter_stream.tweet': {
'Meta': {'object_name': 'Tweet'},
'analyzed_by': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'favorite_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'filter_level': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_reply_to_status_id': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'lang': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '9', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'retweet_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'retweeted_status_id': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'truncated': ('django.db.models.fields.BooleanField', [], {}),
'tweet_id': ('django.db.models.fields.BigIntegerField', [], {}),
'user_followers_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user_friends_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user_geo_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_id': ('django.db.models.fields.BigIntegerField', [], {}),
'user_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '150', 'null': 'True', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'user_screen_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_time_zone': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '150', 'null': 'True', 'blank': 'True'}),
'user_utc_offset': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'user_verified': ('django.db.models.fields.BooleanField', [], {})
}
}
complete_apps = ['map']
symmetrical = True<|fim▁end|>
|
def forwards(self, orm):
|
<|file_name|>main.go<|end_file_name|><|fim▁begin|>// Copyright 2012-Present Couchbase, Inc.
//
// Use of this software is governed by the Business Source License included
// in the file licenses/BSL-Couchbase.txt. As of the Change Date specified
// in that file, in accordance with the Business Source License, use of this
// software will be governed by the Apache License, Version 2.0, included in
// the file licenses/APL2.txt.<|fim▁hole|>import (
"math/rand"
"time"
"github.com/couchbase/sync_gateway/rest"
)
func init() {
rand.Seed(time.Now().UTC().UnixNano())
}
// Simple Sync Gateway launcher tool.
func main() {
rest.ServerMain()
}<|fim▁end|>
|
package main
|
<|file_name|>test.py<|end_file_name|><|fim▁begin|>from bottle import Bottle, run
app = Bottle()
@app.route('/')
def index():
return 'PService Running'
#
# Start a server instance
#
run(
app, # Run |app| Bottle() instance
host = '0.0.0.0',
port = 8080,
reloader = True, # restarts the server every time edit a module file<|fim▁hole|> debug = True # Comment out it before deploy
)<|fim▁end|>
| |
<|file_name|>CreativeClickThroughUrl.java<|end_file_name|><|fim▁begin|>/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.dfareporting.model;
/**
* Click-through URL
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the DCM/DFA Reporting And Trafficking API. For a detailed
* explanation see:
* <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
* </p>
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class CreativeClickThroughUrl extends com.google.api.client.json.GenericJson {
/**
* Read-only convenience field representing the actual URL that will be used for this click-
* through. The URL is computed as follows: - If landingPageId is specified then that landing
* page's URL is assigned to this field. - Otherwise, the customClickThroughUrl is assigned to
* this field.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String computedClickThroughUrl;
/**
* Custom click-through URL. Applicable if the landingPageId field is left unset.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String customClickThroughUrl;
/**
* ID of the landing page for the click-through URL.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key @com.google.api.client.json.JsonString
private java.lang.Long landingPageId;
/**
* Read-only convenience field representing the actual URL that will be used for this click-
* through. The URL is computed as follows: - If landingPageId is specified then that landing
* page's URL is assigned to this field. - Otherwise, the customClickThroughUrl is assigned to
* this field.
* @return value or {@code null} for none
*/
public java.lang.String getComputedClickThroughUrl() {
return computedClickThroughUrl;
}
/**
* Read-only convenience field representing the actual URL that will be used for this click-
* through. The URL is computed as follows: - If landingPageId is specified then that landing
* page's URL is assigned to this field. - Otherwise, the customClickThroughUrl is assigned to
* this field.
* @param computedClickThroughUrl computedClickThroughUrl or {@code null} for none
*/
public CreativeClickThroughUrl setComputedClickThroughUrl(java.lang.String computedClickThroughUrl) {
this.computedClickThroughUrl = computedClickThroughUrl;
return this;
}
/**
* Custom click-through URL. Applicable if the landingPageId field is left unset.
* @return value or {@code null} for none
*/<|fim▁hole|> return customClickThroughUrl;
}
/**
* Custom click-through URL. Applicable if the landingPageId field is left unset.
* @param customClickThroughUrl customClickThroughUrl or {@code null} for none
*/
public CreativeClickThroughUrl setCustomClickThroughUrl(java.lang.String customClickThroughUrl) {
this.customClickThroughUrl = customClickThroughUrl;
return this;
}
/**
* ID of the landing page for the click-through URL.
* @return value or {@code null} for none
*/
public java.lang.Long getLandingPageId() {
return landingPageId;
}
/**
* ID of the landing page for the click-through URL.
* @param landingPageId landingPageId or {@code null} for none
*/
public CreativeClickThroughUrl setLandingPageId(java.lang.Long landingPageId) {
this.landingPageId = landingPageId;
return this;
}
@Override
public CreativeClickThroughUrl set(String fieldName, Object value) {
return (CreativeClickThroughUrl) super.set(fieldName, value);
}
@Override
public CreativeClickThroughUrl clone() {
return (CreativeClickThroughUrl) super.clone();
}
}<|fim▁end|>
|
public java.lang.String getCustomClickThroughUrl() {
|
<|file_name|>test_error.py<|end_file_name|><|fim▁begin|># pylint: disable-all
import unittest
from circleci.error import CircleCIException, BadKeyError, BadVerbError, InvalidFilterError
class TestCircleCIError(unittest.TestCase):
def setUp(self):
self.base = CircleCIException('fake')
self.key = BadKeyError('fake')
self.verb = BadVerbError('fake')
self.filter = InvalidFilterError('fake', 'status')
self.afilter = InvalidFilterError('fake', 'artifacts')
def test_error_implements_str(self):
self.assertTrue(self.base.__str__ is not object.__str__)
string = self.base.__str__()
self.assertIn('invalid', string)
def test_verb_message(self):
self.assertIn('DELETE', self.verb.message)
<|fim▁hole|> self.assertIn('running', self.filter.message)
self.assertIn('completed', self.afilter.message)<|fim▁end|>
|
def test_key_message(self):
self.assertIn('deploy-key', self.key.message)
def test_filter_message(self):
|
<|file_name|>as-array-test.js<|end_file_name|><|fim▁begin|>/* eslint-env jest */
jest.unmock('../as-array');
import {
asArray,
} from '../as-array';
describe('asArray()', () => {
it('returns input as it is if an array', () => {
expect(asArray([1, 2, 3])).toEqual([1, 2, 3]);
});
<|fim▁hole|> });
it('converts null and undefined to an empty array', () => {
expect(asArray(null)).toEqual([]);
expect(asArray(undefined)).toEqual([]);
});
});<|fim▁end|>
|
it('converts scalar value to an array which contains it', () => {
expect(asArray(42)).toEqual([42]);
|
<|file_name|>contents.js<|end_file_name|><|fim▁begin|>(function(){
<|fim▁hole|>print (new \tomk79\pickles2\px2dthelper\main($px))->document_modules()->build_js();
?>
})();<|fim▁end|>
|
<?php
|
<|file_name|>Event.ts<|end_file_name|><|fim▁begin|>// 注意不能重名
module GameEvents {
	// Game state
export var GAME_READY : string = "GameEventsGameReady";
export var GAME_START : string = "GameEventsGameStart";
export var GAME_PLAY : string = "GameEventsGamePlay";
export var GAME_OVER : string = "GameEventsGameOver";
<|fim▁hole|> export var ADD_SCORE : string = "GameEventsAddScore";
export var TAP_BIRD : string = "GameEventsTapBird";
}
module UIEvents {
export var OPEN_PANEL : string = "UIEventsOpenPanel";
export var CLOSE_PANEL : string = "UIEventsUiClosePanel";
}<|fim▁end|>
|
	// Game logic
|
<|file_name|>viewer.js<|end_file_name|><|fim▁begin|>import React, {Component} from 'react';
import PdfJS from './pdfJS';
import Contract from "../../contract";
import Event from '../../event';
import AnnotationLoader from '../../annotator/loader';
class Viewer extends Component {
constructor(props) {
super(props);
this.state = ({
page_no: 0,
pdf_url: "",
scale: 0,
loading: true
});
}
componentDidMount() {
this.subscribe_zoom = Event.subscribe('zoom:change', (scale) => {
this.setState({scale: scale});
});
this.updateState(this.props);
}
updateState(props) {
var {page_no, pdf_url} = props.page;
var scale = Contract.getPdfScale();
this.setState({
page_no,
pdf_url,
scale,
loading: false
});
}
componentWillUnmount() {
this.subscribe_zoom.remove();
}
getPageID() {
return 'pdf-' + this.state.page_no;
}
getAnnotations() {
let page = [];<|fim▁hole|> if (typeof annotation.shapes == 'object' && this.state.page_no == annotation.page_no) {
page.push(annotation);
}
});
return page;
}
onPageRendered() {
if (!this.annotator) {
this.annotator = new AnnotationLoader('.pdf-annotator');
this.annotator.init();
Contract.setAnnotatorInstance(this.annotator);
}
const annotations = this.getAnnotations();
if (annotations.length > 0) {
this.annotator.content.annotator("loadAnnotations", annotations);
}
Event.publish('annotation:loaded', 'pdf');
}
componentWillReceiveProps(props) {
this.updateState(props);
}
shouldComponentUpdate(nextProps, nextState) {
return (nextProps.page.page_no !== this.state.page_no || this.state.scale !== nextState.scale);
}
render() {
if (this.state.loading) {
return ( <div className="pdf-viewer pdf-annotator">
<div className="pdf-wrapper">
Loading...
</div>
</div>);
}
return (
<div className="pdf-viewer pdf-annotator">
<div id={this.getPageID()} className="pdf-wrapper">
<PdfJS onPageRendered={this.onPageRendered.bind(this)}
file={this.state.pdf_url}
page={this.state.page_no}
scale={this.state.scale}/>
</div>
<a href="#" className="change-view-icon exit-fullscreen"></a>
</div>
);
}
}
export default Viewer;<|fim▁end|>
|
let annotations = Contract.getAnnotations();
annotations.result.forEach(annotation=> {
|
<|file_name|>introspection_helper.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from gi.repository import GLib, GObject, Gio
from dfeet import dbus_utils
def args_signature_markup(arg_signature):
return '<small><span foreground="#2E8B57">%s</span></small>' % (arg_signature)
def args_name_markup(arg_name):
return '<small>%s</small>' % (arg_name,)
class DBusNode(GObject.GObject):
"""object to represent a DBus Node (object path)"""
def __init__(self, name, object_path, node_info):
GObject.GObject.__init__(self)
self.__name = name
self.__object_path = object_path
self.__node_info = node_info # Gio.GDBusNodeInfo object
def __repr__(self):
return "Name: %s ; ObjPath: %s ; NodeInfo: %s" % (
self.name, self.object_path, self.node_info)
@property
def name(self):
return self.__name
@property
def object_path(self):
return self.__object_path
@property
def node_info(self):
return self.__node_info
class DBusInterface(DBusNode):
"""object to represent a DBus Interface"""
def __init__(self, dbus_node_obj, iface_info):
DBusNode.__init__(self, dbus_node_obj.name,
dbus_node_obj.object_path, dbus_node_obj.node_info)
self.__iface_info = iface_info # Gio.GDBusInterfaceInfo object
def __repr__(self):
return "iface '%s' on node '%s'" % (self.iface_info.name, self.node_info.path)
@property
def iface_info(self):
return self.__iface_info
class DBusProperty(DBusInterface):
"""object to represent a DBus Property"""
def __init__(self, dbus_iface_obj, property_info):
DBusInterface.__init__(self, dbus_iface_obj, dbus_iface_obj.iface_info)<|fim▁hole|>
def __repr__(self):
sig = dbus_utils.sig_to_string(self.property_info.signature)
return "%s %s (%s)" % (sig, self.property_info.name, self.property_info.flags)
@property
def property_info(self):
return self.__property_info
@property
def value(self):
return self.__value
@value.setter
def value(self, new_val):
self.__value = new_val
@property
def markup_str(self):
sig = dbus_utils.sig_to_string(self.property_info.signature)
readwrite = list()
if self.readable:
readwrite.append("read")
if self.writable:
readwrite.append("write")
s = "%s %s <small>(%s)</small>" % (
args_signature_markup(sig),
args_name_markup(self.property_info.name), " / ".join(readwrite))
if self.value is not None:
s += " = %s" % (GLib.markup_escape_text(str(self.value), -1),)
return s
@property
def readable(self):
if int(self.property_info.flags) == int(Gio.DBusPropertyInfoFlags.READABLE) or \
int(self.property_info.flags) == \
(int(Gio.DBusPropertyInfoFlags.WRITABLE | Gio.DBusPropertyInfoFlags.READABLE)):
return True
else:
return False
@property
def writable(self):
if int(self.property_info.flags) == int(Gio.DBusPropertyInfoFlags.WRITABLE) or \
int(self.property_info.flags) == \
(int(Gio.DBusPropertyInfoFlags.WRITABLE | Gio.DBusPropertyInfoFlags.READABLE)):
return True
else:
return False
class DBusSignal(DBusInterface):
"""object to represent a DBus Signal"""
def __init__(self, dbus_iface_obj, signal_info):
DBusInterface.__init__(self, dbus_iface_obj,
dbus_iface_obj.iface_info)
self.__signal_info = signal_info # Gio.GDBusSignalInfo object
def __repr__(self):
return "%s" % (self.signal_info.name)
@property
def signal_info(self):
return self.__signal_info
@property
def args(self):
args = list()
for arg in self.signal_info.args:
sig = dbus_utils.sig_to_string(arg.signature)
args.append({'signature': sig, 'name': arg.name})
return args
@property
def args_markup_str(self):
result = ''
result += '<span foreground="#FF00FF">(</span>'
result += ', '.join('%s' % (args_signature_markup(arg['signature'])) for arg in self.args)
result += '<span foreground="#FF00FF">)</span>'
return result
@property
def markup_str(self):
return "%s %s" % (self.signal_info.name, self.args_markup_str)
class DBusMethod(DBusInterface):
"""object to represent a DBus Method"""
def __init__(self, dbus_iface_obj, method_info):
DBusInterface.__init__(self, dbus_iface_obj, dbus_iface_obj.iface_info)
self.__method_info = method_info # Gio.GDBusMethodInfo object
def __repr__(self):
return "%s(%s) ↦ %s (%s)" % (
self.method_info.name, self.in_args_str,
self.out_args_str, DBusInterface.__repr__(self))
@property
def in_args_code(self):
in_args = ""
for a in self.__method_info.in_args:
in_args += a.signature
return in_args
@property
def method_info(self):
return self.__method_info
@property
def markup_str(self):
return "%s %s <b>↦</b> %s" % (
self.method_info.name, self.in_args_markup_str, self.out_args_markup_str)
@property
def in_args(self):
in_args = list()
for in_arg in self.method_info.in_args:
sig = dbus_utils.sig_to_string(in_arg.signature)
in_args.append({'signature': sig, 'name': in_arg.name})
return in_args
@property
def out_args(self):
out_args = list()
for out_arg in self.method_info.out_args:
sig = dbus_utils.sig_to_string(out_arg.signature)
out_args.append({'signature': sig, 'name': out_arg.name})
return out_args
@property
def in_args_str(self):
result = ""
for arg in self.in_args:
result += "%s %s, " % (arg['signature'], arg['name'])
return result[0:-2]
@property
def out_args_str(self):
result = ""
for arg in self.out_args:
result += "%s %s, " % (arg['signature'], arg['name'])
return result[0:-2]
def __args_markup_str(self, args):
"""markup a given list of args"""
result = ''
result += '<span foreground="#FF00FF">(</span>'
result += ', '.join(
'%s %s' % (
args_signature_markup(arg['signature']),
args_name_markup(arg['name'])) for arg in args)
result += '<span foreground="#FF00FF">)</span>'
return result
@property
def in_args_markup_str(self):
return self.__args_markup_str(self.in_args)
@property
def out_args_markup_str(self):
return self.__args_markup_str(self.out_args)
class DBusAnnotation(DBusInterface):
"""object to represent a DBus Annotation"""
def __init__(self, dbus_iface_obj, annotation_info):
DBusInterface.__init__(self, dbus_iface_obj,
dbus_iface_obj.iface_info)
self.__annotation_info = annotation_info # Gio.GDBusAnnotationInfo object
def __repr__(self):
return "%s: %s" % (self.annotation_info.key, self.annotation_info.value)
@property
def annotation_info(self):
return self.__annotation_info
@property
def markup_str(self):
return "%s: %s" % (self.annotation_info.key, self.annotation_info.value)<|fim▁end|>
|
self.__property_info = property_info # Gio.GDBusPropertyInfo object
self.__value = None # the value
|
<|file_name|>common.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
__author__ = 'eveliotc'
__license__ = 'See LICENSE'
import alfred<|fim▁hole|>from alfred import Item
import sys
from subprocess import Popen, PIPE
def json_to_obj(x):
if isinstance(x, dict):
return type('X', (), {k: json_to_obj(v) for k, v in x.iteritems()})
else:
return x
def join_query(dic):
return ' '.join(dic)
def le_result(r, exit = True):
alfred.write(r)
if exit:
sys.exit()
def xml_result(r, exit = True):
if len(r) < 1:
empty_result(exit)
else:
le_result(alfred.xml(r), exit)
def empty_result(exit = True):
empty = Item(
attributes={'uid': alfred.uid('empty'), 'arg': ''},
title='Gradle Please',
subtitle=u':( Nothing found.',
icon=u'icon.png')
xml_result([empty], exit)
def apple_script(scpt, args=[]):
p = Popen(['osascript', '-'] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate(scpt)
return stdout
def tell_alfred(what):
apple_script('tell application "Alfred 2" to search "%s"' % what)
# TODO refactor gp.py to use this instead of dynamic obj
class Pom(object):
a = ''
g = ''
p = ''
latestVersion = ''
source = ''
@property
def id(self):
return self.g + ':' + self.a
def __repr__(self):
#notjson #justdebugginthings
return '{id:%s a:%s g:%s p:%s v:%s}' % (self.id, self.a, self.g, self.p, self.latestVersion)<|fim▁end|>
| |
<|file_name|>lsf.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#Copyright (C) 2013 by Thomas Keane ([email protected])
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import os
import re
import subprocess
import time
import sys
import random
import string
from Queue import Queue, Empty
from threading import Thread
from datetime import date
from sonLib.bioio import logger
from sonLib.bioio import system
from jobTree.batchSystems.abstractBatchSystem import AbstractBatchSystem
from jobTree.src.master import getParasolResultsFileName
class MemoryString:
def __init__(self, string):
if string[-1] == 'K' or string[-1] == 'M' or string[-1] == 'G':
self.unit = string[-1]
self.val = float(string[:-1])
else:
self.unit = 'B'
self.val = float(string)
self.bytes = self.byteVal()
def __str__(self):
if self.unit != 'B':
return str(self.val) + self.unit
else:
return str(self.val)
def byteVal(self):
if self.unit == 'B':
return self.val
elif self.unit == 'K':
return self.val * 1000
elif self.unit == 'M':
return self.val * 1000000
elif self.unit == 'G':
return self.val * 1000000000
def __cmp__(self, other):
return cmp(self.bytes, other.bytes)
def prepareBsub(cpu, mem):
mem = '' if mem is None else '-R "select[type==X86_64 && mem > ' + str(int(mem/ 1000000)) + '] rusage[mem=' + str(int(mem/ 1000000)) + ']" -M' + str(int(mem/ 1000000)) + '000'
cpu = '' if cpu is None else '-n ' + str(int(cpu))
bsubline = ["bsub", mem, cpu,"-cwd", ".", "-o", "/dev/null", "-e", "/dev/null"]
return bsubline
def bsub(bsubline):
process = subprocess.Popen(" ".join(bsubline), shell=True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
liney = process.stdout.readline()
logger.info("BSUB: " + liney)
result = int(liney.strip().split()[1].strip('<>'))
logger.debug("Got the job id: %s" % (str(result)))
return result
def getjobexitcode(lsfJobID):
job, task = lsfJobID
#first try bjobs to find out job state
args = ["bjobs", "-l", str(job)]
logger.info("Checking job exit code for job via bjobs: " + str(job))
process = subprocess.Popen(" ".join(args), shell=True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
started = 0
for line in process.stdout:
if line.find("Done successfully") > -1:
logger.info("bjobs detected job completed for job: " + str(job))
return 0
elif line.find("Completed <exit>") > -1:
logger.info("bjobs detected job failed for job: " + str(job))
return 1
elif line.find("New job is waiting for scheduling") > -1:
logger.info("bjobs detected job pending scheduling for job: " + str(job))
return None
elif line.find("PENDING REASONS") > -1:
logger.info("bjobs detected job pending for job: " + str(job))
return None
elif line.find("Started on ") > -1:
started = 1
if started == 1:
logger.info("bjobs detected job started but not completed: " + str(job))
return None
#if not found in bjobs, then try bacct (slower than bjobs)
logger.info("bjobs failed to detect job - trying bacct: " + str(job))
args = ["bacct", "-l", str(job)]
logger.info("Checking job exit code for job via bacct:" + str(job))
process = subprocess.Popen(" ".join(args), shell=True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
for line in process.stdout:
if line.find("Completed <done>") > -1:
logger.info("Detected job completed for job: " + str(job))
return 0
elif line.find("Completed <exit>") > -1:
logger.info("Detected job failed for job: " + str(job))
return 1
logger.info("Cant determine exit code for job or job still running: " + str(job))
return None
class Worker(Thread):
def __init__(self, newJobsQueue, updatedJobsQueue, boss):
Thread.__init__(self)
self.newJobsQueue = newJobsQueue
self.updatedJobsQueue = updatedJobsQueue
self.currentjobs = list()
self.runningjobs = set()
self.boss = boss
def run(self):
while True:
# Load new job ids:
while not self.newJobsQueue.empty():
self.currentjobs.append(self.newJobsQueue.get())
# Launch jobs as necessary:
while len(self.currentjobs) > 0:
jobID, bsubline = self.currentjobs.pop()
lsfJobID = bsub(bsubline)
self.boss.jobIDs[(lsfJobID, None)] = jobID
self.boss.lsfJobIDs[jobID] = (lsfJobID, None)
self.runningjobs.add((lsfJobID, None))
# Test known job list
for lsfJobID in list(self.runningjobs):
exit = getjobexitcode(lsfJobID)
if exit is not None:
self.updatedJobsQueue.put((lsfJobID, exit))
self.runningjobs.remove(lsfJobID)
time.sleep(10)
class LSFBatchSystem(AbstractBatchSystem):
"""The interface for running jobs on lsf, runs all the jobs you
give it as they come in, but in parallel.
"""
@classmethod
def getDisplayNames(cls):
"""
Names used to select this batch system.
"""
return ["lsf","LSF"]
def __init__(self, config, maxCpus, maxMemory):
AbstractBatchSystem.__init__(self, config, maxCpus, maxMemory) #Call the parent constructor
self.lsfResultsFile = getParasolResultsFileName(config.attrib["job_tree"])
#Reset the job queue and results (initially, we do this again once we've killed the jobs)
self.lsfResultsFileHandle = open(self.lsfResultsFile, 'w')
self.lsfResultsFileHandle.close() #We lose any previous state in this file, and ensure the files existence
self.currentjobs = set()
self.obtainSystemConstants()
self.jobIDs = dict()
self.lsfJobIDs = dict()
self.nextJobID = 0
self.newJobsQueue = Queue()
self.updatedJobsQueue = Queue()
self.worker = Worker(self.newJobsQueue, self.updatedJobsQueue, self)
self.worker.setDaemon(True)
self.worker.start()
def __des__(self):
#Closes the file handle associated with the results file.
self.lsfResultsFileHandle.close() #Close the results file, cos were done. <|fim▁hole|> def issueJob(self, command, memory, cpu):
jobID = self.nextJobID
self.nextJobID += 1
self.currentjobs.add(jobID)
bsubline = prepareBsub(cpu, memory) + [command]
self.newJobsQueue.put((jobID, bsubline))
logger.info("Issued the job command: %s with job id: %s " % (command, str(jobID)))
return jobID
def getLsfID(self, jobID):
if not jobID in self.lsfJobIDs:
RuntimeError("Unknown jobID, could not be converted")
(job,task) = self.lsfJobIDs[jobID]
if task is None:
return str(job)
else:
return str(job) + "." + str(task)
def killJobs(self, jobIDs):
"""Kills the given job IDs.
"""
for jobID in jobIDs:
logger.info("DEL: " + str(self.getLsfID(jobID)))
self.currentjobs.remove(jobID)
process = subprocess.Popen(["bkill", self.getLsfID(jobID)])
del self.jobIDs[self.lsfJobIDs[jobID]]
del self.lsfJobIDs[jobID]
toKill = set(jobIDs)
while len(toKill) > 0:
for jobID in list(toKill):
if getjobexitcode(self.lsfJobIDs[jobID]) is not None:
toKill.remove(jobID)
if len(toKill) > 0:
logger.critical("Tried to kill some jobs, but something happened and they are still going, so I'll try again")
time.sleep(5)
def getIssuedJobIDs(self):
"""A list of jobs (as jobIDs) currently issued (may be running, or maybe
just waiting).
"""
return self.currentjobs
def getRunningJobIDs(self):
"""Gets a map of jobs (as jobIDs) currently running (not just waiting)
and a how long they have been running for (in seconds).
"""
times = {}
currentjobs = set(self.lsfJobIDs[x] for x in self.getIssuedJobIDs())
process = subprocess.Popen(["bjobs"], stdout = subprocess.PIPE)
for currline in process.stdout:
items = curline.strip().split()
if (len(items) > 9 and (items[0]) in currentjobs) and items[2] == 'RUN':
jobstart = "/".join(items[7:9]) + '/' + str(date.today().year)
jobstart = jobstart + ' ' + items[9]
jobstart = time.mktime(time.strptime(jobstart,"%b/%d/%Y %H:%M"))
jobstart = time.mktime(time.strptime(jobstart,"%m/%d/%Y %H:%M:%S"))
times[self.jobIDs[(items[0])]] = time.time() - jobstart
return times
def getUpdatedJob(self, maxWait):
i = None
try:
sgeJobID, retcode = self.updatedJobsQueue.get(timeout=maxWait)
self.updatedJobsQueue.task_done()
i = (self.jobIDs[sgeJobID], retcode)
self.currentjobs -= set([self.jobIDs[sgeJobID]])
except Empty:
pass
return i
def getWaitDuration(self):
"""We give parasol a second to catch its breath (in seconds)
"""
#return 0.0
return 15
def getRescueJobFrequency(self):
"""Parasol leaks jobs, but rescuing jobs involves calls to parasol list jobs and pstat2,
making it expensive. We allow this every 10 minutes..
"""
return 1800
def obtainSystemConstants(self):
p = subprocess.Popen(["lshosts"], stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
line = p.stdout.readline()
items = line.strip().split()
num_columns = len(items)
cpu_index = None
mem_index = None
for i in range(num_columns):
if items[i] == 'ncpus':
cpu_index = i
elif items[i] == 'maxmem':
mem_index = i
if cpu_index is None or mem_index is None:
RuntimeError("lshosts command does not return ncpus or maxmem columns")
p.stdout.readline()
self.maxCPU = 0
self.maxMEM = MemoryString("0")
for line in p.stdout:
items = line.strip().split()
if len(items) < num_columns:
RuntimeError("lshosts output has a varying number of columns")
if items[cpu_index] != '-' and items[cpu_index] > self.maxCPU:
self.maxCPU = items[cpu_index]
if items[mem_index] != '-' and MemoryString(items[mem_index]) > self.maxMEM:
self.maxMEM = MemoryString(items[mem_index])
if self.maxCPU is 0 or self.maxMEM is 0:
RuntimeError("lshosts returns null ncpus or maxmem info")
logger.info("Got the maxCPU: %s" % (self.maxMEM))
def main():
pass
def _test():
import doctest
return doctest.testmod()
if __name__ == '__main__':
_test()
main()<|fim▁end|>
| |
<|file_name|>implant_method.py<|end_file_name|><|fim▁begin|># coding=utf8
from types import MethodType
def implant_method(obj, func, func_name):
base_class = obj.__class__
event = MethodType(func, obj, base_class)<|fim▁hole|> pass<|fim▁end|>
|
setattr(obj, func_name, event)
if __name__ == "__main__":
|
<|file_name|>committers.py<|end_file_name|><|fim▁begin|># Copyright (c) 2011, Apple Inc. All rights reserved.
# Copyright (c) 2009, 2011, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# WebKit's Python module for committer and reviewer validation.
import fnmatch
import json
from webkitpy.common.editdistance import edit_distance
from webkitpy.common.memoized import memoized
from webkitpy.common.system.filesystem import FileSystem
# The list of contributors have been moved to contributors.json
class Contributor(object):
def __init__(self, name, email_or_emails, irc_nickname_or_nicknames=None):
assert(name)
assert(email_or_emails)
self.full_name = name
if isinstance(email_or_emails, str):
self.emails = [email_or_emails]
else:
self.emails = email_or_emails
self.emails = map(lambda email: email.lower(), self.emails) # Emails are case-insensitive.
if isinstance(irc_nickname_or_nicknames, str):
self.irc_nicknames = [irc_nickname_or_nicknames]
else:
self.irc_nicknames = irc_nickname_or_nicknames
self.can_commit = False
self.can_review = False
def bugzilla_email(self):
# FIXME: We're assuming the first email is a valid bugzilla email,
# which might not be right.
return self.emails[0]
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return '"%s" <%s>' % (self.full_name, self.emails[0])
def contains_string(self, search_string):
string = search_string.lower()
if string in self.full_name.lower():
return True
if self.irc_nicknames:
for nickname in self.irc_nicknames:
if string in nickname.lower():
return True
for email in self.emails:
if string in email:
return True
return False
def matches_glob(self, glob_string):
if fnmatch.fnmatch(self.full_name, glob_string):
return True
if self.irc_nicknames:
for nickname in self.irc_nicknames:
if fnmatch.fnmatch(nickname, glob_string):
return True
for email in self.emails:
if fnmatch.fnmatch(email, glob_string):
return True
return False
class Committer(Contributor):
def __init__(self, name, email_or_emails, irc_nickname=None):
Contributor.__init__(self, name, email_or_emails, irc_nickname)
self.can_commit = True
class Reviewer(Committer):
def __init__(self, name, email_or_emails, irc_nickname=None):
Committer.__init__(self, name, email_or_emails, irc_nickname)
self.can_review = True
class CommitterList(object):
# Committers and reviewers are passed in to allow easy testing
def __init__(self,
committers=[],
reviewers=[],
contributors=[]):
# FIXME: These arguments only exist for testing. Clean it up.
if not (committers or reviewers or contributors):
loaded_data = self.load_json()
contributors = loaded_data['Contributors']
committers = loaded_data['Committers']
reviewers = loaded_data['Reviewers']
self._contributors = contributors + committers + reviewers
self._committers = committers + reviewers
self._reviewers = reviewers
self._contributors_by_name = {}
self._accounts_by_email = {}
self._accounts_by_login = {}
@staticmethod
@memoized
def load_json():
filesystem = FileSystem()
json_path = filesystem.join(filesystem.dirname(filesystem.path_to_module('webkitpy.common.config')), 'contributors.json')
contributors = json.loads(filesystem.read_text_file(json_path))
return {
'Contributors': [Contributor(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Contributors'].iteritems()],
'Committers': [Committer(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Committers'].iteritems()],
'Reviewers': [Reviewer(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Reviewers'].iteritems()],
}
def contributors(self):
return self._contributors
def committers(self):
return self._committers
def reviewers(self):
return self._reviewers
def _name_to_contributor_map(self):
if not len(self._contributors_by_name):
for contributor in self._contributors:
assert(contributor.full_name)
assert(contributor.full_name.lower() not in self._contributors_by_name) # We should never have duplicate names.
self._contributors_by_name[contributor.full_name.lower()] = contributor
return self._contributors_by_name
def _email_to_account_map(self):
if not len(self._accounts_by_email):
for account in self._contributors:
for email in account.emails:
assert(email not in self._accounts_by_email) # We should never have duplicate emails.
self._accounts_by_email[email] = account
return self._accounts_by_email
def _login_to_account_map(self):
if not len(self._accounts_by_login):
for account in self._contributors:
if account.emails:
login = account.bugzilla_email()
assert(login not in self._accounts_by_login) # We should never have duplicate emails.
self._accounts_by_login[login] = account
return self._accounts_by_login
def _committer_only(self, record):
if record and not record.can_commit:
return None
return record
def _reviewer_only(self, record):
if record and not record.can_review:
return None
return record
def committer_by_name(self, name):
return self._committer_only(self.contributor_by_name(name))
def contributor_by_irc_nickname(self, irc_nickname):
for contributor in self.contributors():
# FIXME: This should do case-insensitive comparison or assert that all IRC nicknames are in lowercase
if contributor.irc_nicknames and irc_nickname in contributor.irc_nicknames:
return contributor
return None
def contributors_by_search_string(self, string):
glob_matches = filter(lambda contributor: contributor.matches_glob(string), self.contributors())
return glob_matches or filter(lambda contributor: contributor.contains_string(string), self.contributors())
def contributors_by_email_username(self, string):
string = string + '@'
result = []
for contributor in self.contributors():
for email in contributor.emails:
if email.startswith(string):
result.append(contributor)
break
return result
def _contributor_name_shorthands(self, contributor):
if ' ' not in contributor.full_name:
return []
split_fullname = contributor.full_name.split()
first_name = split_fullname[0]
last_name = split_fullname[-1]
return first_name, last_name, first_name + last_name[0], first_name + ' ' + last_name[0]
<|fim▁hole|> full_name_in_lowercase = contributor.full_name.lower()
tokens = [full_name_in_lowercase] + full_name_in_lowercase.split()
if contributor.irc_nicknames:
return tokens + [nickname.lower() for nickname in contributor.irc_nicknames if len(nickname) > 5]
return tokens
def contributors_by_fuzzy_match(self, string):
string_in_lowercase = string.lower()
# 1. Exact match for fullname, email and irc_nicknames
account = self.contributor_by_name(string_in_lowercase) or self.contributor_by_email(string_in_lowercase) or self.contributor_by_irc_nickname(string_in_lowercase)
if account:
return [account], 0
# 2. Exact match for email username (before @)
accounts = self.contributors_by_email_username(string_in_lowercase)
if accounts and len(accounts) == 1:
return accounts, 0
# 3. Exact match for first name, last name, and first name + initial combinations such as "Dan B" and "Tim H"
accounts = [contributor for contributor in self.contributors() if string in self._contributor_name_shorthands(contributor)]
if accounts and len(accounts) == 1:
return accounts, 0
# 4. Finally, fuzzy-match using edit-distance
string = string_in_lowercase
contributorWithMinDistance = []
minDistance = len(string) / 2 - 1
for contributor in self.contributors():
tokens = self._tokenize_contributor_name(contributor)
editdistances = [edit_distance(token, string) for token in tokens if abs(len(token) - len(string)) <= minDistance]
if not editdistances:
continue
distance = min(editdistances)
if distance == minDistance:
contributorWithMinDistance.append(contributor)
elif distance < minDistance:
contributorWithMinDistance = [contributor]
minDistance = distance
if not len(contributorWithMinDistance):
return [], len(string)
return contributorWithMinDistance, minDistance
def contributor_by_email(self, email):
return self._email_to_account_map().get(email.lower()) if email else None
def contributor_by_name(self, name):
return self._name_to_contributor_map().get(name.lower()) if name else None
def committer_by_email(self, email):
return self._committer_only(self.contributor_by_email(email))
def reviewer_by_email(self, email):
return self._reviewer_only(self.contributor_by_email(email))<|fim▁end|>
|
def _tokenize_contributor_name(self, contributor):
|
<|file_name|>cputemp_cubie1.py<|end_file_name|><|fim▁begin|>## Cubieboard@Armbian plugin for temperature measurment
from libraries import utility
def plugin_main(json_arguments=None):
data="Undefned" <|fim▁hole|><|fim▁end|>
|
with open ("/sys/class/hwmon/hwmon0/device/temp1_input", "r") as temperature:
data=temperature.read()
return "{0} C".format(int(data)/1000)
|