mod.rs
//! regex.h implementation, following http://pubs.opengroup.org/onlinepubs/7908799/xsh/regex.h.html
use alloc::borrow::Cow;
use alloc::vec::Vec;
use core::{mem, ptr, slice};
use header::string::strlen;
use platform::types::*;
use posix_regex::compile::{Error as CompileError, Range, Token};
use posix_regex::{PosixRegex, PosixRegexBuilder};
pub type regoff_t = size_t;
#[repr(C)]
pub struct regex_t {
// Can't be a normal Vec<T> because then the struct size won't be known
// from C.
ptr: *mut c_void,
length: size_t,
capacity: size_t,
cflags: c_int,
re_nsub: size_t,
}
#[repr(C)]
pub struct regmatch_t {
rm_so: regoff_t,
rm_eo: regoff_t,
}
pub const REG_EXTENDED: c_int = 1;
pub const REG_ICASE: c_int = 2;
pub const REG_NOSUB: c_int = 4;
pub const REG_NEWLINE: c_int = 8;
pub const REG_NOTBOL: c_int = 16;
pub const REG_NOTEOL: c_int = 32;
pub const REG_NOMATCH: c_int = 1;
pub const REG_BADPAT: c_int = 2;
pub const REG_ECOLLATE: c_int = 3;
pub const REG_ECTYPE: c_int = 4;
pub const REG_EESCAPE: c_int = 5;
pub const REG_ESUBREG: c_int = 6;
pub const REG_EBRACK: c_int = 7;
pub const REG_ENOSYS: c_int = 8;
pub const REG_EPAREN: c_int = 9;
pub const REG_EBRACE: c_int = 10;
pub const REG_BADBR: c_int = 11;
pub const REG_ERANGE: c_int = 12;
pub const REG_ESPACE: c_int = 13;
pub const REG_BADRPT: c_int = 14;
#[no_mangle]
#[linkage = "weak"] // redefined in GIT
pub unsafe extern "C" fn regcomp(out: *mut regex_t, pat: *const c_char, cflags: c_int) -> c_int {
if cflags & REG_EXTENDED == REG_EXTENDED {
return REG_ENOSYS;
}
let pat = slice::from_raw_parts(pat as *const u8, strlen(pat));
let res = PosixRegexBuilder::new(pat)
.with_default_classes()
.compile_tokens();
match res {
Ok(mut branches) => {
let re_nsub = PosixRegex::new(Cow::Borrowed(&branches)).count_groups();
*out = regex_t {
ptr: branches.as_mut_ptr() as *mut c_void,
length: branches.len(),
capacity: branches.capacity(),
cflags,
re_nsub,
};
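// Ownership of the compiled Vec's buffer is handed to the C-visible struct;
// regfree() later rebuilds the Vec from these raw parts so it can be dropped.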
mem::forget(branches);
0
}
Err(CompileError::EmptyRepetition)
| Err(CompileError::IntegerOverflow)
| Err(CompileError::IllegalRange) => REG_BADBR,
Err(CompileError::UnclosedRepetition) => REG_EBRACE,
Err(CompileError::LeadingRepetition) => REG_BADRPT,
Err(CompileError::UnknownCollation) => REG_ECOLLATE,
Err(CompileError::UnknownClass(_)) => REG_ECTYPE,
Err(_) => REG_BADPAT,
}
}
#[no_mangle]
#[linkage = "weak"] // redefined in GIT
pub unsafe extern "C" fn regfree(regex: *mut regex_t) {
Vec::from_raw_parts(
(*regex).ptr as *mut Vec<(Token, Range)>,
(*regex).length,
(*regex).capacity,
);
}
#[no_mangle]
#[linkage = "weak"] // redefined in GIT
pub unsafe extern "C" fn regexec(
regex: *const regex_t,
input: *const c_char,
nmatch: size_t,
pmatch: *mut regmatch_t,
eflags: c_int,
) -> c_int {
if eflags & REG_EXTENDED == REG_EXTENDED {
return REG_ENOSYS;
}
let regex = &*regex;
// Allow specifying a compiler argument to the executor and vice versa
// because why not?
let flags = regex.cflags | eflags;
let input = slice::from_raw_parts(input as *const u8, strlen(input));
let branches = slice::from_raw_parts(regex.ptr as *const Vec<(Token, Range)>, regex.length);
let matches = PosixRegex::new(Cow::Borrowed(&branches))
.case_insensitive(flags & REG_ICASE == REG_ICASE)
.newline(flags & REG_NEWLINE == REG_NEWLINE)
.no_start(flags & REG_NOTBOL == REG_NOTBOL)
.no_end(flags & REG_NOTEOL == REG_NOTEOL)
.matches(input, Some(1));
if !matches.is_empty() && flags & REG_NOSUB != REG_NOSUB && !pmatch.is_null() && nmatch > 0 {
let first = &matches[0];
for i in 0..nmatch {
let (start, end) = first.get(i).and_then(|&range| range).unwrap_or((!0, !0));
*pmatch.add(i) = regmatch_t {
rm_so: start,
rm_eo: end,
};
}
}
if matches.is_empty() {
REG_NOMATCH
} else {
0
}
}
#[no_mangle]
#[linkage = "weak"] // redefined in GIT
pub extern "C" fn regerror(
code: c_int,
_regex: *const regex_t,
out: *mut c_char,
max: size_t,
) -> size_t {
let string = match code {
0 => "No error\0",
REG_NOMATCH => "No match\0",
REG_BADPAT => "Invalid regexp\0",
REG_ECOLLATE => "Unknown collating element\0",
REG_ECTYPE => "Unknown character class name\0",
REG_EESCAPE => "Trailing backslash\0",
REG_ESUBREG => "Invalid back reference\0",
REG_EBRACK => "Missing ']'\0",
REG_ENOSYS => "Unsupported operation\0",
REG_EPAREN => "Missing ')'\0",
REG_EBRACE => "Missing '}'\0",
REG_BADBR => "Invalid contents of {}\0",
REG_ERANGE => "Invalid character range\0",
REG_ESPACE => "Out of memory\0",
REG_BADRPT => "Repetition not preceded by valid expression\0",
_ => "Unknown error\0",
};
unsafe {
ptr::copy_nonoverlapping(
string.as_ptr(),
out as *mut u8,
string.len().min(max as usize),
);
}
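// Per POSIX, return the size needed for the entire message (string.len()
// counts the embedded NUL here), even if the copy into `out` was truncated.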
string.len()
}
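// A minimal usage sketch, not part of the original file: one way a caller
// could drive regcomp/regexec/regfree from Rust. It assumes it lives in this
// module so the private regex_t/regmatch_t fields are in scope; a C caller
// would do the same through the regex.h prototypes.
unsafe fn regex_usage_sketch() -> c_int {
let mut re: regex_t = mem::zeroed();
// Compile a BRE; REG_EXTENDED would make regcomp() return REG_ENOSYS here.
if regcomp(&mut re, b"hel*o\0".as_ptr() as *const c_char, 0) != 0 {
return REG_BADPAT;
}
let mut m: [regmatch_t; 1] = mem::zeroed();
let rc = regexec(&re, b"say hello\0".as_ptr() as *const c_char, 1, m.as_mut_ptr(), 0);
regfree(&mut re); // reclaims the Vec that regcomp() deliberately leaked
rc // 0 on match; m[0] then holds the byte offsets of the match
}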
cluster_info.rs
#![feature(test)]
extern crate test;
use {
rand::{thread_rng, Rng},
solana_core::{
broadcast_stage::{
broadcast_metrics::TransmitShredsStats, broadcast_shreds, BroadcastStage,
},
cluster_nodes::ClusterNodesCache,
},
solana_gossip::{
cluster_info::{ClusterInfo, Node},
contact_info::ContactInfo,
},
solana_ledger::{
genesis_utils::{create_genesis_config, GenesisConfigInfo},
shred::Shred,
},
solana_runtime::{bank::Bank, bank_forks::BankForks},
solana_sdk::{
pubkey,
signature::Keypair,
timing::{timestamp, AtomicInterval},
},
solana_streamer::socket::SocketAddrSpace,
std::{
collections::HashMap,
net::UdpSocket,
sync::{Arc, RwLock},
time::Duration,
},
test::Bencher,
};
#[bench]
fn broadcast_shreds_bench(bencher: &mut Bencher) {
solana_logger::setup();
let leader_pubkey = pubkey::new_rand();
let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey);
let cluster_info = ClusterInfo::new(
leader_info.info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Bank::new_for_benches(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
const NUM_SHREDS: usize = 32;
let shred = Shred::new_from_data(0, 0, 0, None, false, false, 0, 0, 0);
let shreds = vec![shred; NUM_SHREDS];
let mut stakes = HashMap::new();
const NUM_PEERS: usize = 200;
for _ in 0..NUM_PEERS {
let id = pubkey::new_rand();
let contact_info = ContactInfo::new_localhost(&id, timestamp());
cluster_info.insert_info(contact_info);
stakes.insert(id, thread_rng().gen_range(1, NUM_PEERS) as u64);
}
let cluster_info = Arc::new(cluster_info);
let cluster_nodes_cache = ClusterNodesCache::<BroadcastStage>::new(
8, // cap
Duration::from_secs(5), // ttl
);
let shreds = Arc::new(shreds);
let last_datapoint = Arc::new(AtomicInterval::default());
bencher.iter(move || {
let shreds = shreds.clone();
broadcast_shreds(
&socket,
&shreds,
&cluster_nodes_cache,
&last_datapoint,
&mut TransmitShredsStats::default(),
&cluster_info,
&bank_forks,
&SocketAddrSpace::Unspecified,
)
.unwrap();
});
}
logging.py | """Logging utilities."""
import asyncio
import logging
import threading
from .async_ import run_coroutine_threadsafe
class HideSensitiveDataFilter(logging.Filter):
"""Filter API password calls."""
def __init__(self, text):
"""Initialize sensitive data filter."""
super().__init__()
self.text = text
def filter(self, record):
"""Hide sensitive data in messages."""
record.msg = record.msg.replace(self.text, '*******')
return True
# pylint: disable=invalid-name
class AsyncHandler(object):
"""Logging handler wrapper to add an async layer."""
def __init__(self, loop, handler):
"""Initialize async logging handler wrapper."""
self.handler = handler
self.loop = loop
self._queue = asyncio.Queue(loop=loop)
self._thread = threading.Thread(target=self._process)
# Delegate from handler
self.setLevel = handler.setLevel
self.setFormatter = handler.setFormatter
self.addFilter = handler.addFilter
self.removeFilter = handler.removeFilter
self.filter = handler.filter
self.flush = handler.flush
self.handle = handler.handle
self.handleError = handler.handleError
self.format = handler.format
self._thread.start()
def close(self):
"""Wrap close to handler."""
self.emit(None)
@asyncio.coroutine
def async_close(self, blocking=False):
"""Close the handler.
When blocking=True, will wait till closed.
"""
yield from self._queue.put(None)
if blocking:
while self._thread.is_alive():
yield from asyncio.sleep(0, loop=self.loop)
def emit(self, record):
"""Process a record."""
ident = self.loop.__dict__.get("_thread_ident")
# inside eventloop
if ident is not None and ident == threading.get_ident():
self._queue.put_nowait(record)
# from a thread/executor
else:
self.loop.call_soon_threadsafe(self._queue.put_nowait, record)
def __repr__(self):
"""Return the string names."""
return str(self.handler)
def _process(self):
"""Process log in a thread."""
while True:
record = run_coroutine_threadsafe(
self._queue.get(), self.loop).result()
if record is None:
self.handler.close()
return
self.handler.emit(record)
def createLock(self):
"""Ignore lock stuff."""
pass
def acquire(self):
"""Ignore lock stuff."""
pass
def release(self):
"""Ignore lock stuff."""
pass
@property
def level(self):
"""Wrap property level to handler."""
return self.handler.level
@property
def formatter(self):
"""Wrap property formatter to handler."""
return self.handler.formatter
@property
def name(self):
"""Wrap property set_name to handler."""
return self.handler.get_name()
@name.setter
def name(self, name):
"""Wrap property get_name to handler."""
self.handler.name = name
| """Hide sensitive data in messages."""
record.msg = record.msg.replace(self.text, '*******')
return True |
options.py
import operator
from functools import reduce, partial
from django import forms
from django.forms.formsets import all_valid
from django.urls import reverse
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin import widgets, helpers
from django.contrib.admin.utils import (
unquote, flatten_fieldsets, get_deleted_objects,
)
from django.contrib.admin.options import (
TO_FIELD_VAR, IS_POPUP_VAR,
get_ul_class, csrf_protect_m,
)
from django.utils.html import escape
from django.core.exceptions import PermissionDenied
try:
from django.db.models.related import RelatedObject
except ImportError:
from django.db.models.fields.related import ForeignObjectRel as RelatedObject # noqa
from django.http import Http404
from django.template.response import TemplateResponse
from django.utils.functional import curry
from django.utils.text import capfirst
from django.utils.translation import gettext as _
from django.forms.forms import pretty_name
from django.forms.models import modelform_defines_fields
from django.conf import settings
from django.apps import apps
from mongoengine import Q
from django_mongoengine.utils import force_text
from django_mongoengine.fields import (ListField, EmbeddedDocumentField,
ReferenceField, StringField)
from django_mongoengine.mongo_admin.util import RelationWrapper
from django_mongoengine.utils.wrappers import copy_class
from django_mongoengine.utils.monkey import get_patched_django_module
from django_mongoengine.forms.documents import (
DocumentForm, documentform_factory, documentformset_factory,
inlineformset_factory, BaseInlineDocumentFormSet)
def get_content_type_for_model(obj):
return apps.get_model("contenttypes.ContentType")()
djmod = get_patched_django_module(
"django.contrib.admin.options",
get_content_type_for_model=get_content_type_for_model,
)
class BaseDocumentAdmin(djmod.ModelAdmin):
"""Functionality common to both ModelAdmin and InlineAdmin."""
form = DocumentForm
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
request = kwargs.pop("request", None)
# If the field specifies choices, we don't need to look for special
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices is not None:
return self.formfield_for_choice_field(db_field, request, **kwargs)
if isinstance(db_field, ListField) and isinstance(db_field.field, ReferenceField):
return self.formfield_for_manytomany(db_field, request, **kwargs)
# handle RelatedFields
if isinstance(db_field, ReferenceField):
# For non-raw_id fields, wrap the widget with a wrapper that adds
# extra HTML -- the "add other" interface -- to the end of the
# rendered output. formfield can be None if it came from a
# OneToOneField with parent_link=True or a M2M intermediary.
form_field = db_field.formfield(**kwargs)
if db_field.name not in self.raw_id_fields:
related_modeladmin = self.admin_site._registry.get(db_field.document_type)
can_add_related = bool(related_modeladmin and
related_modeladmin.has_add_permission(request))
form_field.widget = widgets.RelatedFieldWidgetWrapper(
form_field.widget, RelationWrapper(db_field.document_type), self.admin_site,
can_add_related=can_add_related)
return form_field
if isinstance(db_field, StringField):
if db_field.max_length is None:
kwargs = dict({'widget': widgets.AdminTextareaWidget}, **kwargs)
else:
kwargs = dict({'widget': widgets.AdminTextInputWidget}, **kwargs)
return db_field.formfield(**kwargs)
# If we've got overrides for the formfield defined, use 'em. **kwargs
# passed to formfield_for_dbfield override the defaults.
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
kwargs = dict(self.formfield_overrides[klass], **kwargs)
return db_field.formfield(**kwargs)
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
def formfield_for_choice_field(self, db_field, request=None, **kwargs):
"""
Get a form Field for a database Field that has declared choices.
"""
# If the field is named as a radio_field, use a RadioSelect
if db_field.name in self.radio_fields:
# Avoid stomping on custom widget/choices arguments.
if 'widget' not in kwargs:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
if 'choices' not in kwargs:
kwargs['choices'] = db_field.get_choices(
include_blank = db_field.blank,
blank_choice=[('', _('None'))]
)
return db_field.formfield(**kwargs)
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
"""
Get a form Field for a ManyToManyField.
"""
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.rel, using=db)
kwargs['help_text'] = ''
elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
kwargs['widget'] = widgets.FilteredSelectMultiple(pretty_name(db_field.name), (db_field.name in self.filter_vertical))
return db_field.formfield(**kwargs)
def get_view_on_site_url(self, obj=None):
if obj is None or not self.view_on_site:
return None
if callable(self.view_on_site):
return self.view_on_site(obj)
elif self.view_on_site and hasattr(obj, 'get_absolute_url'):
# use the ContentType lookup if view_on_site is True
return reverse('admin:view_on_site', kwargs={
'content_type_id': 0,
'object_id': obj.pk
})
@copy_class(djmod.ModelAdmin)
class DocumentAdmin(BaseDocumentAdmin):
"Encapsulates all admin options and functionality for a given model."
def __init__(self, model, admin_site):
self.model = model
self.opts = model._meta
self.admin_site = admin_site
super(DocumentAdmin, self).__init__(model, admin_site)
self.log = not settings.DATABASES.get('default', {}).get(
'ENGINE', 'django.db.backends.dummy'
).endswith('dummy')
self.change_list_template = 'admin/change_document_list.html'
# XXX: add inline init somewhere
def _get_inline_instances(self):
for f in self.model._fields.values():
if not (isinstance(f, ListField) and isinstance(getattr(f, 'field', None), EmbeddedDocumentField)) and not isinstance(f, EmbeddedDocumentField):
continue
# Should only reach here if there is an embedded document...
if f.name in self.exclude:
continue
document = self.model()
if hasattr(f, 'field') and f.field is not None:
embedded_document = f.field.document_type
elif hasattr(f, 'document_type'):
embedded_document = f.document_type
else:
# For some reason we found an embedded field where either
# the field attribute or the field's document type is None.
# This shouldn't happen, but apparently does happen:
# https://github.com/jschrewe/django-mongoadmin/issues/4
# The solution for now is to ignore that field entirely.
continue
inline_admin = EmbeddedStackedDocumentAdmin
# check if there is an admin for the embedded document in
# self.inlines. If there is, use this, else use default.
for inline_class in self.inlines:
if inline_class.document == embedded_document:
inline_admin = inline_class
inline_instance = inline_admin(f, document, self.admin_site)
# if f is an EmbeddedDocumentField set the maximum allowed form instances to one
if isinstance(f, EmbeddedDocumentField):
inline_instance.max_num = 1
# exclude field from normal form
if f.name not in self.exclude:
self.exclude.append(f.name)
if f.name == 'created_at' and f.name not in self.exclude:
self.exclude.append(f.name)
self.inline_instances.append(inline_instance)
def get_changelist_form(self, request, **kwargs):
"""
Returns a Form class for use in the Formset on the changelist page.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
if defaults.get('fields') is None and not modelform_defines_fields(defaults.get('form')):
defaults['fields'] = forms.ALL_FIELDS
return documentform_factory(self.model, **defaults)
def get_changelist_formset(self, request, **kwargs):
"""
Returns a FormSet class for use on the changelist page if list_editable
is used.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
return documentformset_factory(
self.model, self.get_changelist_form(request), extra=0,
fields=self.list_editable, **defaults
)
def get_changelist(self, request, **kwargs):
"""
Returns the ChangeList class for use on the changelist page.
"""
from django_mongoengine.mongo_admin.views import DocumentChangeList
return DocumentChangeList
def log_addition(self, request, object, message):
"""
Log that an object has been successfully added.
The default implementation creates an admin LogEntry object.
"""
if not self.log:
return
super(DocumentAdmin, self).log_addition(request, object, message)
def log_change(self, request, object, message):
"""
Log that an object has been successfully changed.
The default implementation creates an admin LogEntry object.
"""
if not self.log:
return
super(DocumentAdmin, self).log_change(request, object, message)
def log_deletion(self, request, object, object_repr):
"""
Log that an object will be deleted. Note that this method is called
before the deletion.
The default implementation creates an admin LogEntry object.
"""
if not self.log:
return
super(DocumentAdmin, self).log_deletion(request, object, object_repr)
@property
def media(self):
return djmod.ModelAdmin.media.fget(self)
@csrf_protect_m
def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
model = self.model
opts = model._meta
if request.method == 'POST' and '_saveasnew' in request.POST:
object_id = None
add = object_id is None
if add:
if not self.has_add_permission(request):
raise PermissionDenied
obj = None
else:
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(opts.verbose_name), 'key': escape(object_id)})
ModelForm = self.get_form(request, obj)
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES, instance=obj)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=not add)
else:
form_validated = False
new_object = form.instance
formsets, inline_instances = self._create_formsets(request, new_object, change=not add)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, not add)
self.save_related(request, form, formsets, not add)
change_message = self.construct_change_message(request, form, formsets, add)
if add:
self.log_addition(request, new_object, change_message)
return self.response_add(request, new_object)
else:
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form_validated = False
else:
if add:
initial = self.get_changeform_initial_data(request)
form = ModelForm(initial=initial)
formsets, inline_instances = self._create_formsets(request, form.instance, change=False)
else:
form = ModelForm(instance=obj)
formsets, inline_instances = self._create_formsets(request, obj, change=True)
adminForm = helpers.AdminForm(
form,
list(self.get_fieldsets(request, obj)),
self.get_prepopulated_fields(request, obj),
self.get_readonly_fields(request, obj),
model_admin=self)
media = self.media + adminForm.media
inline_formsets = self.get_inline_formsets(request, formsets, inline_instances, obj)
for inline_formset in inline_formsets:
media = media + inline_formset.media
context = dict(self.admin_site.each_context(request),
title=(_('Add %s') if add else _('Change %s')) % force_text(opts.verbose_name),
adminform=adminForm,
object_id=object_id,
original=obj,
is_popup=(IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
to_field=to_field,
media=media,
inline_admin_formsets=inline_formsets,
errors=helpers.AdminErrorList(form, formsets),
preserved_filters=self.get_preserved_filters(request),
)
# Hide the "Save" and "Save and continue" buttons if "Save as New" was
# previously chosen to prevent the interface from getting confusing.
if request.method == 'POST' and not form_validated and "_saveasnew" in request.POST:
context['show_save'] = False
context['show_save_and_continue'] = False
# Use the change template instead of the add template.
add = False
context.update(extra_context or {})
return self.render_change_form(request, context, add=add, change=not add, obj=obj, form_url=form_url)
@csrf_protect_m
def delete_view(self, request, object_id, extra_context=None):
"The 'delete' admin view for this model."
opts = self.model._meta
app_label = opts.app_label
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(
_('%(name)s object with primary key %(key)r does not exist.') %
{'name': force_text(opts.verbose_name), 'key': escape(object_id)}
)
from django.db import router
using = router.db_for_write(self.model)
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
(deleted_objects, model_count, perms_needed, protected) = get_deleted_objects(
[obj], opts, request.user, self.admin_site, using)
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = force_text(obj)
attr = str(to_field) if to_field else opts.pk.attname
obj_id = obj.serializable_value(attr)
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
return self.response_delete(request, obj_display, obj_id)
object_name = force_text(opts.verbose_name)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": object_name}
else:
title = _("Are you sure?")
context = dict(
self.admin_site.each_context(request),
title=title,
object_name=object_name,
object=obj,
deleted_objects=deleted_objects,
model_count=dict(model_count).items(),
perms_lacking=perms_needed,
protected=protected,
opts=opts,
app_label=app_label,
preserved_filters=self.get_preserved_filters(request),
is_popup=(IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
to_field=to_field,
)
context.update(extra_context or {})
return self.render_delete_form(request, context)
def history_view(self, request, object_id, extra_context=None):
"The 'history' admin view for this model."
from django.contrib.admin.models import LogEntry
# First check if the user can see this history.
model = self.model
obj = self.get_object(request, unquote(object_id))
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(model._meta.verbose_name),
'key': escape(object_id),
})
if not self.has_change_permission(request, obj):
raise PermissionDenied
# Then get the history for this object.
opts = model._meta
app_label = opts.app_label
action_list = LogEntry.objects.filter(
object_id=unquote(object_id),
content_type=get_content_type_for_model(model)
).select_related().order_by('action_time')
context = dict(
self.admin_site.each_context(request),
title=_('Change history: %s') % force_text(obj),
action_list=action_list,
module_name=capfirst(force_text(opts.verbose_name_plural)),
object=obj,
opts=opts,
preserved_filters=self.get_preserved_filters(request),
)
context.update(extra_context or {})
request.current_app = self.admin_site.name
return TemplateResponse(request, self.object_history_template or [
"admin/%s/%s/object_history.html" % (app_label, opts.model_name),
"admin/%s/object_history.html" % app_label,
"admin/object_history.html"
], context)
class InlineDocumentAdmin(BaseDocumentAdmin):
"""
Options for inline editing of ``model`` instances.
Provide ``name`` to specify the attribute name of the ``ForeignKey`` from
``model`` to its parent. This is required if ``model`` has more than one
``ForeignKey`` to its parent.
"""
document = None
fk_name = None
formset = BaseInlineDocumentFormSet
extra = 1
max_num = None
template = None
verbose_name = None
verbose_name_plural = None
can_delete = True
def __init__(self, parent_document, admin_site):
self.admin_site = admin_site
self.parent_document = parent_document
self.opts = self.model._meta
super(InlineDocumentAdmin, self).__init__()
if self.verbose_name is None:
self.verbose_name = self.model._meta.verbose_name
if self.verbose_name_plural is None:
self.verbose_name_plural = self.model._meta.verbose_name_plural
media = djmod.ModelAdmin.media
def get_formset(self, request, obj=None, **kwargs):
"""Returns a BaseInlineFormSet class for use in admin add/change views."""
if self.declared_fieldsets:
fields = flatten_fieldsets(self.declared_fieldsets)
else:
fields = None
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(kwargs.get("exclude", []))
exclude.extend(self.get_readonly_fields(request, obj))
# if exclude is an empty list we use None, since that's the actual
# default
exclude = exclude or None
defaults = {
"form": self.form,
"formset": self.formset,
"fields": fields,
"exclude": exclude,
"formfield_callback": curry(self.formfield_for_dbfield, request=request),
"extra": self.extra,
"max_num": self.max_num,
"can_delete": self.can_delete,
}
defaults.update(kwargs)
return inlineformset_factory(self.model, **defaults)
def get_fieldsets(self, request, obj=None):
if self.declared_fieldsets:
return self.declared_fieldsets
form = self.get_formset(request).form
fields = form.base_fields.keys() + list(self.get_readonly_fields(request, obj))
return [(None, {'fields': fields})]
class EmbeddedDocumentAdmin(InlineDocumentAdmin):
def __init__(self, field, parent_document, admin_site):
if hasattr(field, 'field'):
self.model = field.field.document_type
else:
self.model = field.document_type
self.doc_list = getattr(parent_document, field.name)
self.field = field
if not isinstance(self.doc_list, list):
self.doc_list = []
self.rel_name = field.name
if self.verbose_name is None:
self.verbose_name = "Field: %s (Document: %s)" % (capfirst(field.name), self.model._meta.verbose_name)
if self.verbose_name_plural is None:
self.verbose_name_plural = "Field: %s (Document: %s)" % (capfirst(field.name), self.model._meta.verbose_name_plural)
super(EmbeddedDocumentAdmin, self).__init__(parent_document, admin_site)
def queryset(self, request):
if isinstance(self.field, ListField): # list field
self.doc_list = getattr(self.parent_document, self.rel_name)
else: # embedded field
emb_doc = getattr(self.parent_document, self.rel_name)
if emb_doc is None:
self.doc_list = []
else:
self.doc_list = [emb_doc]
return self.doc_list
class StackedDocumentInline(InlineDocumentAdmin):
template = 'admin/edit_inline/stacked.html'
class EmbeddedStackedDocumentAdmin(EmbeddedDocumentAdmin):
template = 'admin/edit_inline/stacked.html'
class TabularDocumentInline(InlineDocumentAdmin):
template = 'admin/edit_inline/tabular.html'
codec.rs
use core::convert::TryFrom;
use libipld_core::cid::Cid;
use libipld_core::ipld::Ipld;
use serde::de::Error as SerdeError;
use serde::{de, ser, Deserialize, Serialize};
use serde_json::ser::Serializer;
use serde_json::Error;
use std::collections::BTreeMap;
use std::fmt;
use std::io::{Read, Write};
const LINK_KEY: &str = "/";
pub fn encode<W: Write>(ipld: &Ipld, writer: &mut W) -> Result<(), Error> {
let mut ser = Serializer::new(writer);
serialize(&ipld, &mut ser)?;
Ok(())
}
pub fn decode<R: Read>(r: &mut R) -> Result<Ipld, Error> {
let mut de = serde_json::Deserializer::from_reader(r);
deserialize(&mut de)
}
fn serialize<S: ser::Serializer>(ipld: &Ipld, ser: S) -> Result<S::Ok, S::Error> {
match &ipld {
Ipld::Null => ser.serialize_none(),
Ipld::Bool(bool) => ser.serialize_bool(*bool),
Ipld::Integer(i128) => ser.serialize_i128(*i128),
Ipld::Float(f64) => ser.serialize_f64(*f64),
Ipld::String(string) => ser.serialize_str(&string),
Ipld::Bytes(bytes) => ser.serialize_bytes(&bytes),
Ipld::List(list) => {
let wrapped = list.iter().map(|ipld| Wrapper(ipld));
ser.collect_seq(wrapped)
}
Ipld::StringMap(map) => {
let wrapped = map.iter().map(|(key, ipld)| (key, Wrapper(ipld)));
ser.collect_map(wrapped)
}
#[cfg(feature = "unleashed")]
Ipld::IntegerMap(map) => {
let wrapped = map.iter().map(|(key, ipld)| (key, Wrapper(ipld)));
ser.collect_map(wrapped)
}
#[cfg(feature = "unleashed")]
Ipld::Tag(tag, ipld) => {
let mut map = BTreeMap::new();
map.insert("/", (tag, Wrapper(ipld)));
ser.collect_map(map)
}
Ipld::Link(link) => {
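// Links are serialized as the single-keyed object {"/": "<base64 CID bytes>"},
// the same shape visit_map() below recognizes when decoding.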
let value = base64::encode(&link.to_bytes());
let mut map = BTreeMap::new();
map.insert("/", value);
ser.collect_map(map)
}
}
}
fn deserialize<'de, D: de::Deserializer<'de>>(deserializer: D) -> Result<Ipld, D::Error> {
// Sadly such a PhantomData hack is needed
deserializer.deserialize_any(JsonVisitor)
}
// Needed for `collect_seq` and `collect_map` in the serializer.
struct Wrapper<'a>(&'a Ipld);
impl<'a> Serialize for Wrapper<'a> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
serialize(&self.0, serializer)
}
}
// Serde visitor used by the deserializer to decode JSON into IPLD.
struct JsonVisitor;
impl<'de> de::Visitor<'de> for JsonVisitor {
type Value = Ipld;
fn expecting(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.write_str("any valid JSON value")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
self.visit_string(String::from(value))
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Ipld::String(value))
}
fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
where
E: de::Error,
{
self.visit_byte_buf(v.to_owned())
}
fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Ipld::Bytes(v))
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Ipld::Integer(v.into()))
}
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Ipld::Integer(v.into()))
}
fn visit_i128<E>(self, v: i128) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Ipld::Integer(v))
}
fn visit_bool<E>(self, v: bool) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Ipld::Bool(v))
}
fn visit_none<E>(self) -> Result<Self::Value, E>
where
E: de::Error,
{
self.visit_unit()
}
fn visit_unit<E>(self) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Ipld::Null)
}
fn visit_seq<V>(self, mut visitor: V) -> Result<Self::Value, V::Error>
where
V: de::SeqAccess<'de>,
{
let mut vec: Vec<WrapperOwned> = Vec::new();
while let Some(elem) = visitor.next_element()? {
vec.push(elem);
}
let unwrapped = vec.into_iter().map(|WrapperOwned(ipld)| ipld).collect();
Ok(Ipld::List(unwrapped))
}
fn visit_map<V>(self, mut visitor: V) -> Result<Self::Value, V::Error>
where
V: de::MapAccess<'de>,
{
let mut values: Vec<(String, WrapperOwned)> = Vec::new();
while let Some((key, value)) = visitor.next_entry()? {
values.push((key, value));
}
// A JSON object represents an IPLD link if it has the shape `{ "/": "..." }`,
// so we validate whether that is the case here.
if let Some((key, WrapperOwned(Ipld::String(value)))) = values.first() {
if key == LINK_KEY && values.len() == 1 {
let link = base64::decode(value).map_err(SerdeError::custom)?;
let cid = Cid::try_from(link).map_err(SerdeError::custom)?;
return Ok(Ipld::Link(cid));
}
}
let unwrapped = values
.into_iter()
.map(|(key, WrapperOwned(value))| (key, value))
.collect();
Ok(Ipld::StringMap(unwrapped))
}
fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Ipld::Float(v))
}
}
// Needed for `visit_seq` and `visit_map` in Deserializer
/// We cannot directly implement `serde::Deserialize` for `Ipld` as it is a remote type.
/// Instead wrap it into a newtype struct and implement `serde::Deserialize` for that one.
/// All the deserializer does is call the `deserialize()` function we defined, which returns
/// an unwrapped `Ipld` instance. Wrap that `Ipld` instance in `Wrapper` and return it.
/// Users of this wrapper will then unwrap it again so that they can return the expected `Ipld`
/// instance.
struct WrapperOwned(Ipld);
impl<'de> Deserialize<'de> for WrapperOwned {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: de::Deserializer<'de>,
{
let deserialized = deserialize(deserializer);
// Better version of Ok(Wrapper(deserialized.unwrap()))
deserialized.map(Self)
}
}
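// A small round-trip sketch, not part of the original file: encode an Ipld
// value to JSON bytes with encode() and parse it back with decode(). Assumes
// libipld_core's Ipld implements PartialEq (used by the assert).
fn roundtrip_sketch() -> Result<(), Error> {
let mut map = BTreeMap::new();
map.insert("greeting".to_string(), Ipld::String("hello".to_string()));
let ipld = Ipld::StringMap(map);
let mut bytes = Vec::new();
encode(&ipld, &mut bytes)?; // produces {"greeting":"hello"}
let decoded = decode(&mut bytes.as_slice())?;
assert_eq!(decoded, ipld);
Ok(())
}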
v1alpha1_test.go
package v1alpha1
import (
"log"
"os"
"path/filepath"
"testing"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
)
var cfg *rest.Config
var c client.Client
func TestMain(m *testing.M) {
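// envtest spins up a local control plane (kube-apiserver + etcd) and installs
// this repo's CRDs so the typed client below can be exercised against it.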
t := &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "deploy", "crds")},
}
err := SchemeBuilder.AddToScheme(scheme.Scheme)
if err != nil {
log.Fatal(err)
}
if cfg, err = t.Start(); err != nil {
log.Fatal(err)
}
if c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}); err != nil {
log.Fatal(err)
}
code := m.Run()
t.Stop()
os.Exit(code)
}
unwind.rs
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of Rust stack unwinding
//!
//! For background on exception handling and stack unwinding please see
//! "Exception Handling in LLVM" (llvm.org/docs/ExceptionHandling.html) and
//! documents linked from it.
//! These are also good reads:
//! http://theofilos.cs.columbia.edu/blog/2013/09/22/base_abi/
//! http://monoinfinito.wordpress.com/series/exception-handling-in-c/
//! http://www.airs.com/blog/index.php?s=exception+frames
//!
//! ## A brief summary
//!
//! Exception handling happens in two phases: a search phase and a cleanup phase.
//!
//! In both phases the unwinder walks stack frames from top to bottom using
//! information from the stack frame unwind sections of the current process's
//! modules ("module" here refers to an OS module, i.e. an executable or a
//! dynamic library).
//!
//! For each stack frame, it invokes the associated "personality routine", whose
//! address is also stored in the unwind info section.
//!
//! In the search phase, the job of a personality routine is to examine the
//! exception object being thrown and to decide whether it should be caught at
//! that stack frame. Once the handler frame has been identified, the cleanup
//! phase begins.
//!
//! In the cleanup phase, personality routines invoke cleanup code associated
//! with their stack frames (i.e. destructors). Once stack has been unwound down
//! to the handler frame level, unwinding stops and the last personality routine
//! transfers control to its catch block.
//!
//! ## Frame unwind info registration
//!
//! Each module has its own frame unwind info section (usually ".eh_frame"), and
//! the unwinder needs to know about all of them in order for unwinding to be
//! able to cross module boundaries.
//!
//! On some platforms, like Linux, this is achieved by dynamically enumerating
//! currently loaded modules via the dl_iterate_phdr() API and finding all
//! .eh_frame sections.
//!
//! Others, like Windows, require modules to actively register their unwind info
//! sections by calling __register_frame_info() API at startup. In the latter
//! case it is essential that there is only one copy of the unwinder runtime in
//! the process. This is usually achieved by linking to the dynamic version of
//! the unwind runtime.
//!
//! Currently Rust uses unwind runtime provided by libgcc.
use prelude::v1::*;
use any::Any;
use boxed;
use cell::Cell;
use cmp;
use panicking;
use fmt;
use intrinsics;
use libc::c_void;
use mem;
use sync::atomic::{self, Ordering};
use sys_common::mutex::{Mutex, MUTEX_INIT};
use rt::libunwind as uw;
struct Exception {
uwe: uw::_Unwind_Exception,
cause: Option<Box<Any + Send + 'static>>,
}
pub type Callback = fn(msg: &(Any + Send), file: &'static str, line: usize);
// Variables used for invoking callbacks when a thread starts to unwind.
//
// For more information, see below.
const MAX_CALLBACKS: usize = 16;
static CALLBACKS: [atomic::AtomicUsize; MAX_CALLBACKS] =
[atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT];
static CALLBACK_CNT: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
thread_local! { static PANICKING: Cell<bool> = Cell::new(false) }
/// Invoke a closure, capturing the cause of panic if one occurs.
///
/// This function will return `Ok(())` if the closure did not panic, and will
/// return `Err(cause)` if the closure panics. The `cause` returned is the
/// object with which panic was originally invoked.
///
/// This function also is unsafe for a variety of reasons:
///
/// * This is not safe to call in a nested fashion. The unwinding
/// interface for Rust is designed to have at most one try/catch block per
/// thread, not multiple. No runtime checking is currently performed to uphold
/// this invariant, so this function is not safe. A nested try/catch block
/// may result in corruption of the outer try/catch block's state, especially
/// if this is used within a thread itself.
///
/// * It is not sound to trigger unwinding while already unwinding. Rust threads
/// have runtime checks in place to ensure this invariant, but it is not
/// guaranteed that a rust thread is in place when invoking this function.
/// Unwinding twice can lead to resource leaks where some destructors are not
/// run.
pub unsafe fn try<F: FnOnce()>(f: F) -> Result<(), Box<Any + Send>> {
let mut f = Some(f);
let prev = PANICKING.with(|s| s.get());
PANICKING.with(|s| s.set(false));
let ep = rust_try(try_fn::<F>, &mut f as *mut _ as *mut c_void);
PANICKING.with(|s| s.set(prev));
return if ep.is_null() {
Ok(())
} else {
let my_ep = ep as *mut Exception;
rtdebug!("caught {}", (*my_ep).uwe.exception_class);
let cause = (*my_ep).cause.take();
uw::_Unwind_DeleteException(ep);
Err(cause.unwrap())
};
extern fn try_fn<F: FnOnce()>(opt_closure: *mut c_void) {
let opt_closure = opt_closure as *mut Option<F>;
unsafe { (*opt_closure).take().unwrap()(); }
}
#[link(name = "rustrt_native", kind = "static")]
#[cfg(not(test))]
extern {}
extern {
// Rust's try-catch
// When f(...) returns normally, the return value is null.
// When f(...) throws, the return value is a pointer to the caught
// exception object.
fn rust_try(f: extern fn(*mut c_void),
data: *mut c_void) -> *mut uw::_Unwind_Exception;
}
}
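// An illustrative sketch, not part of the original file: how try() above
// converts a panic into an Err value instead of unwinding past the caller.
// Assumes panic!'s payload here is the usual &'static str.
fn try_usage_sketch() -> bool {
let result = unsafe { try(|| panic!("boom")) };
match result {
Ok(()) => false, // the closure ran to completion without panicking
Err(cause) => cause.downcast_ref::<&'static str>() == Some(&"boom"),
}
}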
/// Determines whether the current thread is unwinding because of panic.
pub fn panicking() -> bool {
PANICKING.with(|s| s.get())
}
// An uninlined, unmangled function upon which to slap yer breakpoints
#[inline(never)]
#[no_mangle]
#[allow(private_no_mangle_fns)]
fn rust_panic(cause: Box<Any + Send + 'static>) -> ! {
rtdebug!("begin_unwind()");
unsafe {
let exception: Box<_> = box Exception {
uwe: uw::_Unwind_Exception {
exception_class: rust_exception_class(),
exception_cleanup: exception_cleanup,
private: [0; uw::unwinder_private_data_size],
},
cause: Some(cause),
};
let exception_param = boxed::into_raw(exception) as *mut uw::_Unwind_Exception;
let error = uw::_Unwind_RaiseException(exception_param);
rtabort!("Could not unwind stack, error = {}", error as isize)
}
extern fn exception_cleanup(_unwind_code: uw::_Unwind_Reason_Code,
exception: *mut uw::_Unwind_Exception) {
rtdebug!("exception_cleanup()");
unsafe {
let _: Box<Exception> = Box::from_raw(exception as *mut Exception);
}
}
}
// Rust's exception class identifier. This is used by personality routines to
// determine whether the exception was thrown by their own runtime.
fn rust_exception_class() -> uw::_Unwind_Exception_Class {
// M O Z \0 R U S T -- vendor, language
0x4d4f5a_00_52555354
}
// We could implement our personality routine in pure Rust, however exception
// info decoding is tedious. More importantly, personality routines have to
// handle various platform quirks, which are not fun to maintain. For this
// reason, we attempt to reuse personality routine of the C language:
// __gcc_personality_v0.
//
// Since C does not support exception catching, __gcc_personality_v0 simply
// always returns _URC_CONTINUE_UNWIND in search phase, and always returns
// _URC_INSTALL_CONTEXT (i.e. "invoke cleanup code") in cleanup phase.
//
// This is pretty close to Rust's exception handling approach, except that Rust
// does have a single "catch-all" handler at the bottom of each thread's stack.
// So we have two versions of the personality routine:
// - rust_eh_personality, used by all cleanup landing pads, which never catches,
// so the behavior of __gcc_personality_v0 is perfectly adequate there, and
// - rust_eh_personality_catch, used only by rust_try(), which always catches.
//
// Note, however, that for implementation simplicity, rust_eh_personality_catch
// lacks code to install a landing pad, so in order to obtain exception object
// pointer (which it needs to return upstream), rust_try() employs another trick:
// it calls into the nested rust_try_inner(), whose landing pad does not resume
// unwinds. Instead, it extracts the exception pointer and performs a "normal"
// return.
//
// See also: rt/rust_try.ll
#[cfg(all(not(target_arch = "arm"),
not(all(windows, target_arch = "x86_64")),
not(test)))]
#[doc(hidden)]
pub mod eabi {
use rt::libunwind as uw;
use libc::c_int;
extern "C" {
fn __gcc_personality_v0(version: c_int,
actions: uw::_Unwind_Action,
exception_class: uw::_Unwind_Exception_Class,
ue_header: *mut uw::_Unwind_Exception,
context: *mut uw::_Unwind_Context)
-> uw::_Unwind_Reason_Code;
}
#[lang="eh_personality"]
#[no_mangle] // referenced from rust_try.ll
#[allow(private_no_mangle_fns)]
extern fn rust_eh_personality(
version: c_int,
actions: uw::_Unwind_Action,
exception_class: uw::_Unwind_Exception_Class,
ue_header: *mut uw::_Unwind_Exception,
context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
unsafe {
__gcc_personality_v0(version, actions, exception_class, ue_header,
context)
}
}
#[no_mangle] // referenced from rust_try.ll
pub extern "C" fn rust_eh_personality_catch(
_version: c_int,
actions: uw::_Unwind_Action,
_exception_class: uw::_Unwind_Exception_Class,
_ue_header: *mut uw::_Unwind_Exception,
_context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
uw::_URC_HANDLER_FOUND // catch!
}
else { // cleanup phase
uw::_URC_INSTALL_CONTEXT
}
}
}
// iOS on armv7 uses SjLj exceptions and therefore requires
// a specialized personality routine: __gcc_personality_sj0
#[cfg(all(target_os = "ios", target_arch = "arm", not(test)))]
#[doc(hidden)]
pub mod eabi {
use rt::libunwind as uw;
use libc::c_int;
extern "C" {
fn __gcc_personality_sj0(version: c_int,
actions: uw::_Unwind_Action,
exception_class: uw::_Unwind_Exception_Class,
ue_header: *mut uw::_Unwind_Exception,
context: *mut uw::_Unwind_Context)
-> uw::_Unwind_Reason_Code;
}
#[lang="eh_personality"]
#[no_mangle] // referenced from rust_try.ll
pub extern "C" fn rust_eh_personality(
version: c_int,
actions: uw::_Unwind_Action,
exception_class: uw::_Unwind_Exception_Class,
ue_header: *mut uw::_Unwind_Exception,
context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
unsafe {
__gcc_personality_sj0(version, actions, exception_class, ue_header,
context)
}
}
#[no_mangle] // referenced from rust_try.ll
pub extern "C" fn rust_eh_personality_catch(
_version: c_int,
actions: uw::_Unwind_Action,
_exception_class: uw::_Unwind_Exception_Class,
_ue_header: *mut uw::_Unwind_Exception,
_context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
uw::_URC_HANDLER_FOUND // catch!
}
else { // cleanup phase
unsafe {
__gcc_personality_sj0(_version, actions, _exception_class, _ue_header,
_context)
}
}
}
}
// ARM EHABI uses a slightly different personality routine signature,
// but otherwise works the same.
#[cfg(all(target_arch = "arm", not(target_os = "ios"), not(test)))]
#[doc(hidden)]
pub mod eabi {
use rt::libunwind as uw;
use libc::c_int;
extern "C" {
fn __gcc_personality_v0(state: uw::_Unwind_State,
ue_header: *mut uw::_Unwind_Exception,
context: *mut uw::_Unwind_Context)
-> uw::_Unwind_Reason_Code;
}
#[lang="eh_personality"]
#[no_mangle] // referenced from rust_try.ll
#[allow(private_no_mangle_fns)]
extern "C" fn rust_eh_personality(
state: uw::_Unwind_State,
ue_header: *mut uw::_Unwind_Exception,
context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
unsafe {
__gcc_personality_v0(state, ue_header, context)
}
}
#[no_mangle] // referenced from rust_try.ll
pub extern "C" fn rust_eh_personality_catch(
state: uw::_Unwind_State,
_ue_header: *mut uw::_Unwind_Exception,
_context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
if (state as c_int & uw::_US_ACTION_MASK as c_int)
== uw::_US_VIRTUAL_UNWIND_FRAME as c_int { // search phase
uw::_URC_HANDLER_FOUND // catch!
}
else { // cleanup phase
uw::_URC_INSTALL_CONTEXT
}
}
}
// Win64 SEH (see http://msdn.microsoft.com/en-us/library/1eyas8tf.aspx)
//
// This looks a bit convoluted because rather than implementing a native SEH handler,
// GCC reuses the same personality routine as for the other architectures by wrapping it
// with an "API translator" layer (_GCC_specific_handler).
#[cfg(all(windows, target_arch = "x86_64", not(test)))]
#[doc(hidden)]
#[allow(non_camel_case_types, non_snake_case)]
pub mod eabi {
pub use self::EXCEPTION_DISPOSITION::*;
use rt::libunwind as uw;
use libc::{c_void, c_int};
#[repr(C)]
pub struct EXCEPTION_RECORD;
#[repr(C)]
pub struct CONTEXT;
#[repr(C)]
pub struct DISPATCHER_CONTEXT;
#[repr(C)]
#[derive(Copy, Clone)]
pub enum EXCEPTION_DISPOSITION {
ExceptionContinueExecution,
ExceptionContinueSearch,
ExceptionNestedException,
ExceptionCollidedUnwind
}
type _Unwind_Personality_Fn =
extern "C" fn(
version: c_int,
actions: uw::_Unwind_Action,
exception_class: uw::_Unwind_Exception_Class,
ue_header: *mut uw::_Unwind_Exception,
context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code;
extern "C" {
fn __gcc_personality_seh0(
exceptionRecord: *mut EXCEPTION_RECORD,
establisherFrame: *mut c_void,
contextRecord: *mut CONTEXT,
dispatcherContext: *mut DISPATCHER_CONTEXT
) -> EXCEPTION_DISPOSITION;
fn _GCC_specific_handler(
exceptionRecord: *mut EXCEPTION_RECORD,
establisherFrame: *mut c_void,
contextRecord: *mut CONTEXT,
dispatcherContext: *mut DISPATCHER_CONTEXT,
personality: _Unwind_Personality_Fn
) -> EXCEPTION_DISPOSITION;
}
#[lang="eh_personality"]
#[no_mangle] // referenced from rust_try.ll
#[allow(private_no_mangle_fns)]
extern "C" fn rust_eh_personality(
exceptionRecord: *mut EXCEPTION_RECORD,
establisherFrame: *mut c_void,
contextRecord: *mut CONTEXT,
dispatcherContext: *mut DISPATCHER_CONTEXT
) -> EXCEPTION_DISPOSITION
{
unsafe {
__gcc_personality_seh0(exceptionRecord, establisherFrame,
contextRecord, dispatcherContext)
}
}
#[no_mangle] // referenced from rust_try.ll
pub extern "C" fn rust_eh_personality_catch(
exceptionRecord: *mut EXCEPTION_RECORD,
establisherFrame: *mut c_void,
contextRecord: *mut CONTEXT,
dispatcherContext: *mut DISPATCHER_CONTEXT
) -> EXCEPTION_DISPOSITION
{
extern "C" fn inner(
_version: c_int,
actions: uw::_Unwind_Action,
_exception_class: uw::_Unwind_Exception_Class,
_ue_header: *mut uw::_Unwind_Exception,
_context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
uw::_URC_HANDLER_FOUND // catch!
}
else { // cleanup phase
uw::_URC_INSTALL_CONTEXT
}
}
unsafe {
_GCC_specific_handler(exceptionRecord, establisherFrame,
contextRecord, dispatcherContext,
inner)
}
}
}
#[cfg(not(test))]
/// Entry point of panic from the libcore crate.
#[lang = "panic_fmt"]
pub extern fn rust_begin_unwind(msg: fmt::Arguments,
file: &'static str, line: usize) -> ! {
begin_unwind_fmt(msg, &(file, line))
}
/// The entry point for unwinding with a formatted message.
///
/// This is designed to reduce the amount of code required at the call
/// site as much as possible (so that `panic!()` has as low an impact
/// on (e.g.) the inlining of other functions as possible), by moving
/// the actual formatting into this shared place.
#[inline(never)] #[cold]
#[stable(since = "1.0.0", feature = "rust1")]
pub fn begin_unwind_fmt(msg: fmt::Arguments, file_line: &(&'static str, usize)) -> ! {
use fmt::Write;
// We do two allocations here, unfortunately. But (a) they're
// required with the current scheme, and (b) we don't handle
// panic + OOM properly anyway (see comment in begin_unwind
// below).
let mut s = String::new();
let _ = write!(&mut s, "{}", msg);
begin_unwind_inner(Box::new(s), file_line)
}
/// This is the entry point of unwinding for panic!() and assert!().
#[inline(never)] #[cold] // avoid code bloat at the call sites as much as possible
#[stable(since = "1.0.0", feature = "rust1")]
pub fn begin_unwind<M: Any + Send>(msg: M, file_line: &(&'static str, usize)) -> ! {
// Note that this should be the only allocation performed in this code path.
// Currently this means that panic!() on OOM will invoke this code path,
// but then again we're not really ready for panic on OOM anyway. If
// we do start doing this, then we should propagate this allocation to
// be performed in the parent of this thread instead of the thread that's
// panicking.
// see below for why we do the `Any` coercion here.
begin_unwind_inner(Box::new(msg), file_line)
}
/// The core of the unwinding.
///
/// This is non-generic to avoid instantiation bloat in other crates
/// (which makes compilation of small crates noticeably slower). (Note:
/// we need the `Any` object anyway, we're not just creating it to
/// avoid being generic.)
///
/// Doing this split took the LLVM IR line counts of `fn main() { panic!()
/// }` from ~1900/3700 (-O/no opts) to 180/590.
#[inline(never)] #[cold] // this is the slow path, please never inline this
fn begin_unwind_inner(msg: Box<Any + Send>,
file_line: &(&'static str, usize)) -> ! {
// Make sure the default failure handler is registered before we look at the
// callbacks. We also use a raw sys-based mutex here instead of a
// `std::sync` one as accessing TLS can cause weird recursive problems (and
// we don't need poison checking).
unsafe {
static LOCK: Mutex = MUTEX_INIT;
static mut INIT: bool = false;
LOCK.lock();
if !INIT {
register(panicking::on_panic);
INIT = true;
}
LOCK.unlock();
}
// First, invoke the user-defined callbacks triggered on thread panic.
//
// By the time that we see a callback has been registered (by reading
// MAX_CALLBACKS), the actual callback itself may not have been stored yet,
// so we just chalk it up to a race condition and move on to the next
// callback. Additionally, CALLBACK_CNT may briefly be higher than
// MAX_CALLBACKS, so we're sure to clamp it as necessary.
let callbacks = {
let amt = CALLBACK_CNT.load(Ordering::SeqCst);
&CALLBACKS[..cmp::min(amt, MAX_CALLBACKS)]
};
for cb in callbacks {
match cb.load(Ordering::SeqCst) {
0 => {}
n => {
let f: Callback = unsafe { mem::transmute(n) };
let (file, line) = *file_line;
f(&*msg, file, line);
}
}
};
// Now that we've run all the necessary unwind callbacks, we actually
// perform the unwinding.
if panicking() {
// If a thread panics while it's already unwinding then we
// have limited options. Currently our preference is to
// just abort. In the future we may consider resuming
// unwinding or otherwise exiting the thread cleanly.
rterrln!("thread panicked while panicking. aborting.");
unsafe { intrinsics::abort() }
}
PANICKING.with(|s| s.set(true));
rust_panic(msg);
}
/// Register a callback to be invoked when a thread unwinds.
///
/// This is an unsafe and experimental API which allows for an arbitrary
/// callback to be invoked when a thread panics. This callback is invoked on both
/// the initial unwinding and a double unwinding if one occurs. Additionally,
/// the local `Task` will be in place for the duration of the callback, and
/// the callback must ensure that it remains in place once the callback returns.
///
/// Only a limited number of callbacks can be registered, and this function
/// returns whether the callback was successfully registered or not. It is not
/// currently possible to unregister a callback once it has been registered.
#[unstable(feature = "std_misc")]
pub unsafe fn register(f: Callback) -> bool {
match CALLBACK_CNT.fetch_add(1, Ordering::SeqCst) {
// The invocation code has knowledge of this window where the count has
// been incremented, but the callback has not been stored. We're
// guaranteed that the slot we're storing into is 0.
n if n < MAX_CALLBACKS => {
let prev = CALLBACKS[n].swap(mem::transmute(f), Ordering::SeqCst);
rtassert!(prev == 0);
true
}
// If we accidentally bumped the count too high, pull it back.
_ => {
CALLBACK_CNT.store(MAX_CALLBACKS, Ordering::SeqCst);
false
}
}
}
WebsocketClientsBrowserCtrl.js
const { Controller, syslib } = require('../../../sys');
class WebsocketClientsBrowserCtrl extends Controller {
constructor(app) {
super();
}
async loader(trx) {
this.setTitle('Regoch Websocket Client for Browser');
this.setDescription('Websocket client for Browser platform or browserify. The client works best in conjunction with the Regoch Websocket Server.');
this.setKeywords('websocket, client, browser, regoch');
this.setLang('en');
this.loadCSS(['https://cdnjs.cloudflare.com/ajax/libs/prism/1.23.0/themes/prism-coy.min.css']);
await this.loadView('#primary', 'pages/websocket/clients/browser/primary.html', 'sibling');
this.loadViews([
['#sidebar', 'pages/websocket/clients/browser/sidebar.html'],
['#article-introduction', 'pages/websocket/clients/browser/article-introduction.html'],
['#article-rwclientbrowser', 'pages/websocket/clients/browser/article-rwclientbrowser.html'],
['#article-helper', 'pages/websocket/clients/browser/article-helper.html'],
['#article-router', 'pages/websocket/clients/nodejs/article-router.html']
], true);
}
async postrend(trx) {
this.showButtonBars = true;
await this.rgIf('showButtonBars');
await syslib.util.sleep(1300);
this.lazyJS([
'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.15.8/highlight.min.js',
'/assets/js/highlight-custom.js',
'/assets/plugins/jquery.scrollTo.min.js',
'/assets/plugins/lightbox/dist/ekko-lightbox.min.js',
'/assets/js/docs.js',
'https://cdnjs.cloudflare.com/ajax/libs/prism/1.23.0/prism.min.js'
]);
}
destroy(elem, event) {
this.unlazyJS();
}
}
module.exports = WebsocketClientsBrowserCtrl;

inspect_builder.go
package commands
import (
"bytes"
"fmt"
"io"
"strings"
"text/tabwriter"
"text/template"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/buildpacks/pack"
"github.com/buildpacks/pack/internal/config"
"github.com/buildpacks/pack/internal/dist"
"github.com/buildpacks/pack/internal/style"
"github.com/buildpacks/pack/logging"
)
func InspectBuilder(logger logging.Logger, cfg config.Config, client PackClient) *cobra.Command {
cmd := &cobra.Command{
Use: "inspect-builder <builder-image-name>",
Short: "Show information about a builder",
Args: cobra.MaximumNArgs(1),
RunE: logError(logger, func(cmd *cobra.Command, args []string) error {
if cfg.DefaultBuilder == "" && len(args) == 0 {
suggestSettingBuilder(logger, client)
return pack.NewSoftError()
}
imageName := cfg.DefaultBuilder
if len(args) >= 1 {
imageName = args[0]
}
verbose := logger.IsVerbose()
presentRemote, remoteOutput, remoteWarnings, remoteErr := inspectBuilderOutput(client, cfg, imageName, false, verbose)
presentLocal, localOutput, localWarnings, localErr := inspectBuilderOutput(client, cfg, imageName, true, verbose)
if !presentRemote && !presentLocal {
return errors.Errorf("Unable to find builder '%s' locally or remotely.\n", imageName)
}
if imageName == cfg.DefaultBuilder {
logger.Infof("Inspecting default builder: %s\n", style.Symbol(imageName))
} else {
logger.Infof("Inspecting builder: %s\n", style.Symbol(imageName))
}
if remoteErr != nil {
logger.Error(remoteErr.Error())
} else {
logger.Infof("\nREMOTE:\n%s\n", remoteOutput)
for _, w := range remoteWarnings {
logger.Warn(w)
}
}
if localErr != nil {
logger.Error(localErr.Error())
} else {
logger.Infof("\nLOCAL:\n%s\n", localOutput)
for _, w := range localWarnings {
logger.Warn(w)
}
}
return nil
}),
}
AddHelpFlag(cmd, "inspect-builder")
return cmd
}
func inspectBuilderOutput(client PackClient, cfg config.Config, imageName string, local bool, verbose bool) (present bool, output string, warning []string, err error) {
source := "remote"
if local {
source = "local"
}
info, err := client.InspectBuilder(imageName, local)
if err != nil {
return true, "", nil, errors.Wrapf(err, "inspecting %s image '%s'", source, imageName)
}
if info == nil {
return false, "(not present)", nil, nil
}
var buf bytes.Buffer
warnings, err := generateBuilderOutput(&buf, imageName, cfg, *info, verbose)
if err != nil {
return true, "", nil, errors.Wrapf(err, "writing output for %s image '%s'", source, imageName)
}
return true, buf.String(), warnings, nil
}
func generateBuilderOutput(writer io.Writer, imageName string, cfg config.Config, info pack.BuilderInfo, verbose bool) (warnings []string, err error) {
tpl := template.Must(template.New("").Parse(`
{{ if ne .Info.Description "" -}}
Description: {{ .Info.Description }}
{{ end -}}
{{- if ne .Info.CreatedBy.Name "" -}}
Created By:
Name: {{ .Info.CreatedBy.Name }}
Version: {{ .Info.CreatedBy.Version }}
{{ end -}}
Trusted: {{.Trusted}}
Stack:
ID: {{ .Info.Stack }}
{{- if .Verbose}}
{{- if ne (len .Info.Mixins) 0 }}
Mixins:
{{- end }}
{{- range $index, $mixin := .Info.Mixins }}
{{ $mixin }}
{{- end }}
{{- end }}
Lifecycle:
Version: {{- if .Info.Lifecycle.Info.Version }} {{ .Info.Lifecycle.Info.Version }}{{- else }} (none){{- end }}
Buildpack API: {{- if .Info.Lifecycle.API.BuildpackVersion }} {{ .Info.Lifecycle.API.BuildpackVersion }}{{- else }} (none){{- end }}
Platform API: {{- if .Info.Lifecycle.API.PlatformVersion }} {{ .Info.Lifecycle.API.PlatformVersion }}{{- else }} (none){{- end }}
Run Images:
{{- if ne .RunImages "" }}
{{ .RunImages }}
{{- else }}
(none)
{{- end }}
Buildpacks:
{{- if .Info.Buildpacks }}
{{ .Buildpacks }}
{{- else }}
(none)
{{- end }}
Detection Order:
{{- if ne .Order "" }}
{{ .Order }}
{{- else }}
(none)
{{ end }}`,
))
bps, err := buildpacksOutput(info.Buildpacks)
if err != nil {
return nil, err
}
if len(info.Buildpacks) == 0 {
warnings = append(warnings, fmt.Sprintf("%s has no buildpacks", style.Symbol(imageName)))
warnings = append(warnings, "Users must supply buildpacks from the host machine")
}
order, err := detectionOrderOutput(info.Order)
if err != nil {
return nil, err
}
if len(info.Order) == 0 {
warnings = append(warnings, fmt.Sprintf("%s does not specify detection order", style.Symbol(imageName)))
warnings = append(warnings, "Users must build with explicitly specified buildpacks")
}
runImgs, err := runImagesOutput(info.RunImage, info.RunImageMirrors, cfg)
if err != nil {
return nil, err
}
if info.RunImage == "" {
warnings = append(warnings, fmt.Sprintf("%s does not specify a run image", style.Symbol(imageName)))
warnings = append(warnings, "Users must build with an explicitly specified run image")
}
lcDescriptor := &info.Lifecycle
if lcDescriptor.Info.Version == nil {
warnings = append(warnings, fmt.Sprintf("%s does not specify lifecycle version", style.Symbol(imageName)))
}
if lcDescriptor.API.BuildpackVersion == nil {
warnings = append(warnings, fmt.Sprintf("%s does not specify lifecycle buildpack api version", style.Symbol(imageName)))
}
if lcDescriptor.API.PlatformVersion == nil {
warnings = append(warnings, fmt.Sprintf("%s does not specify lifecycle platform api version", style.Symbol(imageName)))
}
trustedString := "No"
if isTrustedBuilder(cfg, imageName) {
trustedString = "Yes"
}
return warnings, tpl.Execute(writer, &struct {
Info pack.BuilderInfo
Buildpacks string
RunImages string
Order string
Verbose bool
Trusted string
}{
info,
bps,
runImgs,
order,
verbose,
trustedString,
})
}
// TODO: present buildpack order (inc. nested) [https://github.com/buildpacks/pack/issues/253].
func buildpacksOutput(bps []dist.BuildpackInfo) (string, error) {
buf := &bytes.Buffer{}
tabWriter := new(tabwriter.Writer).Init(buf, 0, 0, 8, ' ', 0)
if _, err := fmt.Fprint(tabWriter, " ID\tVERSION\tHOMEPAGE\n"); err != nil {
return "", err
}
for _, bp := range bps {
if _, err := fmt.Fprintf(tabWriter, " %s\t%s\t%s\n", bp.ID, bp.Version, bp.Homepage); err != nil {
return "", err
}
}
if err := tabWriter.Flush(); err != nil {
return "", err
}
return strings.TrimSuffix(buf.String(), "\n"), nil
}
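// For illustration, with two made-up buildpacks the tab-aligned result of
// buildpacksOutput looks roughly like this (values are not from a real builder):
//
//	ID                VERSION    HOMEPAGE
//	example/nodejs    1.2.3      https://example.com/nodejs
//	example/go        0.0.4      https://example.com/go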
func runImagesOutput(runImage string, mirrors []string, cfg config.Config) (string, error) {
buf := &bytes.Buffer{}
tabWriter := new(tabwriter.Writer).Init(buf, 0, 0, 4, ' ', 0)
for _, r := range getLocalMirrors(runImage, cfg) {
if _, err := fmt.Fprintf(tabWriter, " %s\t(user-configured)\n", r); err != nil {
return "", err
}
}
if runImage != "" {
if _, err := fmt.Fprintf(tabWriter, " %s\n", runImage); err != nil {
return "", err
}
}
for _, r := range mirrors {
if _, err := fmt.Fprintf(tabWriter, " %s\n", r); err != nil {
return "", err
}
}
if err := tabWriter.Flush(); err != nil {
return "", err
}
return strings.TrimSuffix(buf.String(), "\n"), nil
}
func detectionOrderOutput(order dist.Order) (string, error) {
buf := strings.Builder{}
for i, group := range order {
buf.WriteString(fmt.Sprintf(" Group #%d:\n", i+1))
tabWriter := new(tabwriter.Writer).Init(&buf, 0, 0, 4, ' ', 0)
for _, bp := range group.Group {
var optional string
if bp.Optional {
optional = "(optional)"
}
bpRef := bp.ID
if bp.Version != "" {
bpRef += "@" + bp.Version
}
if _, err := fmt.Fprintf(tabWriter, " %s\t%s\n", bpRef, optional); err != nil {
return "", err
}
}
if err := tabWriter.Flush(); err != nil {
return "", err
}
}
return strings.TrimSuffix(buf.String(), "\n"), nil
}
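// Sketch of the detection order rendered by detectionOrderOutput for a builder
// with two groups (buildpack IDs below are invented for illustration):
//
//	Group #1:
//	  example/java@1.0.0
//	  example/maven@2.1.0    (optional)
//	Group #2:
//	  example/nodejs@1.2.3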
func getLocalMirrors(runImage string, cfg config.Config) []string {
for _, ri := range cfg.RunImages {
if ri.Image == runImage {
return ri.Mirrors
}
}
return nil
}

api_op_ListResourcesForTagOption.go
// Code generated by smithy-go-codegen DO NOT EDIT.
package servicecatalog
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/servicecatalog/types"
smithy "github.com/awslabs/smithy-go"
"github.com/awslabs/smithy-go/middleware"
smithyhttp "github.com/awslabs/smithy-go/transport/http"
)
// Lists the resources associated with the specified TagOption.
func (c *Client) ListResourcesForTagOption(ctx context.Context, params *ListResourcesForTagOptionInput, optFns ...func(*Options)) (*ListResourcesForTagOptionOutput, error) {
stack := middleware.NewStack("ListResourcesForTagOption", smithyhttp.NewStackRequest)
options := c.options.Copy()
for _, fn := range optFns {
fn(&options)
}
addawsAwsjson11_serdeOpListResourcesForTagOptionMiddlewares(stack)
awsmiddleware.AddRequestInvocationIDMiddleware(stack)
smithyhttp.AddContentLengthMiddleware(stack)
addResolveEndpointMiddleware(stack, options)
v4.AddComputePayloadSHA256Middleware(stack)
addRetryMiddlewares(stack, options)
addHTTPSignerV4Middleware(stack, options)
awsmiddleware.AddAttemptClockSkewMiddleware(stack)
addClientUserAgent(stack)
smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
smithyhttp.AddCloseResponseBodyMiddleware(stack)
addOpListResourcesForTagOptionValidationMiddleware(stack)
stack.Initialize.Add(newServiceMetadataMiddleware_opListResourcesForTagOption(options.Region), middleware.Before)
addRequestIDRetrieverMiddleware(stack)
addResponseErrorMiddleware(stack)
for _, fn := range options.APIOptions {
if err := fn(stack); err != nil {
return nil, err
}
}
handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
result, metadata, err := handler.Handle(ctx, params)
if err != nil {
return nil, &smithy.OperationError{
ServiceID: ServiceID,
OperationName: "ListResourcesForTagOption",
Err: err,
}
}
out := result.(*ListResourcesForTagOptionOutput)
out.ResultMetadata = metadata
return out, nil
}
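// Caller sketch (not generated code): assumes an aws.Config obtained from
// config.LoadDefaultConfig, an aws.String helper, and a placeholder TagOption ID.
//
//	client := servicecatalog.NewFromConfig(cfg)
//	out, err := client.ListResourcesForTagOption(ctx, &servicecatalog.ListResourcesForTagOptionInput{
//		TagOptionId: aws.String("tag-1234"),
//	})
//	if err != nil { /* handle the error */ }
//	for _, detail := range out.ResourceDetails { _ = detail }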
type ListResourcesForTagOptionInput struct {
// The TagOption identifier.
//
// This member is required.
TagOptionId *string
// The maximum number of items to return with this call.
PageSize *int32
// The page token for the next set of results. To retrieve the first set of
// results, use null.
PageToken *string
// The resource type.
//
// * Portfolio
//
// * Product
ResourceType *string
}
type ListResourcesForTagOptionOutput struct {
// The page token for the next set of results. To retrieve the first set of
// results, use null.
PageToken *string
// Information about the resources.
ResourceDetails []*types.ResourceDetail
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
}
func addawsAwsjson11_serdeOpListResourcesForTagOptionMiddlewares(stack *middleware.Stack) {
stack.Serialize.Add(&awsAwsjson11_serializeOpListResourcesForTagOption{}, middleware.After)
stack.Deserialize.Add(&awsAwsjson11_deserializeOpListResourcesForTagOption{}, middleware.After)
}
func newServiceMetadataMiddleware_opListResourcesForTagOption(region string) awsmiddleware.RegisterServiceMetadata {
return awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "servicecatalog",
OperationName: "ListResourcesForTagOption",
}
}

batch_client.go
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
"net/http"
v1 "k8s.io/api/batch/v1"
"k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
type BatchV1Interface interface {
RESTClient() rest.Interface
CronJobsGetter
JobsGetter
}
// BatchV1Client is used to interact with features provided by the batch group.
type BatchV1Client struct {
restClient rest.Interface
cluster string
}
func (c *BatchV1Client) CronJobs(namespace string) CronJobInterface {
return newCronJobs(c, namespace)
}
func (c *BatchV1Client) Jobs(namespace string) JobInterface {
return newJobs(c, namespace)
}
// NewForConfig creates a new BatchV1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*BatchV1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
httpClient, err := rest.HTTPClientFor(&config)
if err != nil {
return nil, err
}
return NewForConfigAndClient(&config, httpClient)
}
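// Construction sketch (not generated code): the kubeconfig path and the
// clientcmd/metav1 helpers are assumptions about the caller's environment.
//
//	cfg, _ := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
//	batchClient, err := NewForConfig(cfg)
//	if err != nil { /* handle the error */ }
//	jobs, _ := batchClient.Jobs("default").List(ctx, metav1.ListOptions{})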
// NewForConfigAndClient creates a new BatchV1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*BatchV1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
client, err := rest.RESTClientForConfigAndClient(&config, h)
if err != nil {
return nil, err
}
return &BatchV1Client{restClient: client}, nil
}
// NewForConfigOrDie creates a new BatchV1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *BatchV1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
}
return client
}
// New creates a new BatchV1Client for the given RESTClient.
func New(c rest.Interface) *BatchV1Client {
return &BatchV1Client{restClient: c}
}
// NewWithCluster creates a new BatchV1Client for the given RESTClient and cluster.
func NewWithCluster(c rest.Interface, cluster string) *BatchV1Client {
return &BatchV1Client{restClient: c, cluster: cluster}
}
func setConfigDefaults(config *rest.Config) error {
gv := v1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *BatchV1Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
return c.restClient
}

sam-kepsek.component.ts
import { Component } from '@angular/core';
@Component({
selector: 'ngx-sam-kepsek',
styleUrls: ['./sam-kepsek.component.scss'],
templateUrl: './sam-kepsek.component.html',
})
export class SamKepsekComponent {
starRate: number = 2;
heartRate: number = 4;
}

datapartition.go
// Copyright 2018 The Chubao Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
package cmd
import (
"fmt"
"github.com/chubaofs/chubaofs/proto"
"github.com/chubaofs/chubaofs/sdk/master"
"github.com/spf13/cobra"
"sort"
"strconv"
)
const (
cmdDataPartitionUse = "datapartition [COMMAND]"
cmdDataPartitionShort = "Manage data partition"
)
func newDataPartitionCmd(client *master.MasterClient) *cobra.Command {
var cmd = &cobra.Command{
Use: cmdDataPartitionUse,
Short: cmdDataPartitionShort,
}
cmd.AddCommand(
newDataPartitionGetCmd(client),
newListCorruptDataPartitionCmd(client),
newDataPartitionDecommissionCmd(client),
newDataPartitionReplicateCmd(client),
newDataPartitionDeleteReplicaCmd(client),
)
return cmd
}
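// Invocation sketch (assuming the CLI binary is cfs-cli and that CliOpInfo,
// CliOpCheck and CliOpDecommission resolve to "info", "check" and
// "decommission"; the partition ID and address are placeholders):
//
//	cfs-cli datapartition info 100
//	cfs-cli datapartition check
//	cfs-cli datapartition decommission 192.168.0.11:17310 100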
const (
cmdDataPartitionGetShort = "Display detail information of a data partition"
cmdCheckCorruptDataPartitionShort = "Check out corrupt data partitions"
cmdDataPartitionDecommissionShort = "Decommission a replication of the data partition to a new address"
cmdDataPartitionReplicateShort = "Add a replication of the data partition on a new address"
cmdDataPartitionDeleteReplicaShort = "Delete a replication of the data partition on a fixed address"
)
func newDataPartitionGetCmd(client *master.MasterClient) *cobra.Command {
var cmd = &cobra.Command{
Use: CliOpInfo + " [DATA PARTITION ID]",
Short: cmdDataPartitionGetShort,
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
var (
err error
partitionID uint64
partition *proto.DataPartitionInfo
)
defer func() {
if err != nil {
errout("Error:%v", err)
OsExitWithLogFlush()
}
}()
if partitionID, err = strconv.ParseUint(args[0], 10, 64); err != nil {
return
}
if partition, err = client.AdminAPI().GetDataPartition("", partitionID); err != nil {
return
}
stdout(formatDataPartitionInfo(partition))
},
}
return cmd
}
func newListCorruptDataPartitionCmd(client *master.MasterClient) *cobra.Command {
var cmd = &cobra.Command{
Use: CliOpCheck,
Short: cmdCheckCorruptDataPartitionShort,
Long: `If the data nodes are marked as "Inactive", it means the nodes have been unavailable for some time. It is suggested to
eliminate the network, disk or other problems first. Once the bad nodes can never become "active" again, they are called corrupt
nodes. The "decommission" command can be used to discard the corrupt nodes. However, if more than half of the replicas of
a partition are on the corrupt nodes, the few remaining replicas cannot reach an agreement with one leader. In this case,
you can use the "reset" command to fix the problem. The "reset" command may lead to data loss, so be careful when using it.
The "reset" command will be released in the next version`,
Run: func(cmd *cobra.Command, args []string) {
var (
diagnosis *proto.DataPartitionDiagnosis
dataNodes []*proto.DataNodeInfo
err error
)
defer func() {
if err != nil {
errout("Error:%v", err)
OsExitWithLogFlush()
}
}()
if diagnosis, err = client.AdminAPI().DiagnoseDataPartition(); err != nil {
return
}
stdout("[Inactive Data nodes]:\n")
stdout("%v\n", formatDataNodeDetailTableHeader())
for _, addr := range diagnosis.InactiveDataNodes {
var node *proto.DataNodeInfo
if node, err = client.NodeAPI().GetDataNode(addr); err != nil {
return
}
dataNodes = append(dataNodes, node)
}
sort.SliceStable(dataNodes, func(i, j int) bool {
return dataNodes[i].ID < dataNodes[j].ID
})
for _, node := range dataNodes {
stdout("%v\n", formatDataNodeDetail(node, true))
}
stdout("\n")
stdout("[Corrupt data partitions](no leader):\n")
stdout("%v\n", partitionInfoTableHeader)
sort.SliceStable(diagnosis.CorruptDataPartitionIDs, func(i, j int) bool {
return diagnosis.CorruptDataPartitionIDs[i] < diagnosis.CorruptDataPartitionIDs[j]
})
for _, pid := range diagnosis.CorruptDataPartitionIDs {
var partition *proto.DataPartitionInfo
if partition, err = client.AdminAPI().GetDataPartition("", pid); err != nil {
err = fmt.Errorf("Partition not found, err:[%v] ", err)
return
}
stdout("%v\n", formatDataPartitionInfoRow(partition))
}
stdout("\n")
stdout("%v\n", "[Partition lack replicas]:")
sort.SliceStable(diagnosis.LackReplicaDataPartitionIDs, func(i, j int) bool {
return diagnosis.LackReplicaDataPartitionIDs[i] < diagnosis.LackReplicaDataPartitionIDs[j]
})
for _, pid := range diagnosis.LackReplicaDataPartitionIDs {
var partition *proto.DataPartitionInfo
if partition, err = client.AdminAPI().GetDataPartition("", pid); err != nil {
err = fmt.Errorf("Partition not found, err:[%v] ", err)
return
}
if partition != nil {
stdout("%v\n", formatDataPartitionInfoRow(partition))
}
}
return
},
}
return cmd
}
func newDataPartitionDecommissionCmd(client *master.MasterClient) *cobra.Command {
var cmd = &cobra.Command{
Use: CliOpDecommission + " [ADDRESS] [DATA PARTITION ID]",
Short: cmdDataPartitionDecommissionShort,
Args: cobra.MinimumNArgs(2),
Run: func(cmd *cobra.Command, args []string) {
var (
err error
partitionID uint64
)
defer func() {
if err != nil {
errout("Error:%v", err)
OsExitWithLogFlush()
}
}()
address := args[0]
partitionID, err = strconv.ParseUint(args[1], 10, 64)
if err != nil {
return
}
if err = client.AdminAPI().DecommissionDataPartition(partitionID, address); err != nil {
return
}
},
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) != 0 {
return nil, cobra.ShellCompDirectiveNoFileComp
}
return validDataNodes(client, toComplete), cobra.ShellCompDirectiveNoFileComp
},
}
return cmd
}
func newDataPartitionReplicateCmd(client *master.MasterClient) *cobra.Command {
var cmd = &cobra.Command{
Use: CliOpReplicate + " [ADDRESS] [DATA PARTITION ID]",
Short: cmdDataPartitionReplicateShort,
Args: cobra.MinimumNArgs(2),
Run: func(cmd *cobra.Command, args []string) {
var (
err error
partitionID uint64
)
defer func() {
if err != nil {
errout("Error:%v", err)
OsExitWithLogFlush()
}
}()
address := args[0]
if partitionID, err = strconv.ParseUint(args[1], 10, 64); err != nil {
return
}
if err = client.AdminAPI().AddDataReplica(partitionID, address); err != nil {
return
}
},
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) != 0 {
return nil, cobra.ShellCompDirectiveNoFileComp
}
return validDataNodes(client, toComplete), cobra.ShellCompDirectiveNoFileComp
},
}
return cmd
}
func newDataPartitionDeleteReplicaCmd(client *master.MasterClient) *cobra.Command {
var cmd = &cobra.Command{
Use: CliOpDelReplica + " [ADDRESS] [DATA PARTITION ID]",
Short: cmdDataPartitionDeleteReplicaShort,
Args: cobra.MinimumNArgs(2),
Run: func(cmd *cobra.Command, args []string) {
var (
err error
partitionID uint64
)
defer func() {
if err != nil {
errout("Error:%v", err)
OsExitWithLogFlush()
}
}()
address := args[0]
if partitionID, err = strconv.ParseUint(args[1], 10, 64); err != nil {
return
}
if err = client.AdminAPI().DeleteDataReplica(partitionID, address); err != nil {
return
}
},
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) != 0 {
return nil, cobra.ShellCompDirectiveNoFileComp
}
return validDataNodes(client, toComplete), cobra.ShellCompDirectiveNoFileComp
},
}
return cmd
}

test_bundle.js
// modules are defined as an array
// [ module function, map of requires ]
//
// map of requires is short require name -> numeric require
//
// anything defined in a previous bundle is accessed via the
// orig method which is the require for previous bundles
(function outer (modules, cache, entry) {
// Save the require from previous bundle to this closure if any
var previousRequire = typeof require == "function" && require;
function findProxyquireifyName() {
var deps = Object.keys(modules)
.map(function (k) { return modules[k][1]; });
for (var i = 0; i < deps.length; i++) {
var pq = deps[i]['proxyquireify'];
if (pq) return pq;
}
}
var proxyquireifyName = findProxyquireifyName();
function newRequire(name, jumped){
// Find the proxyquireify module, if present
var pqify = (proxyquireifyName != null) && cache[proxyquireifyName];
// Proxyquireify provides a separate cache that is used when inside
// a proxyquire call, and is set to null outside a proxyquire call.
// This allows the regular caching semantics to work correctly both
// inside and outside proxyquire calls while keeping the cached
// modules isolated.
// When switching from one proxyquire call to another, it clears
// the cache to prevent contamination between different sets
// of stubs.
var currentCache = (pqify && pqify.exports._cache) || cache;
if(!currentCache[name]) {
if(!modules[name]) {
// if we cannot find the module within our internal map or
// cache, jump to the current global require, i.e. the last bundle
// that was added to the page.
var currentRequire = typeof require == "function" && require;
if (!jumped && currentRequire) return currentRequire(name, true);
// If there are other bundles on this page the require from the
// previous one is saved to 'previousRequire'. Repeat this as
// many times as there are bundles until the module is found or
// we exhaust the require chain.
if (previousRequire) return previousRequire(name, true);
var err = new Error('Cannot find module \'' + name + '\'');
err.code = 'MODULE_NOT_FOUND';
throw err;
}
var m = currentCache[name] = {exports:{}};
// The normal browserify require function
var req = function(x){
var id = modules[name][1][x];
return newRequire(id ? id : x);
};
// The require function substituted for proxyquireify
var moduleRequire = function(x){
var pqify = (proxyquireifyName != null) && cache[proxyquireifyName];
// Only try to use the proxyquireify version if it has been `require`d
if (pqify && pqify.exports._proxy) {
return pqify.exports._proxy(req, x);
} else {
return req(x);
}
};
modules[name][0].call(m.exports,moduleRequire,m,m.exports,outer,modules,currentCache,entry);
}
return currentCache[name].exports;
}
for(var i=0;i<entry.length;i++) newRequire(entry[i]);
// Override the current require with this new one
return newRequire;
})
({1:[function(require,module,exports){
/**
* @license Apache-2.0
*
* Copyright (c) 2018 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
/**
* Half-precision floating-point negative infinity.
*
* @module @stdlib/constants/float16/ninf
* @type {number}
*
* @example
* var FLOAT16_NINF = require( '@stdlib/constants/float16/ninf' );
* // returns -infinity
*/
// MODULES //
var Number = require( '@stdlib/number/ctor' );
// MAIN //
/**
* Half-precision floating-point negative infinity.
*
* ## Notes
*
* Half-precision floating-point negative infinity has the bit sequence
*
* ```binarystring
* 1 11111 0000000000
* ```
*
* This bit sequence corresponds to the unsigned 16-bit integer `64512` and to the HEX value `0xfc00`.
*
* @constant
* @type {number}
* @default -infinity
* @see [half-precision floating-point format]{@link https://en.wikipedia.org/wiki/Half-precision_floating-point_format}
*/
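// Illustrative bit-layout check (a sketch, not part of the module; assumes an
// environment with DataView):
//
//   var view = new DataView( new ArrayBuffer( 2 ) );
//   view.setUint16( 0, 0xfc00 );
//   view.getUint16( 0 ).toString( 2 ); // => '1111110000000000' (sign | 11111 | 0000000000)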
var FLOAT16_NINF = Number.NEGATIVE_INFINITY; // TODO: if, and when, float16 is natively available, assign the true constant; otherwise, fall back to float64
// EXPORTS //
module.exports = FLOAT16_NINF;
},{"@stdlib/number/ctor":3}],2:[function(require,module,exports){
(function (__filename){(function (){
/**
* @license Apache-2.0
*
* Copyright (c) 2018 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
// MODULES //
var tape = require( 'tape' );
var Number = require( '@stdlib/number/ctor' );
var FLOAT16_NINF = require( './../lib' );
// TESTS //
tape( 'main export is a number', function test( t ) {
t.ok( true, __filename );
t.equal( typeof FLOAT16_NINF, 'number', 'main export is a number' );
t.end();
});
tape( 'export is equal to negative infinity', function test( t ) {
// Note: we rely on implicit type promotion. A 16-bit infinity should be promoted to a 64-bit infinity...
t.equal( FLOAT16_NINF, Number.NEGATIVE_INFINITY, 'equals -infinity' );
t.end();
});
}).call(this)}).call(this,"/lib/node_modules/@stdlib/constants/float16/ninf/test/test.js")
},{"./../lib":1,"@stdlib/number/ctor":3,"tape":108}],3:[function(require,module,exports){
/**
* @license Apache-2.0
*
* Copyright (c) 2018 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
/**
* Constructor which returns a `Number` object.
*
* @module @stdlib/number/ctor
*
* @example
* var Number = require( '@stdlib/number/ctor' );
*
* var v = new Number( 10.0 );
* // returns <Number>
*/
// MODULES //
var Number = require( './number.js' );
// EXPORTS //
module.exports = Number;
},{"./number.js":4}],4:[function(require,module,exports){
/**
* @license Apache-2.0
*
* Copyright (c) 2018 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
// EXPORTS //
module.exports = Number; // eslint-disable-line stdlib/require-globals
},{}],5:[function(require,module,exports){
'use strict'
exports.byteLength = byteLength
exports.toByteArray = toByteArray
exports.fromByteArray = fromByteArray
var lookup = []
var revLookup = []
var Arr = typeof Uint8Array !== 'undefined' ? Uint8Array : Array
var code = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
for (var i = 0, len = code.length; i < len; ++i) {
lookup[i] = code[i]
revLookup[code.charCodeAt(i)] = i
}
// Support decoding URL-safe base64 strings, as Node.js does.
// See: https://en.wikipedia.org/wiki/Base64#URL_applications
revLookup['-'.charCodeAt(0)] = 62
revLookup['_'.charCodeAt(0)] = 63
function getLens (b64) {
var len = b64.length
if (len % 4 > 0) {
throw new Error('Invalid string. Length must be a multiple of 4')
}
// Trim off extra bytes after placeholder bytes are found
// See: https://github.com/beatgammit/base64-js/issues/42
var validLen = b64.indexOf('=')
if (validLen === -1) validLen = len
var placeHoldersLen = validLen === len
? 0
: 4 - (validLen % 4)
return [validLen, placeHoldersLen]
}
// the decoded length is 3/4 of the base64 length, minus any placeholder bytes
function byteLength (b64) {
var lens = getLens(b64)
var validLen = lens[0]
var placeHoldersLen = lens[1]
return ((validLen + placeHoldersLen) * 3 / 4) - placeHoldersLen
}
function _byteLength (b64, validLen, placeHoldersLen) {
return ((validLen + placeHoldersLen) * 3 / 4) - placeHoldersLen
}
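// Worked example (illustrative): for b64 = 'Zm9vYg==', validLen is 6 and
// placeHoldersLen is 2, so byteLength returns ((6 + 2) * 3 / 4) - 2 = 4,
// matching the four decoded bytes of 'foob'.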
function toByteArray (b64) {
var tmp
var lens = getLens(b64)
var validLen = lens[0]
var placeHoldersLen = lens[1]
var arr = new Arr(_byteLength(b64, validLen, placeHoldersLen))
var curByte = 0
// if there are placeholders, only get up to the last complete 4 chars
var len = placeHoldersLen > 0
? validLen - 4
: validLen
var i
for (i = 0; i < len; i += 4) {
tmp =
(revLookup[b64.charCodeAt(i)] << 18) |
(revLookup[b64.charCodeAt(i + 1)] << 12) |
(revLookup[b64.charCodeAt(i + 2)] << 6) |
revLookup[b64.charCodeAt(i + 3)]
arr[curByte++] = (tmp >> 16) & 0xFF
arr[curByte++] = (tmp >> 8) & 0xFF
arr[curByte++] = tmp & 0xFF
}
if (placeHoldersLen === 2) {
tmp =
(revLookup[b64.charCodeAt(i)] << 2) |
(revLookup[b64.charCodeAt(i + 1)] >> 4)
arr[curByte++] = tmp & 0xFF
}
if (placeHoldersLen === 1) {
tmp =
(revLookup[b64.charCodeAt(i)] << 10) |
(revLookup[b64.charCodeAt(i + 1)] << 4) |
(revLookup[b64.charCodeAt(i + 2)] >> 2)
arr[curByte++] = (tmp >> 8) & 0xFF
arr[curByte++] = tmp & 0xFF
}
return arr
}
function tripletToBase64 (num) {
return lookup[num >> 18 & 0x3F] +
lookup[num >> 12 & 0x3F] +
lookup[num >> 6 & 0x3F] +
lookup[num & 0x3F]
}
function encodeChunk (uint8, start, end) {
var tmp
var output = []
for (var i = start; i < end; i += 3) {
tmp =
((uint8[i] << 16) & 0xFF0000) +
((uint8[i + 1] << 8) & 0xFF00) +
(uint8[i + 2] & 0xFF)
output.push(tripletToBase64(tmp))
}
return output.join('')
}
function fromByteArray (uint8) {
var tmp
var len = uint8.length
var extraBytes = len % 3 // if we have 1 byte left, pad 2 bytes
var parts = []
var maxChunkLength = 16383 // must be multiple of 3
// go through the array every three bytes, we'll deal with trailing stuff later
for (var i = 0, len2 = len - extraBytes; i < len2; i += maxChunkLength) {
parts.push(encodeChunk(uint8, i, (i + maxChunkLength) > len2 ? len2 : (i + maxChunkLength)))
}
// pad the end with zeros, but make sure to not forget the extra bytes
if (extraBytes === 1) {
tmp = uint8[len - 1]
parts.push(
lookup[tmp >> 2] +
lookup[(tmp << 4) & 0x3F] +
'=='
)
} else if (extraBytes === 2) {
tmp = (uint8[len - 2] << 8) + uint8[len - 1]
parts.push(
lookup[tmp >> 10] +
lookup[(tmp >> 4) & 0x3F] +
lookup[(tmp << 2) & 0x3F] +
'='
)
}
return parts.join('')
}
},{}],6:[function(require,module,exports){
},{}],7:[function(require,module,exports){
arguments[4][6][0].apply(exports,arguments)
},{"dup":6}],8:[function(require,module,exports){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
'use strict';
var R = typeof Reflect === 'object' ? Reflect : null
var ReflectApply = R && typeof R.apply === 'function'
? R.apply
: function ReflectApply(target, receiver, args) {
return Function.prototype.apply.call(target, receiver, args);
}
var ReflectOwnKeys
if (R && typeof R.ownKeys === 'function') {
ReflectOwnKeys = R.ownKeys
} else if (Object.getOwnPropertySymbols) {
ReflectOwnKeys = function ReflectOwnKeys(target) {
return Object.getOwnPropertyNames(target)
.concat(Object.getOwnPropertySymbols(target));
};
} else {
ReflectOwnKeys = function ReflectOwnKeys(target) {
return Object.getOwnPropertyNames(target);
};
}
function ProcessEmitWarning(warning) {
if (console && console.warn) console.warn(warning);
}
var NumberIsNaN = Number.isNaN || function NumberIsNaN(value) {
return value !== value;
}
function EventEmitter() {
EventEmitter.init.call(this);
}
module.exports = EventEmitter;
module.exports.once = once;
// Backwards-compat with node 0.10.x
EventEmitter.EventEmitter = EventEmitter;
EventEmitter.prototype._events = undefined;
EventEmitter.prototype._eventsCount = 0;
EventEmitter.prototype._maxListeners = undefined;
// By default EventEmitters will print a warning if more than 10 listeners are
// added to it. This is a useful default which helps find memory leaks.
var defaultMaxListeners = 10;
function checkListener(listener) {
if (typeof listener !== 'function') {
throw new TypeError('The "listener" argument must be of type Function. Received type ' + typeof listener);
}
}
Object.defineProperty(EventEmitter, 'defaultMaxListeners', {
enumerable: true,
get: function() {
return defaultMaxListeners;
},
set: function(arg) {
if (typeof arg !== 'number' || arg < 0 || NumberIsNaN(arg)) {
throw new RangeError('The value of "defaultMaxListeners" is out of range. It must be a non-negative number. Received ' + arg + '.');
}
defaultMaxListeners = arg;
}
});
EventEmitter.init = function() {
if (this._events === undefined ||
this._events === Object.getPrototypeOf(this)._events) {
this._events = Object.create(null);
this._eventsCount = 0;
}
this._maxListeners = this._maxListeners || undefined;
};
// Obviously not all Emitters should be limited to 10. This function allows
// that to be increased. Set to zero for unlimited.
EventEmitter.prototype.setMaxListeners = function setMaxListeners(n) {
if (typeof n !== 'number' || n < 0 || NumberIsNaN(n)) {
throw new RangeError('The value of "n" is out of range. It must be a non-negative number. Received ' + n + '.');
}
this._maxListeners = n;
return this;
};
function _getMaxListeners(that) {
if (that._maxListeners === undefined)
return EventEmitter.defaultMaxListeners;
return that._maxListeners;
}
EventEmitter.prototype.getMaxListeners = function getMaxListeners() {
return _getMaxListeners(this);
};
EventEmitter.prototype.emit = function emit(type) {
var args = [];
for (var i = 1; i < arguments.length; i++) args.push(arguments[i]);
var doError = (type === 'error');
var events = this._events;
if (events !== undefined)
doError = (doError && events.error === undefined);
else if (!doError)
return false;
// If there is no 'error' event listener then throw.
if (doError) {
var er;
if (args.length > 0)
er = args[0];
if (er instanceof Error) {
// Note: The comments on the `throw` lines are intentional, they show
// up in Node's output if this results in an unhandled exception.
throw er; // Unhandled 'error' event
}
// At least give some kind of context to the user
var err = new Error('Unhandled error.' + (er ? ' (' + er.message + ')' : ''));
err.context = er;
throw err; // Unhandled 'error' event
}
var handler = events[type];
if (handler === undefined)
return false;
if (typeof handler === 'function') {
ReflectApply(handler, this, args);
} else {
var len = handler.length;
var listeners = arrayClone(handler, len);
for (var i = 0; i < len; ++i)
ReflectApply(listeners[i], this, args);
}
return true;
};
function _addListener(target, type, listener, prepend) {
var m;
var events;
var existing;
checkListener(listener);
events = target._events;
if (events === undefined) {
events = target._events = Object.create(null);
target._eventsCount = 0;
} else {
// To avoid recursion in the case that type === "newListener"! Before
// adding it to the listeners, first emit "newListener".
if (events.newListener !== undefined) {
target.emit('newListener', type,
listener.listener ? listener.listener : listener);
// Re-assign `events` because a newListener handler could have caused the
// this._events to be assigned to a new object
events = target._events;
}
existing = events[type];
}
if (existing === undefined) {
// Optimize the case of one listener. Don't need the extra array object.
existing = events[type] = listener;
++target._eventsCount;
} else {
if (typeof existing === 'function') {
// Adding the second element, need to change to array.
existing = events[type] =
prepend ? [listener, existing] : [existing, listener];
// If we've already got an array, just append.
} else if (prepend) {
existing.unshift(listener);
} else {
existing.push(listener);
}
// Check for listener leak
m = _getMaxListeners(target);
if (m > 0 && existing.length > m && !existing.warned) {
existing.warned = true;
// No error code for this since it is a Warning
// eslint-disable-next-line no-restricted-syntax
var w = new Error('Possible EventEmitter memory leak detected. ' +
existing.length + ' ' + String(type) + ' listeners ' +
'added. Use emitter.setMaxListeners() to ' +
'increase limit');
w.name = 'MaxListenersExceededWarning';
w.emitter = target;
w.type = type;
w.count = existing.length;
ProcessEmitWarning(w);
}
}
return target;
}
EventEmitter.prototype.addListener = function addListener(type, listener) {
return _addListener(this, type, listener, false);
};
EventEmitter.prototype.on = EventEmitter.prototype.addListener;
EventEmitter.prototype.prependListener =
function prependListener(type, listener) {
return _addListener(this, type, listener, true);
};
function onceWrapper() {
if (!this.fired) {
this.target.removeListener(this.type, this.wrapFn);
this.fired = true;
if (arguments.length === 0)
return this.listener.call(this.target);
return this.listener.apply(this.target, arguments);
}
}
function _onceWrap(target, type, listener) {
var state = { fired: false, wrapFn: undefined, target: target, type: type, listener: listener };
var wrapped = onceWrapper.bind(state);
wrapped.listener = listener;
state.wrapFn = wrapped;
return wrapped;
}
EventEmitter.prototype.once = function once(type, listener) {
checkListener(listener);
this.on(type, _onceWrap(this, type, listener));
return this;
};
EventEmitter.prototype.prependOnceListener =
function prependOnceListener(type, listener) {
checkListener(listener);
this.prependListener(type, _onceWrap(this, type, listener));
return this;
};
// Emits a 'removeListener' event if and only if the listener was removed.
EventEmitter.prototype.removeListener =
function removeListener(type, listener) {
var list, events, position, i, originalListener;
checkListener(listener);
events = this._events;
if (events === undefined)
return this;
list = events[type];
if (list === undefined)
return this;
if (list === listener || list.listener === listener) {
if (--this._eventsCount === 0)
this._events = Object.create(null);
else {
delete events[type];
if (events.removeListener)
this.emit('removeListener', type, list.listener || listener);
}
} else if (typeof list !== 'function') {
position = -1;
for (i = list.length - 1; i >= 0; i--) {
if (list[i] === listener || list[i].listener === listener) {
originalListener = list[i].listener;
position = i;
break;
}
}
if (position < 0)
return this;
if (position === 0)
list.shift();
else {
spliceOne(list, position);
}
if (list.length === 1)
events[type] = list[0];
if (events.removeListener !== undefined)
this.emit('removeListener', type, originalListener || listener);
}
return this;
};
EventEmitter.prototype.off = EventEmitter.prototype.removeListener;
EventEmitter.prototype.removeAllListeners =
function removeAllListeners(type) {
var listeners, events, i;
events = this._events;
if (events === undefined)
return this;
// not listening for removeListener, no need to emit
if (events.removeListener === undefined) {
if (arguments.length === 0) {
this._events = Object.create(null);
this._eventsCount = 0;
} else if (events[type] !== undefined) {
if (--this._eventsCount === 0)
this._events = Object.create(null);
else
delete events[type];
}
return this;
}
// emit removeListener for all listeners on all events
if (arguments.length === 0) {
var keys = Object.keys(events);
var key;
for (i = 0; i < keys.length; ++i) {
key = keys[i];
if (key === 'removeListener') continue;
this.removeAllListeners(key);
}
this.removeAllListeners('removeListener');
this._events = Object.create(null);
this._eventsCount = 0;
return this;
}
listeners = events[type];
if (typeof listeners === 'function') {
this.removeListener(type, listeners);
} else if (listeners !== undefined) {
// LIFO order
for (i = listeners.length - 1; i >= 0; i--) {
this.removeListener(type, listeners[i]);
}
}
return this;
};
function _listeners(target, type, unwrap) {
var events = target._events;
if (events === undefined)
return [];
var evlistener = events[type];
if (evlistener === undefined)
return [];
if (typeof evlistener === 'function')
return unwrap ? [evlistener.listener || evlistener] : [evlistener];
return unwrap ?
unwrapListeners(evlistener) : arrayClone(evlistener, evlistener.length);
}
EventEmitter.prototype.listeners = function listeners(type) {
return _listeners(this, type, true);
};
EventEmitter.prototype.rawListeners = function rawListeners(type) {
return _listeners(this, type, false);
};
EventEmitter.listenerCount = function(emitter, type) {
if (typeof emitter.listenerCount === 'function') {
return emitter.listenerCount(type);
} else {
return listenerCount.call(emitter, type);
}
};
EventEmitter.prototype.listenerCount = listenerCount;
function listenerCount(type) {
var events = this._events;
if (events !== undefined) {
var evlistener = events[type];
if (typeof evlistener === 'function') {
return 1;
} else if (evlistener !== undefined) {
return evlistener.length;
}
}
return 0;
}
EventEmitter.prototype.eventNames = function eventNames() {
return this._eventsCount > 0 ? ReflectOwnKeys(this._events) : [];
};
function arrayClone(arr, n) {
var copy = new Array(n);
for (var i = 0; i < n; ++i)
copy[i] = arr[i];
return copy;
}
function spliceOne(list, index) {
for (; index + 1 < list.length; index++)
list[index] = list[index + 1];
list.pop();
}
function unwrapListeners(arr) {
var ret = new Array(arr.length);
for (var i = 0; i < ret.length; ++i) {
ret[i] = arr[i].listener || arr[i];
}
return ret;
}
function once(emitter, name) {
return new Promise(function (resolve, reject) {
function errorListener(err) {
emitter.removeListener(name, resolver);
reject(err);
}
function resolver() {
if (typeof emitter.removeListener === 'function') {
emitter.removeListener('error', errorListener);
}
resolve([].slice.call(arguments));
};
eventTargetAgnosticAddListener(emitter, name, resolver, { once: true });
if (name !== 'error') {
addErrorHandlerIfEventEmitter(emitter, errorListener, { once: true });
}
});
}
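// Usage sketch for once() (illustrative):
//
//   var ee = new EventEmitter();
//   once(ee, 'ready').then(function (args) { console.log(args); }); // logs [ 42 ]
//   ee.emit('ready', 42);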
function addErrorHandlerIfEventEmitter(emitter, handler, flags) {
if (typeof emitter.on === 'function') {
eventTargetAgnosticAddListener(emitter, 'error', handler, flags);
}
}
function eventTargetAgnosticAddListener(emitter, name, listener, flags) {
if (typeof emitter.on === 'function') {
if (flags.once) {
emitter.once(name, listener);
} else {
emitter.on(name, listener);
}
} else if (typeof emitter.addEventListener === 'function') {
// EventTarget does not have `error` event semantics like Node
// EventEmitters, we do not listen for `error` events here.
emitter.addEventListener(name, function wrapListener(arg) {
// IE does not have builtin `{ once: true }` support so we
// have to do it manually.
if (flags.once) {
emitter.removeEventListener(name, wrapListener);
}
listener(arg);
});
} else {
throw new TypeError('The "emitter" argument must be of type EventEmitter. Received type ' + typeof emitter);
}
}
},{}],9:[function(require,module,exports){
(function (process){(function (){
// 'path' module extracted from Node.js v8.11.1 (only the posix part)
// transpiled with Babel
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
'use strict';
function assertPath(path) {
if (typeof path !== 'string') {
throw new TypeError('Path must be a string. Received ' + JSON.stringify(path));
}
}
// Resolves . and .. elements in a path with directory names
function normalizeStringPosix(path, allowAboveRoot) {
var res = '';
var lastSegmentLength = 0;
var lastSlash = -1;
var dots = 0;
var code;
for (var i = 0; i <= path.length; ++i) {
if (i < path.length)
code = path.charCodeAt(i);
else if (code === 47 /*/*/)
break;
else
code = 47 /*/*/;
if (code === 47 /*/*/) {
if (lastSlash === i - 1 || dots === 1) {
// NOOP
} else if (lastSlash !== i - 1 && dots === 2) {
if (res.length < 2 || lastSegmentLength !== 2 || res.charCodeAt(res.length - 1) !== 46 /*.*/ || res.charCodeAt(res.length - 2) !== 46 /*.*/) {
if (res.length > 2) {
var lastSlashIndex = res.lastIndexOf('/');
if (lastSlashIndex !== res.length - 1) {
if (lastSlashIndex === -1) {
res = '';
lastSegmentLength = 0;
} else {
res = res.slice(0, lastSlashIndex);
lastSegmentLength = res.length - 1 - res.lastIndexOf('/');
}
lastSlash = i;
dots = 0;
continue;
}
} else if (res.length === 2 || res.length === 1) {
res = '';
lastSegmentLength = 0;
lastSlash = i;
dots = 0;
continue;
}
}
if (allowAboveRoot) {
if (res.length > 0)
res += '/..';
else
res = '..';
lastSegmentLength = 2;
}
} else {
if (res.length > 0)
res += '/' + path.slice(lastSlash + 1, i);
else
res = path.slice(lastSlash + 1, i);
lastSegmentLength = i - lastSlash - 1;
}
lastSlash = i;
dots = 0;
} else if (code === 46 /*.*/ && dots !== -1) {
++dots;
} else {
dots = -1;
}
}
return res;
}
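// Worked examples (illustrative):
//   normalizeStringPosix('a/b/../c/./d', false)  // => 'a/c/d'
//   normalizeStringPosix('../a', true)           // => '../a'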
function _format(sep, pathObject) {
var dir = pathObject.dir || pathObject.root;
var base = pathObject.base || (pathObject.name || '') + (pathObject.ext || '');
if (!dir) {
return base;
}
if (dir === pathObject.root) {
return dir + base;
}
return dir + sep + base;
}
var posix = {
// path.resolve([from ...], to)
resolve: function resolve() {
var resolvedPath = '';
var resolvedAbsolute = false;
var cwd;
for (var i = arguments.length - 1; i >= -1 && !resolvedAbsolute; i--) {
var path;
if (i >= 0)
path = arguments[i];
else {
if (cwd === undefined)
cwd = process.cwd();
path = cwd;
}
assertPath(path);
// Skip empty entries
if (path.length === 0) {
continue;
}
resolvedPath = path + '/' + resolvedPath;
resolvedAbsolute = path.charCodeAt(0) === 47 /*/*/;
}
// At this point the path should be resolved to a full absolute path, but
// handle relative paths to be safe (might happen when process.cwd() fails)
// Normalize the path
resolvedPath = normalizeStringPosix(resolvedPath, !resolvedAbsolute);
if (resolvedAbsolute) {
if (resolvedPath.length > 0)
return '/' + resolvedPath;
else
return '/';
} else if (resolvedPath.length > 0) {
return resolvedPath;
} else {
return '.';
}
},
normalize: function normalize(path) {
assertPath(path);
if (path.length === 0) return '.';
var isAbsolute = path.charCodeAt(0) === 47 /*/*/;
var trailingSeparator = path.charCodeAt(path.length - 1) === 47 /*/*/;
// Normalize the path
path = normalizeStringPosix(path, !isAbsolute);
if (path.length === 0 && !isAbsolute) path = '.';
if (path.length > 0 && trailingSeparator) path += '/';
if (isAbsolute) return '/' + path;
return path;
},
isAbsolute: function isAbsolute(path) {
assertPath(path);
return path.length > 0 && path.charCodeAt(0) === 47 /*/*/;
},
join: function join() {
if (arguments.length === 0)
return '.';
var joined;
for (var i = 0; i < arguments.length; ++i) {
var arg = arguments[i];
assertPath(arg);
if (arg.length > 0) {
if (joined === undefined)
joined = arg;
else
joined += '/' + arg;
}
}
if (joined === undefined)
return '.';
return posix.normalize(joined);
},
relative: function relative(from, to) {
assertPath(from);
assertPath(to);
if (from === to) return '';
from = posix.resolve(from);
to = posix.resolve(to);
if (from === to) return '';
// Trim any leading slashes
var fromStart = 1;
for (; fromStart < from.length; ++fromStart) {
if (from.charCodeAt(fromStart) !== 47 /*/*/)
break;
}
var fromEnd = from.length;
var fromLen = fromEnd - fromStart;
// Trim any leading slashes
var toStart = 1;
for (; toStart < to.length; ++toStart) {
if (to.charCodeAt(toStart) !== 47 /*/*/)
break;
}
var toEnd = to.length;
var toLen = toEnd - toStart;
// Compare paths to find the longest common path from root
var length = fromLen < toLen ? fromLen : toLen;
var lastCommonSep = -1;
var i = 0;
for (; i <= length; ++i) {
if (i === length) {
if (toLen > length) {
if (to.charCodeAt(toStart + i) === 47 /*/*/) {
// We get here if `from` is the exact base path for `to`.
// For example: from='/foo/bar'; to='/foo/bar/baz'
return to.slice(toStart + i + 1);
} else if (i === 0) {
// We get here if `from` is the root
// For example: from='/'; to='/foo'
return to.slice(toStart + i);
}
} else if (fromLen > length) {
if (from.charCodeAt(fromStart + i) === 47 /*/*/) {
// We get here if `to` is the exact base path for `from`.
// For example: from='/foo/bar/baz'; to='/foo/bar'
lastCommonSep = i;
} else if (i === 0) {
// We get here if `to` is the root.
// For example: from='/foo'; to='/'
lastCommonSep = 0;
}
}
break;
}
var fromCode = from.charCodeAt(fromStart + i);
var toCode = to.charCodeAt(toStart + i);
if (fromCode !== toCode)
break;
else if (fromCode === 47 /*/*/)
lastCommonSep = i;
}
var out = '';
// Generate the relative path based on the path difference between `to`
// and `from`
for (i = fromStart + lastCommonSep + 1; i <= fromEnd; ++i) {
if (i === fromEnd || from.charCodeAt(i) === 47 /*/*/) {
if (out.length === 0)
out += '..';
else
out += '/..';
}
}
// Lastly, append the rest of the destination (`to`) path that comes after
// the common path parts
if (out.length > 0)
return out + to.slice(toStart + lastCommonSep);
else {
toStart += lastCommonSep;
if (to.charCodeAt(toStart) === 47 /*/*/)
++toStart;
return to.slice(toStart);
}
},
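// Illustrative (mirrors the example in the Node.js docs):
//   posix.relative('/data/orandea/test/aaa', '/data/orandea/impl/bbb')
//   // => '../../impl/bbb'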
_makeLong: function _makeLong(path) {
return path;
},
dirname: function dirname(path) {
assertPath(path);
if (path.length === 0) return '.';
var code = path.charCodeAt(0);
var hasRoot = code === 47 /*/*/;
var end = -1;
var matchedSlash = true;
for (var i = path.length - 1; i >= 1; --i) {
code = path.charCodeAt(i);
if (code === 47 /*/*/) {
if (!matchedSlash) {
end = i;
break;
}
} else {
// We saw the first non-path separator
matchedSlash = false;
}
}
if (end === -1) return hasRoot ? '/' : '.';
if (hasRoot && end === 1) return '//';
return path.slice(0, end);
},
basename: function basename(path, ext) {
if (ext !== undefined && typeof ext !== 'string') throw new TypeError('"ext" argument must be a string');
assertPath(path);
var start = 0;
var end = -1;
var matchedSlash = true;
var i;
if (ext !== undefined && ext.length > 0 && ext.length <= path.length) {
if (ext.length === path.length && ext === path) return '';
var extIdx = ext.length - 1;
var firstNonSlashEnd = -1;
for (i = path.length - 1; i >= 0; --i) {
var code = path.charCodeAt(i);
if (code === 47 /*/*/) {
// If we reached a path separator that was not part of a set of path
// separators at the end of the string, stop now
if (!matchedSlash) {
start = i + 1;
break;
}
} else {
if (firstNonSlashEnd === -1) {
// We saw the first non-path separator, remember this index in case
// we need it if the extension ends up not matching
matchedSlash = false;
firstNonSlashEnd = i + 1;
}
if (extIdx >= 0) {
// Try to match the explicit extension
if (code === ext.charCodeAt(extIdx)) {
if (--extIdx === -1) {
// We matched the extension, so mark this as the end of our path
// component
end = i;
}
} else {
// Extension does not match, so our result is the entire path
// component
extIdx = -1;
end = firstNonSlashEnd;
}
}
}
}
if (start === end) end = firstNonSlashEnd;else if (end === -1) end = path.length;
return path.slice(start, end);
} else {
for (i = path.length - 1; i >= 0; --i) {
if (path.charCodeAt(i) === 47 /*/*/) {
// If we reached a path separator that was not part of a set of path
// separators at the end of the string, stop now
if (!matchedSlash) {
start = i + 1;
break;
}
} else if (end === -1) {
// We saw the first non-path separator, mark this as the end of our
// path component
matchedSlash = false;
end = i + 1;
}
}
if (end === -1) return '';
return path.slice(start, end);
}
},
extname: function extname(path) {
assertPath(path);
var startDot = -1;
var startPart = 0;
var end = -1;
var matchedSlash = true;
// Track the state of characters (if any) we see before our first dot and
// after any path separator we find
var preDotState = 0;
for (var i = path.length - 1; i >= 0; --i) {
var code = path.charCodeAt(i);
if (code === 47 /*/*/) {
// If we reached a path separator that was not part of a set of path
// separators at the end of the string, stop now
if (!matchedSlash) {
startPart = i + 1;
break;
}
continue;
}
if (end === -1) {
// We saw the first non-path separator, mark this as the end of our
// extension
matchedSlash = false;
end = i + 1;
}
if (code === 46 /*.*/) {
// If this is our first dot, mark it as the start of our extension
if (startDot === -1)
startDot = i;
else if (preDotState !== 1)
preDotState = 1;
} else if (startDot !== -1) {
// We saw a non-dot and non-path separator before our dot, so we should
// have a good chance at having a non-empty extension
preDotState = -1;
}
}
if (startDot === -1 || end === -1 ||
// We saw a non-dot character immediately before the dot
preDotState === 0 ||
// The (right-most) trimmed path component is exactly '..'
preDotState === 1 && startDot === end - 1 && startDot === startPart + 1) {
return '';
}
return path.slice(startDot, end);
},
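  // Sketch of `extname` behavior (illustrative, commented out):
  //
  //   posix.extname('index.html');      // => '.html'
  //   posix.extname('index.coffee.md'); // => '.md' (last dot wins)
  //   posix.extname('.index');          // => ''   (dotfiles have no extension)
  //   posix.extname('index.');          // => '.'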
format: function format(pathObject) {
if (pathObject === null || typeof pathObject !== 'object') {
throw new TypeError('The "pathObject" argument must be of type Object. Received type ' + typeof pathObject);
}
return _format('/', pathObject);
},
parse: function parse(path) {
assertPath(path);
var ret = { root: '', dir: '', base: '', ext: '', name: '' };
if (path.length === 0) return ret;
var code = path.charCodeAt(0);
var isAbsolute = code === 47 /*/*/;
var start;
if (isAbsolute) {
ret.root = '/';
start = 1;
} else {
start = 0;
}
var startDot = -1;
var startPart = 0;
var end = -1;
var matchedSlash = true;
var i = path.length - 1;
// Track the state of characters (if any) we see before our first dot and
// after any path separator we find
var preDotState = 0;
// Get non-dir info
for (; i >= start; --i) {
code = path.charCodeAt(i);
if (code === 47 /*/*/) {
// If we reached a path separator that was not part of a set of path
// separators at the end of the string, stop now
if (!matchedSlash) {
startPart = i + 1;
break;
}
continue;
}
if (end === -1) {
// We saw the first non-path separator, mark this as the end of our
// extension
matchedSlash = false;
end = i + 1;
}
if (code === 46 /*.*/) {
// If this is our first dot, mark it as the start of our extension
        if (startDot === -1) startDot = i;
        else if (preDotState !== 1) preDotState = 1;
} else if (startDot !== -1) {
// We saw a non-dot and non-path separator before our dot, so we should
// have a good chance at having a non-empty extension
preDotState = -1;
}
}
if (startDot === -1 || end === -1 ||
// We saw a non-dot character immediately before the dot
preDotState === 0 ||
// The (right-most) trimmed path component is exactly '..'
preDotState === 1 && startDot === end - 1 && startDot === startPart + 1) {
if (end !== -1) {
        if (startPart === 0 && isAbsolute) ret.base = ret.name = path.slice(1, end);
        else ret.base = ret.name = path.slice(startPart, end);
}
} else {
if (startPart === 0 && isAbsolute) {
ret.name = path.slice(1, startDot);
ret.base = path.slice(1, end);
} else {
ret.name = path.slice(startPart, startDot);
ret.base = path.slice(startPart, end);
}
ret.ext = path.slice(startDot, end);
}
    if (startPart > 0) ret.dir = path.slice(0, startPart - 1);
    else if (isAbsolute) ret.dir = '/';
return ret;
},
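  // Sketch of `parse` output (illustrative, commented out):
  //
  //   posix.parse('/home/user/dir/file.txt');
  //   // => { root: '/', dir: '/home/user/dir', base: 'file.txt',
  //   //      ext: '.txt', name: 'file' }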
sep: '/',
delimiter: ':',
win32: null,
posix: null
};
posix.posix = posix;
module.exports = posix;
}).call(this)}).call(this,require('_process'))
},{"_process":100}],10:[function(require,module,exports){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
module.exports = Stream;
var EE = require('events').EventEmitter;
var inherits = require('inherits');
inherits(Stream, EE);
Stream.Readable = require('readable-stream/lib/_stream_readable.js');
Stream.Writable = require('readable-stream/lib/_stream_writable.js');
Stream.Duplex = require('readable-stream/lib/_stream_duplex.js');
Stream.Transform = require('readable-stream/lib/_stream_transform.js');
Stream.PassThrough = require('readable-stream/lib/_stream_passthrough.js');
Stream.finished = require('readable-stream/lib/internal/streams/end-of-stream.js');
Stream.pipeline = require('readable-stream/lib/internal/streams/pipeline.js');
// Backwards-compat with node 0.4.x
Stream.Stream = Stream;
// old-style streams. Note that the pipe method (the only relevant
// part of this class) is overridden in the Readable class.
function Stream() {
EE.call(this);
}
Stream.prototype.pipe = function(dest, options) {
var source = this;
function ondata(chunk) {
if (dest.writable) {
if (false === dest.write(chunk) && source.pause) {
source.pause();
}
}
}
source.on('data', ondata);
function ondrain() {
if (source.readable && source.resume) {
source.resume();
}
}
dest.on('drain', ondrain);
// If the 'end' option is not supplied, dest.end() will be called when
// source gets the 'end' or 'close' events. Only dest.end() once.
if (!dest._isStdio && (!options || options.end !== false)) {
source.on('end', onend);
source.on('close', onclose);
}
var didOnEnd = false;
function onend() {
if (didOnEnd) return;
didOnEnd = true;
dest.end();
}
function onclose() {
if (didOnEnd) return;
didOnEnd = true;
if (typeof dest.destroy === 'function') dest.destroy();
}
// don't leave dangling pipes when there are errors.
function onerror(er) {
cleanup();
if (EE.listenerCount(this, 'error') === 0) {
throw er; // Unhandled stream error in pipe.
}
}
source.on('error', onerror);
dest.on('error', onerror);
// remove all the event listeners that were added.
function cleanup() {
source.removeListener('data', ondata);
dest.removeListener('drain', ondrain);
source.removeListener('end', onend);
source.removeListener('close', onclose);
source.removeListener('error', onerror);
dest.removeListener('error', onerror);
source.removeListener('end', cleanup);
source.removeListener('close', cleanup);
dest.removeListener('close', cleanup);
}
source.on('end', cleanup);
source.on('close', cleanup);
dest.on('close', cleanup);
dest.emit('pipe', source);
// Allow for unix-like usage: A.pipe(B).pipe(C)
return dest;
};
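// Illustrative pipe chain (a sketch in comments; `src` and `dest` stand in
// for hypothetical streams supplied by the caller):
//
//   var pt = new Stream.PassThrough();
//   src.pipe(pt).pipe(dest); // pipe() returns dest, so chains compose
//
// Backpressure falls out of ondata/ondrain above: a false return from
// dest.write() pauses the source until dest emits 'drain'.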
},{"events":8,"inherits":95,"readable-stream/lib/_stream_duplex.js":12,"readable-stream/lib/_stream_passthrough.js":13,"readable-stream/lib/_stream_readable.js":14,"readable-stream/lib/_stream_transform.js":15,"readable-stream/lib/_stream_writable.js":16,"readable-stream/lib/internal/streams/end-of-stream.js":20,"readable-stream/lib/internal/streams/pipeline.js":22}],11:[function(require,module,exports){
'use strict';
function _inheritsLoose(subClass, superClass) {
  subClass.prototype = Object.create(superClass.prototype);
  subClass.prototype.constructor = subClass;
  subClass.__proto__ = superClass;
}
var codes = {};
function createErrorType(code, message, Base) {
if (!Base) {
Base = Error;
}
function getMessage(arg1, arg2, arg3) {
if (typeof message === 'string') {
return message;
} else {
return message(arg1, arg2, arg3);
}
}
var NodeError =
/*#__PURE__*/
function (_Base) {
_inheritsLoose(NodeError, _Base);
      function NodeError(arg1, arg2, arg3) {
return _Base.call(this, getMessage(arg1, arg2, arg3)) || this;
}
return NodeError;
}(Base);
NodeError.prototype.name = Base.name;
NodeError.prototype.code = code;
codes[code] = NodeError;
} // https://github.com/nodejs/node/blob/v10.8.0/lib/internal/errors.js
function oneOf(expected, thing) {
if (Array.isArray(expected)) {
var len = expected.length;
expected = expected.map(function (i) {
return String(i);
});
if (len > 2) {
return "one of ".concat(thing, " ").concat(expected.slice(0, len - 1).join(', '), ", or ") + expected[len - 1];
} else if (len === 2) {
return "one of ".concat(thing, " ").concat(expected[0], " or ").concat(expected[1]);
} else {
return "of ".concat(thing, " ").concat(expected[0]);
}
} else {
return "of ".concat(thing, " ").concat(String(expected));
}
} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/startsWith
function startsWith(str, search, pos) {
return str.substr(!pos || pos < 0 ? 0 : +pos, search.length) === search;
} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/endsWith
function endsWith(str, search, this_len) {
if (this_len === undefined || this_len > str.length) {
this_len = str.length;
}
return str.substring(this_len - search.length, this_len) === search;
} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/includes
function includes(str, search, start) {
if (typeof start !== 'number') {
start = 0;
}
if (start + search.length > str.length) {
return false;
} else {
return str.indexOf(search, start) !== -1;
}
}
createErrorType('ERR_INVALID_OPT_VALUE', function (name, value) {
return 'The value "' + value + '" is invalid for option "' + name + '"';
}, TypeError);
createErrorType('ERR_INVALID_ARG_TYPE', function (name, expected, actual) {
// determiner: 'must be' or 'must not be'
var determiner;
if (typeof expected === 'string' && startsWith(expected, 'not ')) {
determiner = 'must not be';
expected = expected.replace(/^not /, '');
} else {
determiner = 'must be';
}
var msg;
if (endsWith(name, ' argument')) {
// For cases like 'first argument'
msg = "The ".concat(name, " ").concat(determiner, " ").concat(oneOf(expected, 'type'));
} else {
var type = includes(name, '.') ? 'property' : 'argument';
msg = "The \"".concat(name, "\" ").concat(type, " ").concat(determiner, " ").concat(oneOf(expected, 'type'));
}
msg += ". Received type ".concat(typeof actual);
return msg;
}, TypeError);
createErrorType('ERR_STREAM_PUSH_AFTER_EOF', 'stream.push() after EOF');
createErrorType('ERR_METHOD_NOT_IMPLEMENTED', function (name) {
return 'The ' + name + ' method is not implemented';
});
createErrorType('ERR_STREAM_PREMATURE_CLOSE', 'Premature close');
createErrorType('ERR_STREAM_DESTROYED', function (name) {
return 'Cannot call ' + name + ' after a stream was destroyed';
});
createErrorType('ERR_MULTIPLE_CALLBACK', 'Callback called multiple times');
createErrorType('ERR_STREAM_CANNOT_PIPE', 'Cannot pipe, not readable');
createErrorType('ERR_STREAM_WRITE_AFTER_END', 'write after end');
createErrorType('ERR_STREAM_NULL_VALUES', 'May not write null values to stream', TypeError);
createErrorType('ERR_UNKNOWN_ENCODING', function (arg) {
return 'Unknown encoding: ' + arg;
}, TypeError);
createErrorType('ERR_STREAM_UNSHIFT_AFTER_END_EVENT', 'stream.unshift() after end event');
module.exports.codes = codes;
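// Illustrative consumer of the generated error classes (a sketch in comments;
// the require path is hypothetical):
//
//   var codes = require('../errors').codes;
//   var err = new codes.ERR_INVALID_ARG_TYPE('chunk', ['string', 'Buffer'], 42);
//   err.code;    // => 'ERR_INVALID_ARG_TYPE'
//   err.message; // => 'The "chunk" argument must be one of type string or
//                //    Buffer. Received type number'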
},{}],12:[function(require,module,exports){
(function (process){(function (){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
// a duplex stream is just a stream that is both readable and writable.
// Since JS doesn't have multiple prototypal inheritance, this class
// prototypally inherits from Readable, and then parasitically from
// Writable.
'use strict';
/*<replacement>*/
var objectKeys = Object.keys || function (obj) {
var keys = [];
for (var key in obj) {
keys.push(key);
}
return keys;
};
/*</replacement>*/
module.exports = Duplex;
var Readable = require('./_stream_readable');
var Writable = require('./_stream_writable');
require('inherits')(Duplex, Readable);
{
// Allow the keys array to be GC'ed.
var keys = objectKeys(Writable.prototype);
for (var v = 0; v < keys.length; v++) {
var method = keys[v];
if (!Duplex.prototype[method]) Duplex.prototype[method] = Writable.prototype[method];
}
}
function Duplex(options) {
if (!(this instanceof Duplex)) return new Duplex(options);
Readable.call(this, options);
Writable.call(this, options);
this.allowHalfOpen = true;
if (options) {
if (options.readable === false) this.readable = false;
if (options.writable === false) this.writable = false;
if (options.allowHalfOpen === false) {
this.allowHalfOpen = false;
this.once('end', onend);
}
}
}
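// Illustrative Duplex construction (a sketch in comments; the read/write
// options are forwarded to the Readable and Writable constructors above):
//
//   var d = new Duplex({
//     read: function () { this.push('pong'); this.push(null); },
//     write: function (chunk, enc, cb) { cb(); },
//     allowHalfOpen: false // end the writable side once the readable ends
//   });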
Object.defineProperty(Duplex.prototype, 'writableHighWaterMark', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._writableState.highWaterMark;
}
});
Object.defineProperty(Duplex.prototype, 'writableBuffer', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._writableState && this._writableState.getBuffer();
}
});
Object.defineProperty(Duplex.prototype, 'writableLength', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._writableState.length;
}
}); // the no-half-open enforcer
function onend() {
// If the writable side ended, then we're ok.
  if (this._writableState.ended) return;
  // No more data can be written, but allow writes already queued in this
  // tick to land before ending the writable side.
process.nextTick(onEndNT, this);
}
function onEndNT(self) {
self.end();
}
Object.defineProperty(Duplex.prototype, 'destroyed', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
if (this._readableState === undefined || this._writableState === undefined) {
return false;
}
return this._readableState.destroyed && this._writableState.destroyed;
},
set: function set(value) {
// we ignore the value if the stream
// has not been initialized yet
if (this._readableState === undefined || this._writableState === undefined) {
return;
} // backward compatibility, the user is explicitly
// managing destroyed
this._readableState.destroyed = value;
this._writableState.destroyed = value;
}
});
}).call(this)}).call(this,require('_process'))
},{"./_stream_readable":14,"./_stream_writable":16,"_process":100,"inherits":95}],13:[function(require,module,exports){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
// a passthrough stream.
// basically just the most minimal sort of Transform stream.
// Every written chunk gets output as-is.
'use strict';
module.exports = PassThrough;
var Transform = require('./_stream_transform');
require('inherits')(PassThrough, Transform);
function PassThrough(options) {
if (!(this instanceof PassThrough)) return new PassThrough(options);
Transform.call(this, options);
}
PassThrough.prototype._transform = function (chunk, encoding, cb) {
cb(null, chunk);
};
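// Illustrative PassThrough usage (a sketch in comments):
//
//   var pt = new PassThrough();
//   pt.on('data', function (chunk) { console.log(chunk.toString()); });
//   pt.write('hello'); // logs 'hello' once the chunk flows through
//   pt.end();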
},{"./_stream_transform":15,"inherits":95}],14:[function(require,module,exports){
(function (process,global){(function (){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
'use strict';
module.exports = Readable;
/*<replacement>*/
var Duplex;
/*</replacement>*/
Readable.ReadableState = ReadableState;
/*<replacement>*/
var EE = require('events').EventEmitter;
var EElistenerCount = function EElistenerCount(emitter, type) {
return emitter.listeners(type).length;
};
/*</replacement>*/
/*<replacement>*/
var Stream = require('./internal/streams/stream');
/*</replacement>*/
var Buffer = require('buffer').Buffer;
var OurUint8Array = global.Uint8Array || function () {};
function _uint8ArrayToBuffer(chunk) {
return Buffer.from(chunk);
}
function _isUint8Array(obj) {
return Buffer.isBuffer(obj) || obj instanceof OurUint8Array;
}
/*<replacement>*/
var debugUtil = require('util');
var debug;
if (debugUtil && debugUtil.debuglog) {
debug = debugUtil.debuglog('stream');
} else {
debug = function debug() {};
}
/*</replacement>*/
var BufferList = require('./internal/streams/buffer_list');
var destroyImpl = require('./internal/streams/destroy');
var _require = require('./internal/streams/state'),
getHighWaterMark = _require.getHighWaterMark;
var _require$codes = require('../errors').codes,
ERR_INVALID_ARG_TYPE = _require$codes.ERR_INVALID_ARG_TYPE,
ERR_STREAM_PUSH_AFTER_EOF = _require$codes.ERR_STREAM_PUSH_AFTER_EOF,
ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED,
ERR_STREAM_UNSHIFT_AFTER_END_EVENT = _require$codes.ERR_STREAM_UNSHIFT_AFTER_END_EVENT; // Lazy loaded to improve the startup performance.
var StringDecoder;
var createReadableStreamAsyncIterator;
var from;
require('inherits')(Readable, Stream);
var errorOrDestroy = destroyImpl.errorOrDestroy;
var kProxyEvents = ['error', 'close', 'destroy', 'pause', 'resume'];
function prependListener(emitter, event, fn) {
// Sadly this is not cacheable as some libraries bundle their own
// event emitter implementation with them.
  if (typeof emitter.prependListener === 'function') return emitter.prependListener(event, fn);
  // This is a hack to make sure that our error handler is attached before any
// userland ones. NEVER DO THIS. This is here only because this code needs
// to continue to work with older versions of Node.js that do not include
// the prependListener() method. The goal is to eventually remove this hack.
  if (!emitter._events || !emitter._events[event]) emitter.on(event, fn);
  else if (Array.isArray(emitter._events[event])) emitter._events[event].unshift(fn);
  else emitter._events[event] = [fn, emitter._events[event]];
}
function ReadableState(options, stream, isDuplex) {
Duplex = Duplex || require('./_stream_duplex');
options = options || {}; // Duplex streams are both readable and writable, but share
// the same options object.
// However, some cases require setting options to different
// values for the readable and the writable sides of the duplex stream.
// These options can be provided separately as readableXXX and writableXXX.
if (typeof isDuplex !== 'boolean') isDuplex = stream instanceof Duplex; // object stream flag. Used to make read(n) ignore n and to
// make all the buffer merging and length checks go away
this.objectMode = !!options.objectMode;
if (isDuplex) this.objectMode = this.objectMode || !!options.readableObjectMode; // the point at which it stops calling _read() to fill the buffer
// Note: 0 is a valid value, means "don't call _read preemptively ever"
this.highWaterMark = getHighWaterMark(this, options, 'readableHighWaterMark', isDuplex); // A linked list is used to store data chunks instead of an array because the
// linked list can remove elements from the beginning faster than
// array.shift()
this.buffer = new BufferList();
this.length = 0;
this.pipes = null;
this.pipesCount = 0;
this.flowing = null;
this.ended = false;
this.endEmitted = false;
this.reading = false; // a flag to be able to tell if the event 'readable'/'data' is emitted
// immediately, or on a later tick. We set this to true at first, because
// any actions that shouldn't happen until "later" should generally also
// not happen before the first read call.
this.sync = true; // whenever we return null, then we set a flag to say
// that we're awaiting a 'readable' event emission.
this.needReadable = false;
this.emittedReadable = false;
this.readableListening = false;
this.resumeScheduled = false;
this.paused = true; // Should close be emitted on destroy. Defaults to true.
this.emitClose = options.emitClose !== false; // Should .destroy() be called after 'end' (and potentially 'finish')
this.autoDestroy = !!options.autoDestroy; // has it been destroyed
this.destroyed = false; // Crypto is kind of old and crusty. Historically, its default string
// encoding is 'binary' so we have to make this configurable.
// Everything else in the universe uses 'utf8', though.
this.defaultEncoding = options.defaultEncoding || 'utf8'; // the number of writers that are awaiting a drain event in .pipe()s
this.awaitDrain = 0; // if true, a maybeReadMore has been scheduled
this.readingMore = false;
this.decoder = null;
this.encoding = null;
if (options.encoding) {
if (!StringDecoder) StringDecoder = require('string_decoder/').StringDecoder;
this.decoder = new StringDecoder(options.encoding);
this.encoding = options.encoding;
}
}
function Readable(options) {
Duplex = Duplex || require('./_stream_duplex');
if (!(this instanceof Readable)) return new Readable(options); // Checking for a Stream.Duplex instance is faster here instead of inside
// the ReadableState constructor, at least with V8 6.5
var isDuplex = this instanceof Duplex;
this._readableState = new ReadableState(options, this, isDuplex); // legacy
this.readable = true;
if (options) {
if (typeof options.read === 'function') this._read = options.read;
if (typeof options.destroy === 'function') this._destroy = options.destroy;
}
Stream.call(this);
}
Object.defineProperty(Readable.prototype, 'destroyed', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
if (this._readableState === undefined) {
return false;
}
return this._readableState.destroyed;
},
set: function set(value) {
// we ignore the value if the stream
// has not been initialized yet
if (!this._readableState) {
return;
} // backward compatibility, the user is explicitly
// managing destroyed
this._readableState.destroyed = value;
}
});
Readable.prototype.destroy = destroyImpl.destroy;
Readable.prototype._undestroy = destroyImpl.undestroy;
Readable.prototype._destroy = function (err, cb) {
cb(err);
}; // Manually shove something into the read() buffer.
// This returns true if the highWaterMark has not been hit yet,
// similar to how Writable.write() returns true if you should
// write() some more.
Readable.prototype.push = function (chunk, encoding) {
var state = this._readableState;
var skipChunkCheck;
if (!state.objectMode) {
if (typeof chunk === 'string') {
encoding = encoding || state.defaultEncoding;
if (encoding !== state.encoding) {
chunk = Buffer.from(chunk, encoding);
encoding = '';
}
skipChunkCheck = true;
}
} else {
skipChunkCheck = true;
}
return readableAddChunk(this, chunk, encoding, false, skipChunkCheck);
}; // Unshift should *always* be something directly out of read()
Readable.prototype.unshift = function (chunk) {
return readableAddChunk(this, chunk, null, true, false);
};
function readableAddChunk(stream, chunk, encoding, addToFront, skipChunkCheck) {
debug('readableAddChunk', chunk);
var state = stream._readableState;
if (chunk === null) {
state.reading = false;
onEofChunk(stream, state);
} else {
var er;
if (!skipChunkCheck) er = chunkInvalid(state, chunk);
if (er) {
errorOrDestroy(stream, er);
} else if (state.objectMode || chunk && chunk.length > 0) {
if (typeof chunk !== 'string' && !state.objectMode && Object.getPrototypeOf(chunk) !== Buffer.prototype) {
chunk = _uint8ArrayToBuffer(chunk);
}
if (addToFront) {
        if (state.endEmitted) errorOrDestroy(stream, new ERR_STREAM_UNSHIFT_AFTER_END_EVENT());
        else addChunk(stream, state, chunk, true);
} else if (state.ended) {
errorOrDestroy(stream, new ERR_STREAM_PUSH_AFTER_EOF());
} else if (state.destroyed) {
return false;
} else {
state.reading = false;
if (state.decoder && !encoding) {
chunk = state.decoder.write(chunk);
          if (state.objectMode || chunk.length !== 0) addChunk(stream, state, chunk, false);
          else maybeReadMore(stream, state);
} else {
addChunk(stream, state, chunk, false);
}
}
} else if (!addToFront) {
state.reading = false;
maybeReadMore(stream, state);
}
} // We can push more data if we are below the highWaterMark.
// Also, if we have no data yet, we can stand some more bytes.
// This is to work around cases where hwm=0, such as the repl.
return !state.ended && (state.length < state.highWaterMark || state.length === 0);
}
function addChunk(stream, state, chunk, addToFront) {
if (state.flowing && state.length === 0 && !state.sync) {
state.awaitDrain = 0;
stream.emit('data', chunk);
} else {
// update the buffer info.
state.length += state.objectMode ? 1 : chunk.length;
    if (addToFront) state.buffer.unshift(chunk);
    else state.buffer.push(chunk);
if (state.needReadable) emitReadable(stream);
}
maybeReadMore(stream, state);
}
function chunkInvalid(state, chunk) {
var er;
if (!_isUint8Array(chunk) && typeof chunk !== 'string' && chunk !== undefined && !state.objectMode) {
er = new ERR_INVALID_ARG_TYPE('chunk', ['string', 'Buffer', 'Uint8Array'], chunk);
}
return er;
}
Readable.prototype.isPaused = function () {
return this._readableState.flowing === false;
}; // backwards compatibility.
Readable.prototype.setEncoding = function (enc) {
if (!StringDecoder) StringDecoder = require('string_decoder/').StringDecoder;
var decoder = new StringDecoder(enc);
this._readableState.decoder = decoder; // If setEncoding(null), decoder.encoding equals utf8
this._readableState.encoding = this._readableState.decoder.encoding; // Iterate over current buffer to convert already stored Buffers:
var p = this._readableState.buffer.head;
var content = '';
while (p !== null) {
content += decoder.write(p.data);
p = p.next;
}
this._readableState.buffer.clear();
if (content !== '') this._readableState.buffer.push(content);
this._readableState.length = content.length;
return this;
}; // Don't raise the hwm > 1GB
var MAX_HWM = 0x40000000;
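// Worked example of the rounding below (values only, illustrative):
// computeNewHighWaterMark(1000) => 1024, computeNewHighWaterMark(16) => 16
// (powers of two map to themselves), and n >= MAX_HWM clamps to MAX_HWM.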
function computeNewHighWaterMark(n) {
if (n >= MAX_HWM) {
// TODO(ronag): Throw ERR_VALUE_OUT_OF_RANGE.
n = MAX_HWM;
} else {
// Get the next highest power of 2 to prevent increasing hwm excessively in
// tiny amounts
n--;
n |= n >>> 1;
n |= n >>> 2;
n |= n >>> 4;
n |= n >>> 8;
n |= n >>> 16;
n++;
}
return n;
} // This function is designed to be inlinable, so please take care when making
// changes to the function body.
function howMuchToRead(n, state) {
if (n <= 0 || state.length === 0 && state.ended) return 0;
if (state.objectMode) return 1;
  if (n !== n) {
    // n is NaN (read() was called with no numeric argument):
    // only flow one buffer at a time.
    if (state.flowing && state.length) return state.buffer.head.data.length;
    else return state.length;
} // If we're asking for more than the current hwm, then raise the hwm.
if (n > state.highWaterMark) state.highWaterMark = computeNewHighWaterMark(n);
if (n <= state.length) return n; // Don't have enough
if (!state.ended) {
state.needReadable = true;
return 0;
}
return state.length;
} // you can override either this method, or the async _read(n) below.
Readable.prototype.read = function (n) {
debug('read', n);
n = parseInt(n, 10);
var state = this._readableState;
var nOrig = n;
if (n !== 0) state.emittedReadable = false; // if we're doing read(0) to trigger a readable event, but we
// already have a bunch of data in the buffer, then just trigger
// the 'readable' event and move on.
if (n === 0 && state.needReadable && ((state.highWaterMark !== 0 ? state.length >= state.highWaterMark : state.length > 0) || state.ended)) {
debug('read: emitReadable', state.length, state.ended);
    if (state.length === 0 && state.ended) endReadable(this);
    else emitReadable(this);
return null;
}
n = howMuchToRead(n, state); // if we've ended, and we're now clear, then finish it up.
if (n === 0 && state.ended) {
if (state.length === 0) endReadable(this);
return null;
} // All the actual chunk generation logic needs to be
// *below* the call to _read. The reason is that in certain
// synthetic stream cases, such as passthrough streams, _read
// may be a completely synchronous operation which may change
// the state of the read buffer, providing enough data when
// before there was *not* enough.
//
// So, the steps are:
// 1. Figure out what the state of things will be after we do
// a read from the buffer.
//
// 2. If that resulting state will trigger a _read, then call _read.
// Note that this may be asynchronous, or synchronous. Yes, it is
// deeply ugly to write APIs this way, but that still doesn't mean
// that the Readable class should behave improperly, as streams are
// designed to be sync/async agnostic.
// Take note if the _read call is sync or async (ie, if the read call
// has returned yet), so that we know whether or not it's safe to emit
// 'readable' etc.
//
// 3. Actually pull the requested chunks out of the buffer and return.
// if we need a readable event, then we need to do some reading.
var doRead = state.needReadable;
debug('need readable', doRead); // if we currently have less than the highWaterMark, then also read some
if (state.length === 0 || state.length - n < state.highWaterMark) {
doRead = true;
debug('length less than watermark', doRead);
} // however, if we've ended, then there's no point, and if we're already
// reading, then it's unnecessary.
if (state.ended || state.reading) {
doRead = false;
debug('reading or ended', doRead);
} else if (doRead) {
debug('do read');
state.reading = true;
state.sync = true; // if the length is currently zero, then we *need* a readable event.
if (state.length === 0) state.needReadable = true; // call internal read method
this._read(state.highWaterMark);
state.sync = false; // If _read pushed data synchronously, then `reading` will be false,
// and we need to re-evaluate how much data we can return to the user.
if (!state.reading) n = howMuchToRead(nOrig, state);
}
var ret;
  if (n > 0) ret = fromList(n, state);
  else ret = null;
if (ret === null) {
state.needReadable = state.length <= state.highWaterMark;
n = 0;
} else {
state.length -= n;
state.awaitDrain = 0;
}
if (state.length === 0) {
// If we have nothing in the buffer, then we want to know
// as soon as we *do* get something into the buffer.
if (!state.ended) state.needReadable = true; // If we tried to read() past the EOF, then emit end on the next tick.
if (nOrig !== n && state.ended) endReadable(this);
}
if (ret !== null) this.emit('data', ret);
return ret;
};
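// Illustrative paused-mode consumption of read() (a sketch in comments; the
// no-op `read` option makes this a purely push-driven source):
//
//   var r = new Readable({ read: function () {} });
//   r.push('abcdef');
//   r.on('readable', function () {
//     var chunk;
//     while ((chunk = r.read(2)) !== null) {
//       // yields three 2-byte Buffers: 'ab', 'cd', 'ef'
//     }
//   });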
function onEofChunk(stream, state) {
debug('onEofChunk');
if (state.ended) return;
if (state.decoder) {
var chunk = state.decoder.end();
if (chunk && chunk.length) {
state.buffer.push(chunk);
state.length += state.objectMode ? 1 : chunk.length;
}
}
state.ended = true;
if (state.sync) {
// if we are sync, wait until next tick to emit the data.
// Otherwise we risk emitting data in the flow()
// the readable code triggers during a read() call
emitReadable(stream);
} else {
// emit 'readable' now to make sure it gets picked up.
state.needReadable = false;
if (!state.emittedReadable) {
state.emittedReadable = true;
emitReadable_(stream);
}
}
} // Don't emit readable right away in sync mode, because this can trigger
// another read() call => stack overflow. This way, it might trigger
// a nextTick recursion warning, but that's not so bad.
function emitReadable(stream) {
var state = stream._readableState;
debug('emitReadable', state.needReadable, state.emittedReadable);
state.needReadable = false;
if (!state.emittedReadable) {
debug('emitReadable', state.flowing);
state.emittedReadable = true;
process.nextTick(emitReadable_, stream);
}
}
function emitReadable_(stream) {
var state = stream._readableState;
debug('emitReadable_', state.destroyed, state.length, state.ended);
if (!state.destroyed && (state.length || state.ended)) {
stream.emit('readable');
state.emittedReadable = false;
} // The stream needs another readable event if
// 1. It is not flowing, as the flow mechanism will take
// care of it.
// 2. It is not ended.
// 3. It is below the highWaterMark, so we can schedule
// another readable later.
state.needReadable = !state.flowing && !state.ended && state.length <= state.highWaterMark;
flow(stream);
} // at this point, the user has presumably seen the 'readable' event,
// and called read() to consume some data. that may have triggered
// in turn another _read(n) call, in which case reading = true if
// it's in progress.
// However, if we're not ended, or reading, and the length < hwm,
// then go ahead and try to read some more preemptively.
function maybeReadMore(stream, state) {
if (!state.readingMore) {
state.readingMore = true;
process.nextTick(maybeReadMore_, stream, state);
}
}
function maybeReadMore_(stream, state) {
// Attempt to read more data if we should.
//
// The conditions for reading more data are (one of):
// - Not enough data buffered (state.length < state.highWaterMark). The loop
// is responsible for filling the buffer with enough data if such data
// is available. If highWaterMark is 0 and we are not in the flowing mode
// we should _not_ attempt to buffer any extra data. We'll get more data
// when the stream consumer calls read() instead.
// - No data in the buffer, and the stream is in flowing mode. In this mode
// the loop below is responsible for ensuring read() is called. Failing to
// call read here would abort the flow and there's no other mechanism for
// continuing the flow if the stream consumer has just subscribed to the
// 'data' event.
//
// In addition to the above conditions to keep reading data, the following
// conditions prevent the data from being read:
// - The stream has ended (state.ended).
// - There is already a pending 'read' operation (state.reading). This is a
  //   case where the stream has called the implementation-defined _read()
// method, but they are processing the call asynchronously and have _not_
// called push() with new data. In this case we skip performing more
// read()s. The execution ends in this method again after the _read() ends
// up calling push() with more data.
while (!state.reading && !state.ended && (state.length < state.highWaterMark || state.flowing && state.length === 0)) {
var len = state.length;
debug('maybeReadMore read 0');
stream.read(0);
if (len === state.length) // didn't get any data, stop spinning.
break;
}
state.readingMore = false;
} // abstract method. to be overridden in specific implementation classes.
// call cb(er, data) where data is <= n in length.
// for virtual (non-string, non-buffer) streams, "length" is somewhat
// arbitrary, and perhaps not very meaningful.
Readable.prototype._read = function (n) {
errorOrDestroy(this, new ERR_METHOD_NOT_IMPLEMENTED('_read()'));
};
Readable.prototype.pipe = function (dest, pipeOpts) {
var src = this;
var state = this._readableState;
switch (state.pipesCount) {
case 0:
state.pipes = dest;
break;
case 1:
state.pipes = [state.pipes, dest];
break;
default:
state.pipes.push(dest);
break;
}
state.pipesCount += 1;
debug('pipe count=%d opts=%j', state.pipesCount, pipeOpts);
var doEnd = (!pipeOpts || pipeOpts.end !== false) && dest !== process.stdout && dest !== process.stderr;
var endFn = doEnd ? onend : unpipe;
  if (state.endEmitted) process.nextTick(endFn);
  else src.once('end', endFn);
dest.on('unpipe', onunpipe);
function onunpipe(readable, unpipeInfo) {
debug('onunpipe');
if (readable === src) {
if (unpipeInfo && unpipeInfo.hasUnpiped === false) {
unpipeInfo.hasUnpiped = true;
cleanup();
}
}
}
function onend() {
debug('onend');
dest.end();
} // when the dest drains, it reduces the awaitDrain counter
// on the source. This would be more elegant with a .once()
// handler in flow(), but adding and removing repeatedly is
// too slow.
var ondrain = pipeOnDrain(src);
dest.on('drain', ondrain);
var cleanedUp = false;
function cleanup() {
debug('cleanup'); // cleanup event handlers once the pipe is broken
dest.removeListener('close', onclose);
dest.removeListener('finish', onfinish);
dest.removeListener('drain', ondrain);
dest.removeListener('error', onerror);
dest.removeListener('unpipe', onunpipe);
src.removeListener('end', onend);
src.removeListener('end', unpipe);
src.removeListener('data', ondata);
cleanedUp = true; // if the reader is waiting for a drain event from this
// specific writer, then it would cause it to never start
// flowing again.
// So, if this is awaiting a drain, then we just call it now.
// If we don't know, then assume that we are waiting for one.
if (state.awaitDrain && (!dest._writableState || dest._writableState.needDrain)) ondrain();
}
src.on('data', ondata);
function ondata(chunk) {
debug('ondata');
var ret = dest.write(chunk);
debug('dest.write', ret);
if (ret === false) {
// If the user unpiped during `dest.write()`, it is possible
// to get stuck in a permanently paused state if that write
// also returned false.
// => Check whether `dest` is still a piping destination.
if ((state.pipesCount === 1 && state.pipes === dest || state.pipesCount > 1 && indexOf(state.pipes, dest) !== -1) && !cleanedUp) {
debug('false write response, pause', state.awaitDrain);
state.awaitDrain++;
}
src.pause();
}
} // if the dest has an error, then stop piping into it.
// however, don't suppress the throwing behavior for this.
function onerror(er) {
debug('onerror', er);
unpipe();
dest.removeListener('error', onerror);
if (EElistenerCount(dest, 'error') === 0) errorOrDestroy(dest, er);
} // Make sure our error handler is attached before userland ones.
prependListener(dest, 'error', onerror); // Both close and finish should trigger unpipe, but only once.
function onclose() {
dest.removeListener('finish', onfinish);
unpipe();
}
dest.once('close', onclose);
function onfinish() {
debug('onfinish');
dest.removeListener('close', onclose);
unpipe();
}
dest.once('finish', onfinish);
function unpipe() {
debug('unpipe');
src.unpipe(dest);
} // tell the dest that it's being piped to
dest.emit('pipe', src); // start the flow if it hasn't been started already.
if (!state.flowing) {
debug('pipe resume');
src.resume();
}
return dest;
};
function pipeOnDrain(src) {
return function pipeOnDrainFunctionResult() {
var state = src._readableState;
debug('pipeOnDrain', state.awaitDrain);
if (state.awaitDrain) state.awaitDrain--;
if (state.awaitDrain === 0 && EElistenerCount(src, 'data')) {
state.flowing = true;
flow(src);
}
};
}
Readable.prototype.unpipe = function (dest) {
var state = this._readableState;
var unpipeInfo = {
hasUnpiped: false
}; // if we're not piping anywhere, then do nothing.
if (state.pipesCount === 0) return this; // just one destination. most common case.
if (state.pipesCount === 1) {
// passed in one, but it's not the right one.
if (dest && dest !== state.pipes) return this;
if (!dest) dest = state.pipes; // got a match.
state.pipes = null;
state.pipesCount = 0;
state.flowing = false;
if (dest) dest.emit('unpipe', this, unpipeInfo);
return this;
} // slow case. multiple pipe destinations.
if (!dest) {
// remove all.
var dests = state.pipes;
var len = state.pipesCount;
state.pipes = null;
state.pipesCount = 0;
state.flowing = false;
for (var i = 0; i < len; i++) {
dests[i].emit('unpipe', this, {
hasUnpiped: false
});
}
return this;
} // try to find the right one.
var index = indexOf(state.pipes, dest);
if (index === -1) return this;
state.pipes.splice(index, 1);
state.pipesCount -= 1;
if (state.pipesCount === 1) state.pipes = state.pipes[0];
dest.emit('unpipe', this, unpipeInfo);
return this;
}; // set up data events if they are asked for
// Ensure readable listeners eventually get something
Readable.prototype.on = function (ev, fn) {
var res = Stream.prototype.on.call(this, ev, fn);
var state = this._readableState;
if (ev === 'data') {
// update readableListening so that resume() may be a no-op
// a few lines down. This is needed to support once('readable').
state.readableListening = this.listenerCount('readable') > 0; // Try start flowing on next tick if stream isn't explicitly paused
if (state.flowing !== false) this.resume();
} else if (ev === 'readable') {
if (!state.endEmitted && !state.readableListening) {
state.readableListening = state.needReadable = true;
state.flowing = false;
state.emittedReadable = false;
debug('on readable', state.length, state.reading);
if (state.length) {
emitReadable(this);
} else if (!state.reading) {
process.nextTick(nReadingNextTick, this);
}
}
}
return res;
};
Readable.prototype.addListener = Readable.prototype.on;
Readable.prototype.removeListener = function (ev, fn) {
var res = Stream.prototype.removeListener.call(this, ev, fn);
if (ev === 'readable') {
// We need to check if there is someone still listening to
// readable and reset the state. However this needs to happen
// after readable has been emitted but before I/O (nextTick) to
// support once('readable', fn) cycles. This means that calling
// resume within the same tick will have no
// effect.
process.nextTick(updateReadableListening, this);
}
return res;
};
Readable.prototype.removeAllListeners = function (ev) {
var res = Stream.prototype.removeAllListeners.apply(this, arguments);
if (ev === 'readable' || ev === undefined) {
// We need to check if there is someone still listening to
// readable and reset the state. However this needs to happen
// after readable has been emitted but before I/O (nextTick) to
// support once('readable', fn) cycles. This means that calling
// resume within the same tick will have no
// effect.
process.nextTick(updateReadableListening, this);
}
return res;
};
function updateReadableListening(self) {
var state = self._readableState;
state.readableListening = self.listenerCount('readable') > 0;
if (state.resumeScheduled && !state.paused) {
// flowing needs to be set to true now, otherwise
// the upcoming resume will not flow.
state.flowing = true; // crude way to check if we should resume
} else if (self.listenerCount('data') > 0) {
self.resume();
}
}
function nReadingNextTick(self) {
debug('readable nexttick read 0');
self.read(0);
} // pause() and resume() are remnants of the legacy readable stream API
// If the user uses them, then switch into old mode.
Readable.prototype.resume = function () {
var state = this._readableState;
if (!state.flowing) {
debug('resume'); // we flow only if there is no one listening
// for readable, but we still have to call
// resume()
state.flowing = !state.readableListening;
resume(this, state);
}
state.paused = false;
return this;
};
function resume(stream, state) {
if (!state.resumeScheduled) {
state.resumeScheduled = true;
process.nextTick(resume_, stream, state);
}
}
function resume_(stream, state) {
debug('resume', state.reading);
if (!state.reading) {
stream.read(0);
}
state.resumeScheduled = false;
stream.emit('resume');
flow(stream);
if (state.flowing && !state.reading) stream.read(0);
}
Readable.prototype.pause = function () {
debug('call pause flowing=%j', this._readableState.flowing);
if (this._readableState.flowing !== false) {
debug('pause');
this._readableState.flowing = false;
this.emit('pause');
}
this._readableState.paused = true;
return this;
};
function flow(stream) {
var state = stream._readableState;
debug('flow', state.flowing);
  // read() emits 'data' as a side effect while flowing, so keep calling it
  // until the buffer drains or the stream stops flowing.
  while (state.flowing && stream.read() !== null);
} // wrap an old-style stream as the async data source.
// This is *not* part of the readable stream interface.
// It is an ugly unfortunate mess of history.
Readable.prototype.wrap = function (stream) {
var _this = this;
var state = this._readableState;
var paused = false;
stream.on('end', function () {
debug('wrapped end');
if (state.decoder && !state.ended) {
var chunk = state.decoder.end();
if (chunk && chunk.length) _this.push(chunk);
}
_this.push(null);
});
stream.on('data', function (chunk) {
debug('wrapped data');
if (state.decoder) chunk = state.decoder.write(chunk); // don't skip over falsy values in objectMode
    if (state.objectMode && (chunk === null || chunk === undefined)) return;
    else if (!state.objectMode && (!chunk || !chunk.length)) return;
var ret = _this.push(chunk);
if (!ret) {
paused = true;
stream.pause();
}
}); // proxy all the other methods.
// important when wrapping filters and duplexes.
for (var i in stream) {
if (this[i] === undefined && typeof stream[i] === 'function') {
this[i] = function methodWrap(method) {
return function methodWrapReturnFunction() {
return stream[method].apply(stream, arguments);
};
}(i);
}
} // proxy certain important events.
for (var n = 0; n < kProxyEvents.length; n++) {
stream.on(kProxyEvents[n], this.emit.bind(this, kProxyEvents[n]));
} // when we try to consume some more bytes, simply unpause the
// underlying stream.
this._read = function (n) {
debug('wrapped _read', n);
if (paused) {
paused = false;
stream.resume();
}
};
return this;
};
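// Illustrative use of wrap() (a sketch in comments; `oldStream` stands in for
// a hypothetical 0.x-era stream that only emits 'data'/'end'):
//
//   var modern = new Readable({ highWaterMark: 16 }).wrap(oldStream);
//   modern.on('data', function (chunk) { /* consume */ });
//
// No `read` option is needed: wrap() replaces this._read just above.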
if (typeof Symbol === 'function') {
Readable.prototype[Symbol.asyncIterator] = function () {
if (createReadableStreamAsyncIterator === undefined) {
createReadableStreamAsyncIterator = require('./internal/streams/async_iterator');
}
return createReadableStreamAsyncIterator(this);
};
}
Object.defineProperty(Readable.prototype, 'readableHighWaterMark', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._readableState.highWaterMark;
}
});
Object.defineProperty(Readable.prototype, 'readableBuffer', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._readableState && this._readableState.buffer;
}
});
Object.defineProperty(Readable.prototype, 'readableFlowing', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._readableState.flowing;
},
set: function set(state) {
if (this._readableState) {
this._readableState.flowing = state;
}
}
}); // exposed for testing purposes only.
Readable._fromList = fromList;
Object.defineProperty(Readable.prototype, 'readableLength', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._readableState.length;
}
}); // Pluck off n bytes from an array of buffers.
// Length is the combined lengths of all the buffers in the list.
// This function is designed to be inlinable, so please take care when making
// changes to the function body.
function fromList(n, state) {
// nothing buffered
if (state.length === 0) return null;
var ret;
  if (state.objectMode) ret = state.buffer.shift();
  else if (!n || n >= state.length) {
    // read it all, truncate the list
    if (state.decoder) ret = state.buffer.join('');
    else if (state.buffer.length === 1) ret = state.buffer.first();
    else ret = state.buffer.concat(state.length);
state.buffer.clear();
} else {
// read part of list
ret = state.buffer.consume(n, state.decoder);
}
return ret;
}
function endReadable(stream) {
var state = stream._readableState;
debug('endReadable', state.endEmitted);
if (!state.endEmitted) {
state.ended = true;
process.nextTick(endReadableNT, state, stream);
}
}
function endReadableNT(state, stream) {
debug('endReadableNT', state.endEmitted, state.length); // Check that we didn't get one last unshift.
if (!state.endEmitted && state.length === 0) {
state.endEmitted = true;
stream.readable = false;
stream.emit('end');
if (state.autoDestroy) {
// In case of duplex streams we need a way to detect
// if the writable side is ready for autoDestroy as well
var wState = stream._writableState;
if (!wState || wState.autoDestroy && wState.finished) {
stream.destroy();
}
}
}
}
if (typeof Symbol === 'function') {
Readable.from = function (iterable, opts) {
if (from === undefined) {
from = require('./internal/streams/from');
}
return from(Readable, iterable, opts);
};
}
function indexOf(xs, x) {
for (var i = 0, l = xs.length; i < l; i++) {
if (xs[i] === x) return i;
}
return -1;
}
}).call(this)}).call(this,require('_process'),typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{"../errors":11,"./_stream_duplex":12,"./internal/streams/async_iterator":17,"./internal/streams/buffer_list":18,"./internal/streams/destroy":19,"./internal/streams/from":21,"./internal/streams/state":23,"./internal/streams/stream":24,"_process":100,"buffer":25,"events":8,"inherits":95,"string_decoder/":107,"util":6}],15:[function(require,module,exports){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
// a transform stream is a readable/writable stream where you do
// something with the data. Sometimes it's called a "filter",
// but that's not a great name for it, since that implies a thing where
// some bits pass through, and others are simply ignored. (That would
// be a valid example of a transform, of course.)
//
// While the output is causally related to the input, it's not a
// necessarily symmetric or synchronous transformation. For example,
// a zlib stream might take multiple plain-text writes(), and then
// emit a single compressed chunk some time in the future.
//
// Here's how this works:
//
// The Transform stream has all the aspects of the readable and writable
// stream classes. When you write(chunk), that calls _write(chunk,cb)
// internally, and returns false if there's a lot of pending writes
// buffered up. When you call read(), that calls _read(n) until
// there's enough pending readable data buffered up.
//
// In a transform stream, the written data is placed in a buffer. When
// _read(n) is called, it transforms the queued up data, calling the
// buffered _write cb's as it consumes chunks. If consuming a single
// written chunk would result in multiple output chunks, then the first
// outputted bit calls the readcb, and subsequent chunks just go into
// the read buffer, and will cause it to emit 'readable' if necessary.
//
// This way, back-pressure is actually determined by the reading side,
// since _read has to be called to start processing a new chunk. However,
// a pathological inflate type of transform can cause excessive buffering
// here. For example, imagine a stream where every byte of input is
// interpreted as an integer from 0-255, and then results in that many
// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in
// 1kb of data being output. In this case, you could write a very small
// amount of input, and end up with a very large amount of output. In
// such a pathological inflating mechanism, there'd be no way to tell
// the system to stop doing the transform. A single 4MB write could
// cause the system to run out of memory.
//
// However, even in such a pathological case, only a single written chunk
// would be consumed, and then the rest would wait (un-transformed) until
// the results of the previous transformed chunk were consumed.
'use strict';
module.exports = Transform;
var _require$codes = require('../errors').codes,
ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED,
ERR_MULTIPLE_CALLBACK = _require$codes.ERR_MULTIPLE_CALLBACK,
ERR_TRANSFORM_ALREADY_TRANSFORMING = _require$codes.ERR_TRANSFORM_ALREADY_TRANSFORMING,
ERR_TRANSFORM_WITH_LENGTH_0 = _require$codes.ERR_TRANSFORM_WITH_LENGTH_0;
var Duplex = require('./_stream_duplex');
require('inherits')(Transform, Duplex);
function afterTransform(er, data) {
var ts = this._transformState;
ts.transforming = false;
var cb = ts.writecb;
if (cb === null) {
return this.emit('error', new ERR_MULTIPLE_CALLBACK());
}
ts.writechunk = null;
ts.writecb = null;
  if (data != null) // the loose (!=) check matches both `null` and `undefined`
this.push(data);
cb(er);
var rs = this._readableState;
rs.reading = false;
if (rs.needReadable || rs.length < rs.highWaterMark) {
this._read(rs.highWaterMark);
}
}
function Transform(options) {
if (!(this instanceof Transform)) return new Transform(options);
Duplex.call(this, options);
this._transformState = {
afterTransform: afterTransform.bind(this),
needTransform: false,
transforming: false,
writecb: null,
writechunk: null,
writeencoding: null
}; // start out asking for a readable event once data is transformed.
this._readableState.needReadable = true; // we have implemented the _read method, and done the other things
// that Readable wants before the first _read call, so unset the
// sync guard flag.
this._readableState.sync = false;
if (options) {
if (typeof options.transform === 'function') this._transform = options.transform;
if (typeof options.flush === 'function') this._flush = options.flush;
} // When the writable side finishes, then flush out anything remaining.
this.on('prefinish', prefinish);
}
function prefinish() {
var _this = this;
if (typeof this._flush === 'function' && !this._readableState.destroyed) {
this._flush(function (er, data) {
done(_this, er, data);
});
} else {
done(this, null, null);
}
}
Transform.prototype.push = function (chunk, encoding) {
this._transformState.needTransform = false;
return Duplex.prototype.push.call(this, chunk, encoding);
}; // This is the part where you do stuff!
// override this function in implementation classes.
// 'chunk' is an input chunk.
//
// Call `push(newChunk)` to pass along transformed output
// to the readable side. You may call 'push' zero or more times.
//
// Call `cb(err)` when you are done with this chunk. If you pass
// an error, then that'll put the hurt on the whole operation. If you
// never call cb(), then you'll never get another chunk.
Transform.prototype._transform = function (chunk, encoding, cb) {
cb(new ERR_METHOD_NOT_IMPLEMENTED('_transform()'));
};
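// Illustrative sketch (not part of this bundle): a minimal _transform()
// override following the contract documented above. Push zero or more
// output chunks, then call cb() exactly once per input chunk; calling it
// twice trips the ERR_MULTIPLE_CALLBACK guard in afterTransform. The class
// name is hypothetical.
/*
var Transform = require('stream').Transform; // assumes a Node-style environment
require('inherits')(UpperCase, Transform);
function UpperCase(options) {
  if (!(this instanceof UpperCase)) return new UpperCase(options);
  Transform.call(this, options);
}
UpperCase.prototype._transform = function (chunk, encoding, cb) {
  this.push(chunk.toString('utf8').toUpperCase()); // one chunk in, one out
  cb();
};
*/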
Transform.prototype._write = function (chunk, encoding, cb) {
var ts = this._transformState;
ts.writecb = cb;
ts.writechunk = chunk;
ts.writeencoding = encoding;
if (!ts.transforming) {
var rs = this._readableState;
if (ts.needTransform || rs.needReadable || rs.length < rs.highWaterMark) this._read(rs.highWaterMark);
}
}; // Doesn't matter what the args are here.
// _transform does all the work.
// The fact that we got here means that the readable side wants more data.
Transform.prototype._read = function (n) {
var ts = this._transformState;
if (ts.writechunk !== null && !ts.transforming) {
ts.transforming = true;
this._transform(ts.writechunk, ts.writeencoding, ts.afterTransform);
} else {
// mark that we need a transform, so that any data that comes in
// will get processed, now that we've asked for it.
ts.needTransform = true;
}
};
Transform.prototype._destroy = function (err, cb) {
Duplex.prototype._destroy.call(this, err, function (err2) {
cb(err2);
});
};
function done(stream, er, data) {
if (er) return stream.emit('error', er);
if (data != null) // single equals check for both `null` and `undefined`
stream.push(data); // TODO(BridgeAR): Write a test for these two error cases
// if there's nothing in the write buffer, then that means
// that nothing more will ever be provided
if (stream._writableState.length) throw new ERR_TRANSFORM_WITH_LENGTH_0();
if (stream._transformState.transforming) throw new ERR_TRANSFORM_ALREADY_TRANSFORMING();
return stream.push(null);
}
},{"../errors":11,"./_stream_duplex":12,"inherits":95}],16:[function(require,module,exports){
(function (process,global){(function (){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
// A bit simpler than readable streams.
// Implement an async ._write(chunk, encoding, cb), and it'll handle all
// the drain event emission and buffering.
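//
// Illustrative sketch (not part of this bundle): the async _write() contract
// described above. The stream handles buffering and 'drain' emission; the
// implementer only consumes one chunk at a time. Names are hypothetical.
/*
var Writable = require('stream').Writable; // assumes a Node-style environment
var sink = new Writable({
  write: function (chunk, encoding, cb) {
    // Simulate a slow underlying resource; 'drain' is emitted for us once
    // the internal buffer empties below the high-water mark.
    setTimeout(function () { cb(); }, 10);
  }
});
sink.write('hello');
sink.end('world');
*/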
'use strict';
module.exports = Writable;
/* <replacement> */
function WriteReq(chunk, encoding, cb) {
this.chunk = chunk;
this.encoding = encoding;
this.callback = cb;
this.next = null;
} // It looks like a linked list, but it is not:
// there will only ever be two of these per stream
function CorkedRequest(state) {
var _this = this;
this.next = null;
this.entry = null;
this.finish = function () {
onCorkedFinish(_this, state);
};
}
/* </replacement> */
/*<replacement>*/
var Duplex;
/*</replacement>*/
Writable.WritableState = WritableState;
/*<replacement>*/
var internalUtil = {
deprecate: require('util-deprecate')
};
/*</replacement>*/
/*<replacement>*/
var Stream = require('./internal/streams/stream');
/*</replacement>*/
var Buffer = require('buffer').Buffer;
var OurUint8Array = global.Uint8Array || function () {};
function _uint8ArrayToBuffer(chunk) {
return Buffer.from(chunk);
}
function _isUint8Array(obj) {
return Buffer.isBuffer(obj) || obj instanceof OurUint8Array;
}
var destroyImpl = require('./internal/streams/destroy');
var _require = require('./internal/streams/state'),
getHighWaterMark = _require.getHighWaterMark;
var _require$codes = require('../errors').codes,
ERR_INVALID_ARG_TYPE = _require$codes.ERR_INVALID_ARG_TYPE,
ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED,
ERR_MULTIPLE_CALLBACK = _require$codes.ERR_MULTIPLE_CALLBACK,
ERR_STREAM_CANNOT_PIPE = _require$codes.ERR_STREAM_CANNOT_PIPE,
ERR_STREAM_DESTROYED = _require$codes.ERR_STREAM_DESTROYED,
ERR_STREAM_NULL_VALUES = _require$codes.ERR_STREAM_NULL_VALUES,
ERR_STREAM_WRITE_AFTER_END = _require$codes.ERR_STREAM_WRITE_AFTER_END,
ERR_UNKNOWN_ENCODING = _require$codes.ERR_UNKNOWN_ENCODING;
var errorOrDestroy = destroyImpl.errorOrDestroy;
require('inherits')(Writable, Stream);
function nop() {}
function WritableState(options, stream, isDuplex) {
Duplex = Duplex || require('./_stream_duplex');
options = options || {}; // Duplex streams are both readable and writable, but share
// the same options object.
// However, some cases require setting options to different
// values for the readable and the writable sides of the duplex stream,
// e.g. options.readableObjectMode vs. options.writableObjectMode, etc.
if (typeof isDuplex !== 'boolean') isDuplex = stream instanceof Duplex; // object stream flag to indicate whether or not this stream
// contains buffers or objects.
this.objectMode = !!options.objectMode;
if (isDuplex) this.objectMode = this.objectMode || !!options.writableObjectMode; // the point at which write() starts returning false
  // Note: 0 is a valid value, meaning that we always return false if
// the entire buffer is not flushed immediately on write()
this.highWaterMark = getHighWaterMark(this, options, 'writableHighWaterMark', isDuplex); // if _final has been called
this.finalCalled = false; // drain event flag.
this.needDrain = false; // at the start of calling end()
this.ending = false; // when end() has been called, and returned
this.ended = false; // when 'finish' is emitted
this.finished = false; // has it been destroyed
this.destroyed = false; // should we decode strings into buffers before passing to _write?
// this is here so that some node-core streams can optimize string
// handling at a lower level.
var noDecode = options.decodeStrings === false;
this.decodeStrings = !noDecode; // Crypto is kind of old and crusty. Historically, its default string
// encoding is 'binary' so we have to make this configurable.
// Everything else in the universe uses 'utf8', though.
this.defaultEncoding = options.defaultEncoding || 'utf8'; // not an actual buffer we keep track of, but a measurement
// of how much we're waiting to get pushed to some underlying
// socket or file.
this.length = 0; // a flag to see when we're in the middle of a write.
this.writing = false; // when true all writes will be buffered until .uncork() call
this.corked = 0; // a flag to be able to tell if the onwrite cb is called immediately,
// or on a later tick. We set this to true at first, because any
// actions that shouldn't happen until "later" should generally also
// not happen before the first write call.
this.sync = true; // a flag to know if we're processing previously buffered items, which
// may call the _write() callback in the same tick, so that we don't
// end up in an overlapped onwrite situation.
this.bufferProcessing = false; // the callback that's passed to _write(chunk,cb)
this.onwrite = function (er) {
onwrite(stream, er);
}; // the callback that the user supplies to write(chunk,encoding,cb)
this.writecb = null; // the amount that is being written when _write is called.
this.writelen = 0;
this.bufferedRequest = null;
this.lastBufferedRequest = null; // number of pending user-supplied write callbacks
// this must be 0 before 'finish' can be emitted
this.pendingcb = 0; // emit prefinish if the only thing we're waiting for is _write cbs
// This is relevant for synchronous Transform streams
this.prefinished = false; // True if the error was already emitted and should not be thrown again
this.errorEmitted = false; // Should close be emitted on destroy. Defaults to true.
this.emitClose = options.emitClose !== false; // Should .destroy() be called after 'finish' (and potentially 'end')
this.autoDestroy = !!options.autoDestroy; // count buffered requests
this.bufferedRequestCount = 0; // allocate the first CorkedRequest, there is always
// one allocated and free to use, and we maintain at most two
this.corkedRequestsFree = new CorkedRequest(this);
}
WritableState.prototype.getBuffer = function getBuffer() {
var current = this.bufferedRequest;
var out = [];
while (current) {
out.push(current);
current = current.next;
}
return out;
};
(function () {
try {
Object.defineProperty(WritableState.prototype, 'buffer', {
get: internalUtil.deprecate(function writableStateBufferGetter() {
return this.getBuffer();
}, '_writableState.buffer is deprecated. Use _writableState.getBuffer ' + 'instead.', 'DEP0003')
});
} catch (_) {}
})(); // Test _writableState for inheritance to account for Duplex streams,
// whose prototype chain only points to Readable.
var realHasInstance;
if (typeof Symbol === 'function' && Symbol.hasInstance && typeof Function.prototype[Symbol.hasInstance] === 'function') {
realHasInstance = Function.prototype[Symbol.hasInstance];
Object.defineProperty(Writable, Symbol.hasInstance, {
value: function value(object) {
if (realHasInstance.call(this, object)) return true;
if (this !== Writable) return false;
return object && object._writableState instanceof WritableState;
}
});
} else {
realHasInstance = function realHasInstance(object) {
return object instanceof this;
};
}
function Writable(options) {
Duplex = Duplex || require('./_stream_duplex'); // Writable ctor is applied to Duplexes, too.
// `realHasInstance` is necessary because using plain `instanceof`
// would return false, as no `_writableState` property is attached.
// Trying to use the custom `instanceof` for Writable here will also break the
// Node.js LazyTransform implementation, which has a non-trivial getter for
// `_writableState` that would lead to infinite recursion.
// Checking for a Stream.Duplex instance is faster here instead of inside
// the WritableState constructor, at least with V8 6.5
var isDuplex = this instanceof Duplex;
if (!isDuplex && !realHasInstance.call(Writable, this)) return new Writable(options);
this._writableState = new WritableState(options, this, isDuplex); // legacy.
this.writable = true;
if (options) {
if (typeof options.write === 'function') this._write = options.write;
if (typeof options.writev === 'function') this._writev = options.writev;
if (typeof options.destroy === 'function') this._destroy = options.destroy;
if (typeof options.final === 'function') this._final = options.final;
}
Stream.call(this);
} // Otherwise people can pipe Writable streams, which is just wrong.
Writable.prototype.pipe = function () {
errorOrDestroy(this, new ERR_STREAM_CANNOT_PIPE());
};
function writeAfterEnd(stream, cb) {
var er = new ERR_STREAM_WRITE_AFTER_END(); // TODO: defer error events consistently everywhere, not just the cb
errorOrDestroy(stream, er);
process.nextTick(cb, er);
} // Checks that a user-supplied chunk is valid, especially for the particular
// mode the stream is in. Currently this means that `null` is never accepted
// and undefined/non-string values are only allowed in object mode.
function validChunk(stream, state, chunk, cb) {
var er;
if (chunk === null) {
er = new ERR_STREAM_NULL_VALUES();
} else if (typeof chunk !== 'string' && !state.objectMode) {
er = new ERR_INVALID_ARG_TYPE('chunk', ['string', 'Buffer'], chunk);
}
if (er) {
errorOrDestroy(stream, er);
process.nextTick(cb, er);
return false;
}
return true;
}
Writable.prototype.write = function (chunk, encoding, cb) {
var state = this._writableState;
var ret = false;
var isBuf = !state.objectMode && _isUint8Array(chunk);
if (isBuf && !Buffer.isBuffer(chunk)) {
chunk = _uint8ArrayToBuffer(chunk);
}
if (typeof encoding === 'function') {
cb = encoding;
encoding = null;
}
if (isBuf) encoding = 'buffer';else if (!encoding) encoding = state.defaultEncoding;
if (typeof cb !== 'function') cb = nop;
if (state.ending) writeAfterEnd(this, cb);else if (isBuf || validChunk(this, state, chunk, cb)) {
state.pendingcb++;
ret = writeOrBuffer(this, state, isBuf, chunk, encoding, cb);
}
return ret;
};
Writable.prototype.cork = function () {
this._writableState.corked++;
};
Writable.prototype.uncork = function () {
var state = this._writableState;
if (state.corked) {
state.corked--;
if (!state.writing && !state.corked && !state.bufferProcessing && state.bufferedRequest) clearBuffer(this, state);
}
};
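// Illustrative usage sketch (the stream variable is hypothetical): cork()
// increments a counter and buffers writes; each uncork() decrements it, and
// the buffer is only flushed once the count returns to zero.
/*
stream.cork();
stream.write('header');
stream.cork(); // nesting is allowed
stream.write('body');
stream.uncork(); // still corked (count is 1), nothing flushed yet
stream.uncork(); // count reaches 0: buffered writes flush together
*/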
Writable.prototype.setDefaultEncoding = function setDefaultEncoding(encoding) {
// node::ParseEncoding() requires lower case.
if (typeof encoding === 'string') encoding = encoding.toLowerCase();
if (!(['hex', 'utf8', 'utf-8', 'ascii', 'binary', 'base64', 'ucs2', 'ucs-2', 'utf16le', 'utf-16le', 'raw'].indexOf((encoding + '').toLowerCase()) > -1)) throw new ERR_UNKNOWN_ENCODING(encoding);
this._writableState.defaultEncoding = encoding;
return this;
};
Object.defineProperty(Writable.prototype, 'writableBuffer', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._writableState && this._writableState.getBuffer();
}
});
function decodeChunk(state, chunk, encoding) {
if (!state.objectMode && state.decodeStrings !== false && typeof chunk === 'string') {
chunk = Buffer.from(chunk, encoding);
}
return chunk;
}
Object.defineProperty(Writable.prototype, 'writableHighWaterMark', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._writableState.highWaterMark;
}
}); // if we're already writing something, then just put this
// in the queue, and wait our turn. Otherwise, call _write
// If we return false, then we need a drain event, so set that flag.
function writeOrBuffer(stream, state, isBuf, chunk, encoding, cb) {
if (!isBuf) {
var newChunk = decodeChunk(state, chunk, encoding);
if (chunk !== newChunk) {
isBuf = true;
encoding = 'buffer';
chunk = newChunk;
}
}
var len = state.objectMode ? 1 : chunk.length;
state.length += len;
var ret = state.length < state.highWaterMark; // we must ensure that previous needDrain will not be reset to false.
if (!ret) state.needDrain = true;
if (state.writing || state.corked) {
var last = state.lastBufferedRequest;
state.lastBufferedRequest = {
chunk: chunk,
encoding: encoding,
isBuf: isBuf,
callback: cb,
next: null
};
if (last) {
last.next = state.lastBufferedRequest;
} else {
state.bufferedRequest = state.lastBufferedRequest;
}
state.bufferedRequestCount += 1;
} else {
doWrite(stream, state, false, len, chunk, encoding, cb);
}
return ret;
}
function doWrite(stream, state, writev, len, chunk, encoding, cb) {
state.writelen = len;
state.writecb = cb;
state.writing = true;
state.sync = true;
if (state.destroyed) state.onwrite(new ERR_STREAM_DESTROYED('write'));else if (writev) stream._writev(chunk, state.onwrite);else stream._write(chunk, encoding, state.onwrite);
state.sync = false;
}
function onwriteError(stream, state, sync, er, cb) {
--state.pendingcb;
if (sync) {
// defer the callback if we are being called synchronously
// to avoid piling up things on the stack
process.nextTick(cb, er); // this can emit finish, and it will always happen
// after error
process.nextTick(finishMaybe, stream, state);
stream._writableState.errorEmitted = true;
errorOrDestroy(stream, er);
} else {
    // if the write was async, the caller expects the callback
    // to be invoked before the error is emitted
cb(er);
stream._writableState.errorEmitted = true;
errorOrDestroy(stream, er); // this can emit finish, but finish must
// always follow error
finishMaybe(stream, state);
}
}
function onwriteStateUpdate(state) {
state.writing = false;
state.writecb = null;
state.length -= state.writelen;
state.writelen = 0;
}
function onwrite(stream, er) {
var state = stream._writableState;
var sync = state.sync;
var cb = state.writecb;
if (typeof cb !== 'function') throw new ERR_MULTIPLE_CALLBACK();
onwriteStateUpdate(state);
if (er) onwriteError(stream, state, sync, er, cb);else {
// Check if we're actually ready to finish, but don't emit yet
var finished = needFinish(state) || stream.destroyed;
if (!finished && !state.corked && !state.bufferProcessing && state.bufferedRequest) {
clearBuffer(stream, state);
}
if (sync) {
process.nextTick(afterWrite, stream, state, finished, cb);
} else {
afterWrite(stream, state, finished, cb);
}
}
}
function afterWrite(stream, state, finished, cb) {
if (!finished) onwriteDrain(stream, state);
state.pendingcb--;
cb();
finishMaybe(stream, state);
} // Must force callback to be called on nextTick, so that we don't
// emit 'drain' before the write() consumer gets the 'false' return
// value, and has a chance to attach a 'drain' listener.
function onwriteDrain(stream, state) {
if (state.length === 0 && state.needDrain) {
state.needDrain = false;
stream.emit('drain');
}
} // if there's something in the buffer waiting, then process it
function clearBuffer(stream, state) {
state.bufferProcessing = true;
var entry = state.bufferedRequest;
if (stream._writev && entry && entry.next) {
// Fast case, write everything using _writev()
var l = state.bufferedRequestCount;
var buffer = new Array(l);
var holder = state.corkedRequestsFree;
holder.entry = entry;
var count = 0;
var allBuffers = true;
while (entry) {
buffer[count] = entry;
if (!entry.isBuf) allBuffers = false;
entry = entry.next;
count += 1;
}
buffer.allBuffers = allBuffers;
doWrite(stream, state, true, state.length, buffer, '', holder.finish); // doWrite is almost always async, defer these to save a bit of time
// as the hot path ends with doWrite
state.pendingcb++;
state.lastBufferedRequest = null;
if (holder.next) {
state.corkedRequestsFree = holder.next;
holder.next = null;
} else {
state.corkedRequestsFree = new CorkedRequest(state);
}
state.bufferedRequestCount = 0;
} else {
// Slow case, write chunks one-by-one
while (entry) {
var chunk = entry.chunk;
var encoding = entry.encoding;
var cb = entry.callback;
var len = state.objectMode ? 1 : chunk.length;
doWrite(stream, state, false, len, chunk, encoding, cb);
entry = entry.next;
state.bufferedRequestCount--; // if we didn't call the onwrite immediately, then
// it means that we need to wait until it does.
// also, that means that the chunk and cb are currently
// being processed, so move the buffer counter past them.
if (state.writing) {
break;
}
}
if (entry === null) state.lastBufferedRequest = null;
}
state.bufferedRequest = entry;
state.bufferProcessing = false;
}
Writable.prototype._write = function (chunk, encoding, cb) {
cb(new ERR_METHOD_NOT_IMPLEMENTED('_write()'));
};
Writable.prototype._writev = null;
Writable.prototype.end = function (chunk, encoding, cb) {
var state = this._writableState;
if (typeof chunk === 'function') {
cb = chunk;
chunk = null;
encoding = null;
} else if (typeof encoding === 'function') {
cb = encoding;
encoding = null;
}
if (chunk !== null && chunk !== undefined) this.write(chunk, encoding); // .end() fully uncorks
if (state.corked) {
state.corked = 1;
this.uncork();
} // ignore unnecessary end() calls.
if (!state.ending) endWritable(this, state, cb);
return this;
};
Object.defineProperty(Writable.prototype, 'writableLength', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._writableState.length;
}
});
function needFinish(state) {
return state.ending && state.length === 0 && state.bufferedRequest === null && !state.finished && !state.writing;
}
function callFinal(stream, state) {
stream._final(function (err) {
state.pendingcb--;
if (err) {
errorOrDestroy(stream, err);
}
state.prefinished = true;
stream.emit('prefinish');
finishMaybe(stream, state);
});
}
function prefinish(stream, state) {
if (!state.prefinished && !state.finalCalled) {
if (typeof stream._final === 'function' && !state.destroyed) {
state.pendingcb++;
state.finalCalled = true;
process.nextTick(callFinal, stream, state);
} else {
state.prefinished = true;
stream.emit('prefinish');
}
}
}
function finishMaybe(stream, state) {
var need = needFinish(state);
if (need) {
prefinish(stream, state);
if (state.pendingcb === 0) {
state.finished = true;
stream.emit('finish');
if (state.autoDestroy) {
// In case of duplex streams we need a way to detect
// if the readable side is ready for autoDestroy as well
var rState = stream._readableState;
if (!rState || rState.autoDestroy && rState.endEmitted) {
stream.destroy();
}
}
}
}
return need;
}
function endWritable(stream, state, cb) {
state.ending = true;
finishMaybe(stream, state);
if (cb) {
if (state.finished) process.nextTick(cb);else stream.once('finish', cb);
}
state.ended = true;
stream.writable = false;
}
function onCorkedFinish(corkReq, state, err) {
var entry = corkReq.entry;
corkReq.entry = null;
while (entry) {
var cb = entry.callback;
state.pendingcb--;
cb(err);
entry = entry.next;
} // reuse the free corkReq.
state.corkedRequestsFree.next = corkReq;
}
Object.defineProperty(Writable.prototype, 'destroyed', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
if (this._writableState === undefined) {
return false;
}
return this._writableState.destroyed;
},
set: function set(value) {
// we ignore the value if the stream
// has not been initialized yet
if (!this._writableState) {
return;
} // backward compatibility, the user is explicitly
// managing destroyed
this._writableState.destroyed = value;
}
});
Writable.prototype.destroy = destroyImpl.destroy;
Writable.prototype._undestroy = destroyImpl.undestroy;
Writable.prototype._destroy = function (err, cb) {
cb(err);
};
}).call(this)}).call(this,require('_process'),typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{"../errors":11,"./_stream_duplex":12,"./internal/streams/destroy":19,"./internal/streams/state":23,"./internal/streams/stream":24,"_process":100,"buffer":25,"inherits":95,"util-deprecate":116}],17:[function(require,module,exports){
(function (process){(function (){
'use strict';
var _Object$setPrototypeO;
function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }
var finished = require('./end-of-stream');
var kLastResolve = Symbol('lastResolve');
var kLastReject = Symbol('lastReject');
var kError = Symbol('error');
var kEnded = Symbol('ended');
var kLastPromise = Symbol('lastPromise');
var kHandlePromise = Symbol('handlePromise');
var kStream = Symbol('stream');
function createIterResult(value, done) {
return {
value: value,
done: done
};
}
function readAndResolve(iter) {
var resolve = iter[kLastResolve];
if (resolve !== null) {
var data = iter[kStream].read(); // we defer if data is null
// we can be expecting either 'end' or
// 'error'
if (data !== null) {
iter[kLastPromise] = null;
iter[kLastResolve] = null;
iter[kLastReject] = null;
resolve(createIterResult(data, false));
}
}
}
function onReadable(iter) {
// we wait for the next tick, because it might
// emit an error with process.nextTick
process.nextTick(readAndResolve, iter);
}
function wrapForNext(lastPromise, iter) {
return function (resolve, reject) {
lastPromise.then(function () {
if (iter[kEnded]) {
resolve(createIterResult(undefined, true));
return;
}
iter[kHandlePromise](resolve, reject);
}, reject);
};
}
var AsyncIteratorPrototype = Object.getPrototypeOf(function () {});
var ReadableStreamAsyncIteratorPrototype = Object.setPrototypeOf((_Object$setPrototypeO = {
get stream() {
return this[kStream];
},
next: function next() {
var _this = this;
// if we have detected an error in the meanwhile
// reject straight away
var error = this[kError];
if (error !== null) {
return Promise.reject(error);
}
if (this[kEnded]) {
return Promise.resolve(createIterResult(undefined, true));
}
if (this[kStream].destroyed) {
// We need to defer via nextTick because if .destroy(err) is
// called, the error will be emitted via nextTick, and
// we cannot guarantee that there is no error lingering around
// waiting to be emitted.
return new Promise(function (resolve, reject) {
process.nextTick(function () {
if (_this[kError]) {
reject(_this[kError]);
} else {
resolve(createIterResult(undefined, true));
}
});
});
} // if we have multiple next() calls
// we will wait for the previous Promise to finish
    // this logic is optimized to support for-await loops,
// where next() is only called once at a time
var lastPromise = this[kLastPromise];
var promise;
if (lastPromise) {
promise = new Promise(wrapForNext(lastPromise, this));
} else {
// fast path needed to support multiple this.push()
// without triggering the next() queue
var data = this[kStream].read();
if (data !== null) {
return Promise.resolve(createIterResult(data, false));
}
promise = new Promise(this[kHandlePromise]);
}
this[kLastPromise] = promise;
return promise;
}
}, _defineProperty(_Object$setPrototypeO, Symbol.asyncIterator, function () {
return this;
}), _defineProperty(_Object$setPrototypeO, "return", function _return() {
var _this2 = this;
// destroy(err, cb) is a private API
// we can guarantee we have that here, because we control the
// Readable class this is attached to
return new Promise(function (resolve, reject) {
_this2[kStream].destroy(null, function (err) {
if (err) {
reject(err);
return;
}
resolve(createIterResult(undefined, true));
});
});
}), _Object$setPrototypeO), AsyncIteratorPrototype);
var createReadableStreamAsyncIterator = function createReadableStreamAsyncIterator(stream) {
var _Object$create;
var iterator = Object.create(ReadableStreamAsyncIteratorPrototype, (_Object$create = {}, _defineProperty(_Object$create, kStream, {
value: stream,
writable: true
}), _defineProperty(_Object$create, kLastResolve, {
value: null,
writable: true
}), _defineProperty(_Object$create, kLastReject, {
value: null,
writable: true
}), _defineProperty(_Object$create, kError, {
value: null,
writable: true
}), _defineProperty(_Object$create, kEnded, {
value: stream._readableState.endEmitted,
writable: true
}), _defineProperty(_Object$create, kHandlePromise, {
value: function value(resolve, reject) {
var data = iterator[kStream].read();
if (data) {
iterator[kLastPromise] = null;
iterator[kLastResolve] = null;
iterator[kLastReject] = null;
resolve(createIterResult(data, false));
} else {
iterator[kLastResolve] = resolve;
iterator[kLastReject] = reject;
}
},
writable: true
}), _Object$create));
iterator[kLastPromise] = null;
finished(stream, function (err) {
if (err && err.code !== 'ERR_STREAM_PREMATURE_CLOSE') {
var reject = iterator[kLastReject]; // reject if we are waiting for data in the Promise
// returned by next() and store the error
if (reject !== null) {
iterator[kLastPromise] = null;
iterator[kLastResolve] = null;
iterator[kLastReject] = null;
reject(err);
}
iterator[kError] = err;
return;
}
var resolve = iterator[kLastResolve];
if (resolve !== null) {
iterator[kLastPromise] = null;
iterator[kLastResolve] = null;
iterator[kLastReject] = null;
resolve(createIterResult(undefined, true));
}
iterator[kEnded] = true;
});
stream.on('readable', onReadable.bind(null, iterator));
return iterator;
};
module.exports = createReadableStreamAsyncIterator;
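// Illustrative usage sketch (assumes an environment with async/await): the
// iterator above backs Symbol.asyncIterator on Readable, so a stream can be
// consumed with for-await; a stream error rejects the pending next() promise.
/*
async function consume(readable) {
  for await (var chunk of readable) {
    console.log('got', chunk.length, 'bytes');
  } // breaking out of the loop invokes return(), which destroys the stream
}
*/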
}).call(this)}).call(this,require('_process'))
},{"./end-of-stream":20,"_process":100}],18:[function(require,module,exports){
'use strict';
function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); if (enumerableOnly) symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; }); keys.push.apply(keys, symbols); } return keys; }
function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i] != null ? arguments[i] : {}; if (i % 2) { ownKeys(Object(source), true).forEach(function (key) { _defineProperty(target, key, source[key]); }); } else if (Object.getOwnPropertyDescriptors) { Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)); } else { ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } } return target; }
function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }
function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }
var _require = require('buffer'),
Buffer = _require.Buffer;
var _require2 = require('util'),
inspect = _require2.inspect;
var custom = inspect && inspect.custom || 'inspect';
function copyBuffer(src, target, offset) {
Buffer.prototype.copy.call(src, target, offset);
}
module.exports =
/*#__PURE__*/
function () {
function BufferList() {
_classCallCheck(this, BufferList);
this.head = null;
this.tail = null;
this.length = 0;
}
_createClass(BufferList, [{
key: "push",
value: function push(v) {
var entry = {
data: v,
next: null
};
if (this.length > 0) this.tail.next = entry;else this.head = entry;
this.tail = entry;
++this.length;
}
}, {
key: "unshift",
value: function unshift(v) {
var entry = {
data: v,
next: this.head
};
if (this.length === 0) this.tail = entry;
this.head = entry;
++this.length;
}
}, {
key: "shift",
value: function shift() {
if (this.length === 0) return;
var ret = this.head.data;
if (this.length === 1) this.head = this.tail = null;else this.head = this.head.next;
--this.length;
return ret;
}
}, {
key: "clear",
value: function clear() {
this.head = this.tail = null;
this.length = 0;
}
}, {
key: "join",
value: function join(s) {
if (this.length === 0) return '';
var p = this.head;
var ret = '' + p.data;
while (p = p.next) {
ret += s + p.data;
}
return ret;
}
}, {
key: "concat",
value: function concat(n) {
if (this.length === 0) return Buffer.alloc(0);
var ret = Buffer.allocUnsafe(n >>> 0);
var p = this.head;
var i = 0;
while (p) {
copyBuffer(p.data, ret, i);
i += p.data.length;
p = p.next;
}
return ret;
  } // Consumes a specified number of bytes or characters from the buffered data.
}, {
key: "consume",
value: function consume(n, hasStrings) {
var ret;
if (n < this.head.data.length) {
// `slice` is the same for buffers and strings.
ret = this.head.data.slice(0, n);
this.head.data = this.head.data.slice(n);
} else if (n === this.head.data.length) {
// First chunk is a perfect match.
ret = this.shift();
} else {
// Result spans more than one buffer.
ret = hasStrings ? this._getString(n) : this._getBuffer(n);
}
return ret;
}
}, {
key: "first",
value: function first() {
return this.head.data;
  } // Consumes a specified number of characters from the buffered data.
}, {
key: "_getString",
value: function _getString(n) {
var p = this.head;
var c = 1;
var ret = p.data;
n -= ret.length;
while (p = p.next) {
var str = p.data;
var nb = n > str.length ? str.length : n;
if (nb === str.length) ret += str;else ret += str.slice(0, n);
n -= nb;
if (n === 0) {
if (nb === str.length) {
++c;
if (p.next) this.head = p.next;else this.head = this.tail = null;
} else {
this.head = p;
p.data = str.slice(nb);
}
break;
}
++c;
}
this.length -= c;
return ret;
  } // Consumes a specified number of bytes from the buffered data.
}, {
key: "_getBuffer",
value: function _getBuffer(n) {
var ret = Buffer.allocUnsafe(n);
var p = this.head;
var c = 1;
p.data.copy(ret);
n -= p.data.length;
while (p = p.next) {
var buf = p.data;
var nb = n > buf.length ? buf.length : n;
buf.copy(ret, ret.length - n, 0, nb);
n -= nb;
if (n === 0) {
if (nb === buf.length) {
++c;
if (p.next) this.head = p.next;else this.head = this.tail = null;
} else {
this.head = p;
p.data = buf.slice(nb);
}
break;
}
++c;
}
this.length -= c;
return ret;
} // Make sure the linked list only shows the minimal necessary information.
}, {
key: custom,
value: function value(_, options) {
return inspect(this, _objectSpread({}, options, {
// Only inspect one level.
depth: 0,
// It should not recurse.
customInspect: false
}));
}
}]);
return BufferList;
}();
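// Illustrative usage sketch (the export is anonymous; BufferList is a
// hypothetical binding for it): a singly linked list of chunks with O(1)
// push/shift, where consume() can span several chunks.
/*
var BufferList = require('./buffer_list'); // hypothetical path
var list = new BufferList();
list.push(Buffer.from('ab'));
list.push(Buffer.from('cdef'));
// list.length is 2: it counts chunks, not bytes
list.consume(3, false); // <Buffer 61 62 63>, spanning both chunks
*/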
},{"buffer":25,"util":6}],19:[function(require,module,exports){
(function (process){(function (){
'use strict'; // undocumented cb() API, needed for core, not for public API
function destroy(err, cb) {
var _this = this;
var readableDestroyed = this._readableState && this._readableState.destroyed;
var writableDestroyed = this._writableState && this._writableState.destroyed;
if (readableDestroyed || writableDestroyed) {
if (cb) {
cb(err);
} else if (err) {
if (!this._writableState) {
process.nextTick(emitErrorNT, this, err);
} else if (!this._writableState.errorEmitted) {
this._writableState.errorEmitted = true;
process.nextTick(emitErrorNT, this, err);
}
}
return this;
} // we set destroyed to true before firing error callbacks in order
// to make it re-entrance safe in case destroy() is called within callbacks
if (this._readableState) {
this._readableState.destroyed = true;
} // if this is a duplex stream mark the writable part as destroyed as well
if (this._writableState) {
this._writableState.destroyed = true;
}
this._destroy(err || null, function (err) {
if (!cb && err) {
if (!_this._writableState) {
process.nextTick(emitErrorAndCloseNT, _this, err);
} else if (!_this._writableState.errorEmitted) {
_this._writableState.errorEmitted = true;
process.nextTick(emitErrorAndCloseNT, _this, err);
} else {
process.nextTick(emitCloseNT, _this);
}
} else if (cb) {
process.nextTick(emitCloseNT, _this);
cb(err);
} else {
process.nextTick(emitCloseNT, _this);
}
});
return this;
}
function emitErrorAndCloseNT(self, err) {
emitErrorNT(self, err);
emitCloseNT(self);
}
function emitCloseNT(self) {
if (self._writableState && !self._writableState.emitClose) return;
if (self._readableState && !self._readableState.emitClose) return;
self.emit('close');
}
function undestroy() {
if (this._readableState) {
this._readableState.destroyed = false;
this._readableState.reading = false;
this._readableState.ended = false;
this._readableState.endEmitted = false;
}
if (this._writableState) {
this._writableState.destroyed = false;
this._writableState.ended = false;
this._writableState.ending = false;
this._writableState.finalCalled = false;
this._writableState.prefinished = false;
this._writableState.finished = false;
this._writableState.errorEmitted = false;
}
}
function emitErrorNT(self, err) {
self.emit('error', err);
}
function errorOrDestroy(stream, err) {
// We have tests that rely on errors being emitted
// in the same tick, so changing this is semver major.
// For now when you opt-in to autoDestroy we allow
// the error to be emitted nextTick. In a future
// semver major update we should change the default to this.
var rState = stream._readableState;
var wState = stream._writableState;
if (rState && rState.autoDestroy || wState && wState.autoDestroy) stream.destroy(err);else stream.emit('error', err);
}
module.exports = {
destroy: destroy,
undestroy: undestroy,
errorOrDestroy: errorOrDestroy
};
}).call(this)}).call(this,require('_process'))
},{"_process":100}],20:[function(require,module,exports){
// Ported from https://github.com/mafintosh/end-of-stream with
// permission from the author, Mathias Buus (@mafintosh).
'use strict';
var ERR_STREAM_PREMATURE_CLOSE = require('../../../errors').codes.ERR_STREAM_PREMATURE_CLOSE;
function once(callback) {
var called = false;
return function () {
if (called) return;
called = true;
for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {
args[_key] = arguments[_key];
}
callback.apply(this, args);
};
}
function noop() {}
function isRequest(stream) {
return stream.setHeader && typeof stream.abort === 'function';
}
function eos(stream, opts, callback) {
if (typeof opts === 'function') return eos(stream, null, opts);
if (!opts) opts = {};
callback = once(callback || noop);
var readable = opts.readable || opts.readable !== false && stream.readable;
var writable = opts.writable || opts.writable !== false && stream.writable;
var onlegacyfinish = function onlegacyfinish() {
if (!stream.writable) onfinish();
};
var writableEnded = stream._writableState && stream._writableState.finished;
var onfinish = function onfinish() {
writable = false;
writableEnded = true;
if (!readable) callback.call(stream);
};
var readableEnded = stream._readableState && stream._readableState.endEmitted;
var onend = function onend() {
readable = false;
readableEnded = true;
if (!writable) callback.call(stream);
};
var onerror = function onerror(err) {
callback.call(stream, err);
};
var onclose = function onclose() {
var err;
if (readable && !readableEnded) {
if (!stream._readableState || !stream._readableState.ended) err = new ERR_STREAM_PREMATURE_CLOSE();
return callback.call(stream, err);
}
if (writable && !writableEnded) {
if (!stream._writableState || !stream._writableState.ended) err = new ERR_STREAM_PREMATURE_CLOSE();
return callback.call(stream, err);
}
};
var onrequest = function onrequest() {
stream.req.on('finish', onfinish);
};
if (isRequest(stream)) {
stream.on('complete', onfinish);
stream.on('abort', onclose);
if (stream.req) onrequest();else stream.on('request', onrequest);
} else if (writable && !stream._writableState) {
// legacy streams
stream.on('end', onlegacyfinish);
stream.on('close', onlegacyfinish);
}
stream.on('end', onend);
stream.on('finish', onfinish);
if (opts.error !== false) stream.on('error', onerror);
stream.on('close', onclose);
return function () {
stream.removeListener('complete', onfinish);
stream.removeListener('abort', onclose);
stream.removeListener('request', onrequest);
if (stream.req) stream.req.removeListener('finish', onfinish);
stream.removeListener('end', onlegacyfinish);
stream.removeListener('close', onlegacyfinish);
stream.removeListener('finish', onfinish);
stream.removeListener('end', onend);
stream.removeListener('error', onerror);
stream.removeListener('close', onclose);
};
}
module.exports = eos;
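// Illustrative usage sketch (someStream is hypothetical): eos() attaches the
// listeners above and calls back exactly once when the stream finishes or
// fails; the returned function detaches everything.
/*
var unlisten = eos(someStream, { readable: true, writable: false }, function (err) {
  if (err) return console.error('stream failed', err);
  console.log('stream ended cleanly');
});
// later, if we no longer care:
unlisten();
*/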
},{"../../../errors":11}],21:[function(require,module,exports){
module.exports = function () {
throw new Error('Readable.from is not available in the browser')
};
},{}],22:[function(require,module,exports){
// Ported from https://github.com/mafintosh/pump with
// permission from the author, Mathias Buus (@mafintosh).
'use strict';
var eos;
function once(callback) {
var called = false;
return function () {
if (called) return;
called = true;
callback.apply(void 0, arguments);
};
}
var _require$codes = require('../../../errors').codes,
ERR_MISSING_ARGS = _require$codes.ERR_MISSING_ARGS,
ERR_STREAM_DESTROYED = _require$codes.ERR_STREAM_DESTROYED;
function noop(err) {
// Rethrow the error if it exists to avoid swallowing it
if (err) throw err;
}
function isRequest(stream) {
return stream.setHeader && typeof stream.abort === 'function';
}
function destroyer(stream, reading, writing, callback) {
callback = once(callback);
var closed = false;
stream.on('close', function () {
closed = true;
});
if (eos === undefined) eos = require('./end-of-stream');
eos(stream, {
readable: reading,
writable: writing
}, function (err) {
if (err) return callback(err);
closed = true;
callback();
});
var destroyed = false;
return function (err) {
if (closed) return;
if (destroyed) return;
    destroyed = true; // request.destroy just does .end() - .abort() is what we want
if (isRequest(stream)) return stream.abort();
if (typeof stream.destroy === 'function') return stream.destroy();
callback(err || new ERR_STREAM_DESTROYED('pipe'));
};
}
function call(fn) {
fn();
}
function pipe(from, to) {
return from.pipe(to);
}
function popCallback(streams) {
if (!streams.length) return noop;
if (typeof streams[streams.length - 1] !== 'function') return noop;
return streams.pop();
}
function pipeline() {
for (var _len = arguments.length, streams = new Array(_len), _key = 0; _key < _len; _key++) {
streams[_key] = arguments[_key];
}
var callback = popCallback(streams);
if (Array.isArray(streams[0])) streams = streams[0];
if (streams.length < 2) {
throw new ERR_MISSING_ARGS('streams');
}
var error;
var destroys = streams.map(function (stream, i) {
var reading = i < streams.length - 1;
var writing = i > 0;
return destroyer(stream, reading, writing, function (err) {
if (!error) error = err;
if (err) destroys.forEach(call);
if (reading) return;
destroys.forEach(call);
callback(error);
});
});
return streams.reduce(pipe);
}
module.exports = pipeline;
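// Illustrative usage sketch (stream names are hypothetical): pipeline()
// chains streams left to right, destroys every stream in the chain if any
// one of them errors, and invokes the trailing callback exactly once.
/*
pipeline(source, transform, destination, function (err) {
  if (err) console.error('pipeline failed', err);
  else console.log('pipeline succeeded');
});
*/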
},{"../../../errors":11,"./end-of-stream":20}],23:[function(require,module,exports){
'use strict';
var ERR_INVALID_OPT_VALUE = require('../../../errors').codes.ERR_INVALID_OPT_VALUE;
function highWaterMarkFrom(options, isDuplex, duplexKey) {
return options.highWaterMark != null ? options.highWaterMark : isDuplex ? options[duplexKey] : null;
}
function getHighWaterMark(state, options, duplexKey, isDuplex) {
var hwm = highWaterMarkFrom(options, isDuplex, duplexKey);
if (hwm != null) {
if (!(isFinite(hwm) && Math.floor(hwm) === hwm) || hwm < 0) {
var name = isDuplex ? duplexKey : 'highWaterMark';
throw new ERR_INVALID_OPT_VALUE(name, hwm);
}
return Math.floor(hwm);
} // Default value
return state.objectMode ? 16 : 16 * 1024;
}
module.exports = {
getHighWaterMark: getHighWaterMark
};
},{"../../../errors":11}],24:[function(require,module,exports){
module.exports = require('events').EventEmitter;
},{"events":8}],25:[function(require,module,exports){
(function (Buffer){(function (){
/*!
* The buffer module from node.js, for the browser.
*
* @author Feross Aboukhadijeh <https://feross.org>
* @license MIT
*/
/* eslint-disable no-proto */
'use strict'
var base64 = require('base64-js')
var ieee754 = require('ieee754')
exports.Buffer = Buffer
exports.SlowBuffer = SlowBuffer
exports.INSPECT_MAX_BYTES = 50
var K_MAX_LENGTH = 0x7fffffff
exports.kMaxLength = K_MAX_LENGTH
/**
* If `Buffer.TYPED_ARRAY_SUPPORT`:
* === true Use Uint8Array implementation (fastest)
* === false Print warning and recommend using `buffer` v4.x which has an Object
* implementation (most compatible, even IE6)
*
* Browsers that support typed arrays are IE 10+, Firefox 4+, Chrome 7+, Safari 5.1+,
* Opera 11.6+, iOS 4.2+.
*
 * We report that the browser does not support typed arrays if they are not subclassable
* using __proto__. Firefox 4-29 lacks support for adding new properties to `Uint8Array`
* (See: https://bugzilla.mozilla.org/show_bug.cgi?id=695438). IE 10 lacks support
* for __proto__ and has a buggy typed array implementation.
*/
Buffer.TYPED_ARRAY_SUPPORT = typedArraySupport()
if (!Buffer.TYPED_ARRAY_SUPPORT && typeof console !== 'undefined' &&
typeof console.error === 'function') {
console.error(
'This browser lacks typed array (Uint8Array) support which is required by ' +
'`buffer` v5.x. Use `buffer` v4.x if you require old browser support.'
)
}
function typedArraySupport () {
  // Can typed array instances be augmented?
try {
var arr = new Uint8Array(1)
arr.__proto__ = { __proto__: Uint8Array.prototype, foo: function () { return 42 } }
return arr.foo() === 42
} catch (e) {
return false
}
}
Object.defineProperty(Buffer.prototype, 'parent', {
enumerable: true,
get: function () {
if (!Buffer.isBuffer(this)) return undefined
return this.buffer
}
})
Object.defineProperty(Buffer.prototype, 'offset', {
enumerable: true,
get: function () {
if (!Buffer.isBuffer(this)) return undefined
return this.byteOffset
}
})
function createBuffer (length) {
if (length > K_MAX_LENGTH) {
throw new RangeError('The value "' + length + '" is invalid for option "size"')
}
// Return an augmented `Uint8Array` instance
var buf = new Uint8Array(length)
buf.__proto__ = Buffer.prototype
return buf
}
/**
* The Buffer constructor returns instances of `Uint8Array` that have their
* prototype changed to `Buffer.prototype`. Furthermore, `Buffer` is a subclass of
* `Uint8Array`, so the returned instances will have all the node `Buffer` methods
* and the `Uint8Array` methods. Square bracket notation works as expected -- it
* returns a single octet.
*
* The `Uint8Array` prototype remains unmodified.
*/
function Buffer (arg, encodingOrOffset, length) {
// Common case.
if (typeof arg === 'number') {
if (typeof encodingOrOffset === 'string') {
throw new TypeError(
'The "string" argument must be of type string. Received type number'
)
}
return allocUnsafe(arg)
}
return from(arg, encodingOrOffset, length)
}
// Fix subarray() in ES2016. See: https://github.com/feross/buffer/pull/97
if (typeof Symbol !== 'undefined' && Symbol.species != null &&
Buffer[Symbol.species] === Buffer) {
Object.defineProperty(Buffer, Symbol.species, {
value: null,
configurable: true,
enumerable: false,
writable: false
})
}
Buffer.poolSize = 8192 // not used by this implementation
function from (value, encodingOrOffset, length) {
if (typeof value === 'string') {
return fromString(value, encodingOrOffset)
}
if (ArrayBuffer.isView(value)) {
return fromArrayLike(value)
}
if (value == null) {
throw TypeError(
'The first argument must be one of type string, Buffer, ArrayBuffer, Array, ' +
'or Array-like Object. Received type ' + (typeof value)
)
}
if (isInstance(value, ArrayBuffer) ||
(value && isInstance(value.buffer, ArrayBuffer))) {
return fromArrayBuffer(value, encodingOrOffset, length)
}
if (typeof value === 'number') {
throw new TypeError(
'The "value" argument must not be of type number. Received type number'
)
}
var valueOf = value.valueOf && value.valueOf()
if (valueOf != null && valueOf !== value) {
return Buffer.from(valueOf, encodingOrOffset, length)
}
var b = fromObject(value)
if (b) return b
if (typeof Symbol !== 'undefined' && Symbol.toPrimitive != null &&
typeof value[Symbol.toPrimitive] === 'function') {
return Buffer.from(
value[Symbol.toPrimitive]('string'), encodingOrOffset, length
)
}
throw new TypeError(
'The first argument must be one of type string, Buffer, ArrayBuffer, Array, ' +
'or Array-like Object. Received type ' + (typeof value)
)
}
/**
* Functionally equivalent to Buffer(arg, encoding) but throws a TypeError
* if value is a number.
* Buffer.from(str[, encoding])
* Buffer.from(array)
* Buffer.from(buffer)
* Buffer.from(arrayBuffer[, byteOffset[, length]])
**/
Buffer.from = function (value, encodingOrOffset, length) {
return from(value, encodingOrOffset, length)
}
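// Illustrative sketch of the four accepted forms listed above; every value
// here is hypothetical.
/*
Buffer.from('abc', 'utf8')            // from string + encoding
Buffer.from([0x61, 0x62, 0x63])       // from an array of octets
Buffer.from(Buffer.from('abc'))       // copies another Buffer
Buffer.from(new ArrayBuffer(8), 0, 4) // view over an ArrayBuffer (no copy)
*/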
// Note: Change prototype *after* Buffer.from is defined to workaround Chrome bug:
// https://github.com/feross/buffer/pull/148
Buffer.prototype.__proto__ = Uint8Array.prototype
Buffer.__proto__ = Uint8Array
function assertSize (size) {
if (typeof size !== 'number') {
throw new TypeError('"size" argument must be of type number')
} else if (size < 0) {
throw new RangeError('The value "' + size + '" is invalid for option "size"')
}
}
function alloc (size, fill, encoding) {
assertSize(size)
if (size <= 0) {
return createBuffer(size)
}
if (fill !== undefined) {
// Only pay attention to encoding if it's a string. This
// prevents accidentally sending in a number that would
    // be interpreted as a start offset.
return typeof encoding === 'string'
? createBuffer(size).fill(fill, encoding)
: createBuffer(size).fill(fill)
}
return createBuffer(size)
}
/**
* Creates a new filled Buffer instance.
* alloc(size[, fill[, encoding]])
**/
Buffer.alloc = function (size, fill, encoding) {
return alloc(size, fill, encoding)
}
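// Illustrative sketch: alloc() always zero-fills unless an explicit fill is
// given; the encoding argument is only honored when fill is a string.
/*
Buffer.alloc(4)                   // <Buffer 00 00 00 00>
Buffer.alloc(4, 1)                // <Buffer 01 01 01 01>
Buffer.alloc(4, 'a')              // <Buffer 61 61 61 61>
Buffer.alloc(8, 'YWJj', 'base64') // fill decoded as base64 ('abc' repeated)
*/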
function allocUnsafe (size) {
assertSize(size)
return createBuffer(size < 0 ? 0 : checked(size) | 0)
}
/**
* Equivalent to Buffer(num), by default creates a non-zero-filled Buffer instance.
* */
Buffer.allocUnsafe = function (size) {
return allocUnsafe(size)
}
/**
* Equivalent to SlowBuffer(num), by default creates a non-zero-filled Buffer instance.
*/
Buffer.allocUnsafeSlow = function (size) {
return allocUnsafe(size)
}
function fromString (string, encoding) {
if (typeof encoding !== 'string' || encoding === '') {
encoding = 'utf8'
}
if (!Buffer.isEncoding(encoding)) {
throw new TypeError('Unknown encoding: ' + encoding)
}
var length = byteLength(string, encoding) | 0
var buf = createBuffer(length)
var actual = buf.write(string, encoding)
if (actual !== length) {
// Writing a hex string, for example, that contains invalid characters will
// cause everything after the first invalid character to be ignored. (e.g.
// 'abxxcd' will be treated as 'ab')
buf = buf.slice(0, actual)
}
return buf
}
function fromArrayLike (array) {
var length = array.length < 0 ? 0 : checked(array.length) | 0
var buf = createBuffer(length)
for (var i = 0; i < length; i += 1) {
buf[i] = array[i] & 255
}
return buf
}
function fromArrayBuffer (array, byteOffset, length) {
if (byteOffset < 0 || array.byteLength < byteOffset) {
throw new RangeError('"offset" is outside of buffer bounds')
}
if (array.byteLength < byteOffset + (length || 0)) {
throw new RangeError('"length" is outside of buffer bounds')
}
var buf
if (byteOffset === undefined && length === undefined) {
buf = new Uint8Array(array)
} else if (length === undefined) {
buf = new Uint8Array(array, byteOffset)
} else {
buf = new Uint8Array(array, byteOffset, length)
}
// Return an augmented `Uint8Array` instance
buf.__proto__ = Buffer.prototype
return buf
}
function fromObject (obj) {
if (Buffer.isBuffer(obj)) {
var len = checked(obj.length) | 0
var buf = createBuffer(len)
if (buf.length === 0) {
return buf
}
obj.copy(buf, 0, 0, len)
return buf
}
if (obj.length !== undefined) {
if (typeof obj.length !== 'number' || numberIsNaN(obj.length)) {
return createBuffer(0)
}
return fromArrayLike(obj)
}
if (obj.type === 'Buffer' && Array.isArray(obj.data)) {
return fromArrayLike(obj.data)
}
}
function checked (length) {
// Note: cannot use `length < K_MAX_LENGTH` here because that fails when
  // length is NaN (which is otherwise coerced to zero).
if (length >= K_MAX_LENGTH) {
throw new RangeError('Attempt to allocate Buffer larger than maximum ' +
'size: 0x' + K_MAX_LENGTH.toString(16) + ' bytes')
}
return length | 0
}
function SlowBuffer (length) {
if (+length != length) { // eslint-disable-line eqeqeq
length = 0
}
return Buffer.alloc(+length)
}
Buffer.isBuffer = function isBuffer (b) {
return b != null && b._isBuffer === true &&
b !== Buffer.prototype // so Buffer.isBuffer(Buffer.prototype) will be false
}
Buffer.compare = function compare (a, b) {
if (isInstance(a, Uint8Array)) a = Buffer.from(a, a.offset, a.byteLength)
if (isInstance(b, Uint8Array)) b = Buffer.from(b, b.offset, b.byteLength)
if (!Buffer.isBuffer(a) || !Buffer.isBuffer(b)) {
throw new TypeError(
'The "buf1", "buf2" arguments must be one of type Buffer or Uint8Array'
)
}
if (a === b) return 0
var x = a.length
var y = b.length
for (var i = 0, len = Math.min(x, y); i < len; ++i) {
if (a[i] !== b[i]) {
x = a[i]
y = b[i]
break
}
}
if (x < y) return -1
if (y < x) return 1
return 0
}
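// Illustrative sketch: compare() returns -1/0/1 in sort order, so it can be
// passed straight to Array.prototype.sort for a list of Buffers.
/*
var arr = [Buffer.from('bbb'), Buffer.from('a')];
arr.sort(Buffer.compare);       // [<Buffer 61>, <Buffer 62 62 62>]
Buffer.compare(arr[0], arr[1]); // -1
*/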
Buffer.isEncoding = function isEncoding (encoding) {
switch (String(encoding).toLowerCase()) {
case 'hex':
case 'utf8':
case 'utf-8':
case 'ascii':
case 'latin1':
case 'binary':
case 'base64':
case 'ucs2':
case 'ucs-2':
case 'utf16le':
case 'utf-16le':
return true
default:
return false
}
}
Buffer.concat = function concat (list, length) {
if (!Array.isArray(list)) {
throw new TypeError('"list" argument must be an Array of Buffers')
}
if (list.length === 0) {
return Buffer.alloc(0)
}
var i
if (length === undefined) {
length = 0
for (i = 0; i < list.length; ++i) {
length += list[i].length
}
}
var buffer = Buffer.allocUnsafe(length)
var pos = 0
for (i = 0; i < list.length; ++i) {
var buf = list[i]
if (isInstance(buf, Uint8Array)) {
buf = Buffer.from(buf)
}
if (!Buffer.isBuffer(buf)) {
throw new TypeError('"list" argument must be an Array of Buffers')
}
buf.copy(buffer, pos)
pos += buf.length
}
return buffer
}
function byteLength (string, encoding) {
if (Buffer.isBuffer(string)) {
return string.length
}
if (ArrayBuffer.isView(string) || isInstance(string, ArrayBuffer)) {
return string.byteLength
}
if (typeof string !== 'string') {
throw new TypeError(
'The "string" argument must be one of type string, Buffer, or ArrayBuffer. ' +
'Received type ' + typeof string
)
}
var len = string.length
var mustMatch = (arguments.length > 2 && arguments[2] === true)
if (!mustMatch && len === 0) return 0
// Use a for loop to avoid recursion
var loweredCase = false
for (;;) {
switch (encoding) {
case 'ascii':
case 'latin1':
case 'binary':
return len
case 'utf8':
case 'utf-8':
return utf8ToBytes(string).length
case 'ucs2':
case 'ucs-2':
case 'utf16le':
case 'utf-16le':
return len * 2
case 'hex':
return len >>> 1
case 'base64':
return base64ToBytes(string).length
default:
if (loweredCase) {
return mustMatch ? -1 : utf8ToBytes(string).length // assume utf8
}
encoding = ('' + encoding).toLowerCase()
loweredCase = true
}
}
}
Buffer.byteLength = byteLength
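// Illustrative sketch: byteLength() reports encoded size, which differs from
// string length for multi-byte UTF-8 and for hex/base64 input.
/*
Buffer.byteLength('abc', 'utf8')    // 3
Buffer.byteLength('\u00bd', 'utf8') // 2 (one character, two bytes)
Buffer.byteLength('abcd', 'hex')    // 2 (two bytes per hex pair)
*/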
function slowToString (encoding, start, end) {
var loweredCase = false
// No need to verify that "this.length <= MAX_UINT32" since it's a read-only
// property of a typed array.
// This behaves neither like String nor Uint8Array in that we set start/end
// to their upper/lower bounds if the value passed is out of range.
// undefined is handled specially as per ECMA-262 6th Edition,
// Section 13.3.3.7 Runtime Semantics: KeyedBindingInitialization.
if (start === undefined || start < 0) {
start = 0
}
// Return early if start > this.length. Done here to prevent potential uint32
// coercion fail below.
if (start > this.length) {
return ''
}
if (end === undefined || end > this.length) {
end = this.length
}
if (end <= 0) {
return ''
}
  // Force coercion to uint32. This will also coerce falsy/NaN values to 0.
end >>>= 0
start >>>= 0
if (end <= start) {
return ''
}
if (!encoding) encoding = 'utf8'
while (true) {
switch (encoding) {
case 'hex':
return hexSlice(this, start, end)
case 'utf8':
case 'utf-8':
return utf8Slice(this, start, end)
case 'ascii':
return asciiSlice(this, start, end)
case 'latin1':
case 'binary':
return latin1Slice(this, start, end)
case 'base64':
return base64Slice(this, start, end)
case 'ucs2':
case 'ucs-2':
case 'utf16le':
case 'utf-16le':
return utf16leSlice(this, start, end)
default:
if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding)
encoding = (encoding + '').toLowerCase()
loweredCase = true
}
}
}
// This property is used by `Buffer.isBuffer` (and the `is-buffer` npm package)
// to detect a Buffer instance. It's not possible to use `instanceof Buffer`
// reliably in a browserify context because there could be multiple different
// copies of the 'buffer' package in use. This method works even for Buffer
// instances that were created from another copy of the `buffer` package.
// See: https://github.com/feross/buffer/issues/154
Buffer.prototype._isBuffer = true
function swap (b, n, m) {
var i = b[n]
b[n] = b[m]
b[m] = i
}
Buffer.prototype.swap16 = function swap16 () {
var len = this.length
if (len % 2 !== 0) {
throw new RangeError('Buffer size must be a multiple of 16-bits')
}
for (var i = 0; i < len; i += 2) {
swap(this, i, i + 1)
}
return this
}
Buffer.prototype.swap32 = function swap32 () {
var len = this.length
if (len % 4 !== 0) {
throw new RangeError('Buffer size must be a multiple of 32-bits')
}
for (var i = 0; i < len; i += 4) {
swap(this, i, i + 3)
swap(this, i + 1, i + 2)
}
return this
}
Buffer.prototype.swap64 = function swap64 () {
var len = this.length
if (len % 8 !== 0) {
throw new RangeError('Buffer size must be a multiple of 64-bits')
}
for (var i = 0; i < len; i += 8) {
swap(this, i, i + 7)
swap(this, i + 1, i + 6)
swap(this, i + 2, i + 5)
swap(this, i + 3, i + 4)
}
return this
}
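// Illustrative sketch (never invoked): the swap* methods reverse byte order
// in place, unit by unit, and throw a RangeError when the buffer length is
// not a multiple of the unit size.
function _exampleSwap () {
  var b = Buffer.from([0x01, 0x02, 0x03, 0x04])
  b.swap16() // <Buffer 02 01 04 03> -- each 16-bit unit reversed
  b.swap32() // <Buffer 03 04 01 02> -- the single 32-bit unit reversed
  return b
}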
Buffer.prototype.toString = function toString () {
var length = this.length
if (length === 0) return ''
if (arguments.length === 0) return utf8Slice(this, 0, length)
return slowToString.apply(this, arguments)
}
Buffer.prototype.toLocaleString = Buffer.prototype.toString
Buffer.prototype.equals = function equals (b) {
if (!Buffer.isBuffer(b)) throw new TypeError('Argument must be a Buffer')
if (this === b) return true
return Buffer.compare(this, b) === 0
}
Buffer.prototype.inspect = function inspect () {
var str = ''
var max = exports.INSPECT_MAX_BYTES
str = this.toString('hex', 0, max).replace(/(.{2})/g, '$1 ').trim()
if (this.length > max) str += ' ... '
return '<Buffer ' + str + '>'
}
Buffer.prototype.compare = function compare (target, start, end, thisStart, thisEnd) {
if (isInstance(target, Uint8Array)) {
target = Buffer.from(target, target.offset, target.byteLength)
}
if (!Buffer.isBuffer(target)) {
throw new TypeError(
'The "target" argument must be one of type Buffer or Uint8Array. ' +
'Received type ' + (typeof target)
)
}
if (start === undefined) {
start = 0
}
if (end === undefined) {
end = target ? target.length : 0
}
if (thisStart === undefined) {
thisStart = 0
}
if (thisEnd === undefined) {
thisEnd = this.length
}
if (start < 0 || end > target.length || thisStart < 0 || thisEnd > this.length) {
throw new RangeError('out of range index')
}
if (thisStart >= thisEnd && start >= end) {
return 0
}
if (thisStart >= thisEnd) {
return -1
}
if (start >= end) {
return 1
}
start >>>= 0
end >>>= 0
thisStart >>>= 0
thisEnd >>>= 0
if (this === target) return 0
var x = thisEnd - thisStart
var y = end - start
var len = Math.min(x, y)
var thisCopy = this.slice(thisStart, thisEnd)
var targetCopy = target.slice(start, end)
for (var i = 0; i < len; ++i) {
if (thisCopy[i] !== targetCopy[i]) {
x = thisCopy[i]
y = targetCopy[i]
break
}
}
if (x < y) return -1
if (y < x) return 1
return 0
}
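// Illustrative sketch (never invoked): compare returns -1/0/1 based on the
// first differing byte, which makes it usable directly as a sort comparator.
function _exampleCompare () {
  var a = Buffer.from('abc')
  var b = Buffer.from('abd')
  a.compare(b) // -1: first difference is 'c' (0x63) < 'd' (0x64)
  b.compare(a) // 1
  return a.compare(a) // 0
}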
// Finds either the first index of `val` in `buffer` at offset >= `byteOffset`,
// OR the last index of `val` in `buffer` at offset <= `byteOffset`.
//
// Arguments:
// - buffer - a Buffer to search
// - val - a string, Buffer, or number
// - byteOffset - an index into `buffer`; will be clamped to an int32
// - encoding - an optional encoding, relevant if val is a string
// - dir - true for indexOf, false for lastIndexOf
function bidirectionalIndexOf (buffer, val, byteOffset, encoding, dir) {
// Empty buffer means no match
if (buffer.length === 0) return -1
// Normalize byteOffset
if (typeof byteOffset === 'string') {
encoding = byteOffset
byteOffset = 0
} else if (byteOffset > 0x7fffffff) {
byteOffset = 0x7fffffff
} else if (byteOffset < -0x80000000) {
byteOffset = -0x80000000
}
byteOffset = +byteOffset // Coerce to Number.
if (numberIsNaN(byteOffset)) {
    // byteOffset: if it's undefined, null, NaN, "foo", etc, search the whole buffer
byteOffset = dir ? 0 : (buffer.length - 1)
}
// Normalize byteOffset: negative offsets start from the end of the buffer
if (byteOffset < 0) byteOffset = buffer.length + byteOffset
if (byteOffset >= buffer.length) {
if (dir) return -1
else byteOffset = buffer.length - 1
} else if (byteOffset < 0) {
if (dir) byteOffset = 0
else return -1
}
// Normalize val
if (typeof val === 'string') {
val = Buffer.from(val, encoding)
}
// Finally, search either indexOf (if dir is true) or lastIndexOf
if (Buffer.isBuffer(val)) {
// Special case: looking for empty string/buffer always fails
if (val.length === 0) {
return -1
}
return arrayIndexOf(buffer, val, byteOffset, encoding, dir)
} else if (typeof val === 'number') {
val = val & 0xFF // Search for a byte value [0-255]
if (typeof Uint8Array.prototype.indexOf === 'function') {
if (dir) {
return Uint8Array.prototype.indexOf.call(buffer, val, byteOffset)
} else {
return Uint8Array.prototype.lastIndexOf.call(buffer, val, byteOffset)
}
}
return arrayIndexOf(buffer, [ val ], byteOffset, encoding, dir)
}
throw new TypeError('val must be string, number or Buffer')
}
function arrayIndexOf (arr, val, byteOffset, encoding, dir) {
var indexSize = 1
var arrLength = arr.length
var valLength = val.length
if (encoding !== undefined) {
encoding = String(encoding).toLowerCase()
if (encoding === 'ucs2' || encoding === 'ucs-2' ||
encoding === 'utf16le' || encoding === 'utf-16le') {
if (arr.length < 2 || val.length < 2) {
return -1
}
indexSize = 2
arrLength /= 2
valLength /= 2
byteOffset /= 2
}
}
function read (buf, i) {
if (indexSize === 1) {
return buf[i]
} else {
return buf.readUInt16BE(i * indexSize)
}
}
var i
if (dir) {
var foundIndex = -1
for (i = byteOffset; i < arrLength; i++) {
if (read(arr, i) === read(val, foundIndex === -1 ? 0 : i - foundIndex)) {
if (foundIndex === -1) foundIndex = i
if (i - foundIndex + 1 === valLength) return foundIndex * indexSize
} else {
if (foundIndex !== -1) i -= i - foundIndex
foundIndex = -1
}
}
} else {
if (byteOffset + valLength > arrLength) byteOffset = arrLength - valLength
for (i = byteOffset; i >= 0; i--) {
var found = true
for (var j = 0; j < valLength; j++) {
if (read(arr, i + j) !== read(val, j)) {
found = false
break
}
}
if (found) return i
}
}
return -1
}
Buffer.prototype.includes = function includes (val, byteOffset, encoding) {
return this.indexOf(val, byteOffset, encoding) !== -1
}
Buffer.prototype.indexOf = function indexOf (val, byteOffset, encoding) {
return bidirectionalIndexOf(this, val, byteOffset, encoding, true)
}
Buffer.prototype.lastIndexOf = function lastIndexOf (val, byteOffset, encoding) {
return bidirectionalIndexOf(this, val, byteOffset, encoding, false)
}
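// Illustrative sketch (never invoked): val may be a string, a Buffer, or a
// byte value; byteOffset picks the starting point of the search.
function _exampleIndexOf () {
  var b = Buffer.from('this is a test')
  b.indexOf('is') // 2 (inside 'this')
  b.indexOf('is', 3) // 5 (the standalone 'is')
  b.lastIndexOf('is') // 5 (searches backwards from the end)
  return b.includes(0x74) // true: 0x74 is the byte value of 't'
}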
function hexWrite (buf, string, offset, length) {
offset = Number(offset) || 0
var remaining = buf.length - offset
if (!length) {
length = remaining
} else {
length = Number(length)
if (length > remaining) {
length = remaining
}
}
var strLen = string.length
if (length > strLen / 2) {
length = strLen / 2
}
for (var i = 0; i < length; ++i) {
var parsed = parseInt(string.substr(i * 2, 2), 16)
if (numberIsNaN(parsed)) return i
buf[offset + i] = parsed
}
return i
}
function utf8Write (buf, string, offset, length) {
return blitBuffer(utf8ToBytes(string, buf.length - offset), buf, offset, length)
}
function asciiWrite (buf, string, offset, length) {
return blitBuffer(asciiToBytes(string), buf, offset, length)
}
function latin1Write (buf, string, offset, length) {
return asciiWrite(buf, string, offset, length)
}
function base64Write (buf, string, offset, length) {
return blitBuffer(base64ToBytes(string), buf, offset, length)
}
function ucs2Write (buf, string, offset, length) {
return blitBuffer(utf16leToBytes(string, buf.length - offset), buf, offset, length)
}
Buffer.prototype.write = function write (string, offset, length, encoding) {
// Buffer#write(string)
if (offset === undefined) {
encoding = 'utf8'
length = this.length
offset = 0
// Buffer#write(string, encoding)
} else if (length === undefined && typeof offset === 'string') {
encoding = offset
length = this.length
offset = 0
// Buffer#write(string, offset[, length][, encoding])
} else if (isFinite(offset)) {
offset = offset >>> 0
if (isFinite(length)) {
length = length >>> 0
if (encoding === undefined) encoding = 'utf8'
} else {
encoding = length
length = undefined
}
} else {
throw new Error(
'Buffer.write(string, encoding, offset[, length]) is no longer supported'
)
}
var remaining = this.length - offset
if (length === undefined || length > remaining) length = remaining
if ((string.length > 0 && (length < 0 || offset < 0)) || offset > this.length) {
throw new RangeError('Attempt to write outside buffer bounds')
}
if (!encoding) encoding = 'utf8'
var loweredCase = false
for (;;) {
switch (encoding) {
case 'hex':
return hexWrite(this, string, offset, length)
case 'utf8':
case 'utf-8':
return utf8Write(this, string, offset, length)
case 'ascii':
return asciiWrite(this, string, offset, length)
case 'latin1':
case 'binary':
return latin1Write(this, string, offset, length)
case 'base64':
// Warning: maxLength not taken into account in base64Write
return base64Write(this, string, offset, length)
case 'ucs2':
case 'ucs-2':
case 'utf16le':
case 'utf-16le':
return ucs2Write(this, string, offset, length)
default:
if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding)
encoding = ('' + encoding).toLowerCase()
loweredCase = true
}
}
}
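// Illustrative sketch (never invoked): the two most common write overloads.
// write returns the number of bytes actually written, which can be less than
// string.length for multi-byte encodings or a short remaining buffer.
function _exampleWrite () {
  var b = Buffer.from([0, 0, 0, 0, 0, 0])
  b.write('abc') // 3: Buffer#write(string), utf8 at offset 0
  b.write('ff', 4, 'hex') // 1: Buffer#write(string, offset, encoding)
  return b // <Buffer 61 62 63 00 ff 00>
}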
Buffer.prototype.toJSON = function toJSON () {
return {
type: 'Buffer',
data: Array.prototype.slice.call(this._arr || this, 0)
}
}
function base64Slice (buf, start, end) {
if (start === 0 && end === buf.length) {
return base64.fromByteArray(buf)
} else {
return base64.fromByteArray(buf.slice(start, end))
}
}
function utf8Slice (buf, start, end) {
end = Math.min(buf.length, end)
var res = []
var i = start
while (i < end) {
var firstByte = buf[i]
var codePoint = null
var bytesPerSequence = (firstByte > 0xEF) ? 4
: (firstByte > 0xDF) ? 3
: (firstByte > 0xBF) ? 2
: 1
if (i + bytesPerSequence <= end) {
var secondByte, thirdByte, fourthByte, tempCodePoint
switch (bytesPerSequence) {
case 1:
if (firstByte < 0x80) {
codePoint = firstByte
}
break
case 2:
secondByte = buf[i + 1]
if ((secondByte & 0xC0) === 0x80) {
tempCodePoint = (firstByte & 0x1F) << 0x6 | (secondByte & 0x3F)
if (tempCodePoint > 0x7F) {
codePoint = tempCodePoint
}
}
break
case 3:
secondByte = buf[i + 1]
thirdByte = buf[i + 2]
if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80) {
tempCodePoint = (firstByte & 0xF) << 0xC | (secondByte & 0x3F) << 0x6 | (thirdByte & 0x3F)
if (tempCodePoint > 0x7FF && (tempCodePoint < 0xD800 || tempCodePoint > 0xDFFF)) {
codePoint = tempCodePoint
}
}
break
case 4:
secondByte = buf[i + 1]
thirdByte = buf[i + 2]
fourthByte = buf[i + 3]
if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80 && (fourthByte & 0xC0) === 0x80) {
tempCodePoint = (firstByte & 0xF) << 0x12 | (secondByte & 0x3F) << 0xC | (thirdByte & 0x3F) << 0x6 | (fourthByte & 0x3F)
if (tempCodePoint > 0xFFFF && tempCodePoint < 0x110000) {
codePoint = tempCodePoint
}
}
}
}
if (codePoint === null) {
// we did not generate a valid codePoint so insert a
// replacement char (U+FFFD) and advance only 1 byte
codePoint = 0xFFFD
bytesPerSequence = 1
} else if (codePoint > 0xFFFF) {
// encode to utf16 (surrogate pair dance)
codePoint -= 0x10000
res.push(codePoint >>> 10 & 0x3FF | 0xD800)
codePoint = 0xDC00 | codePoint & 0x3FF
}
res.push(codePoint)
i += bytesPerSequence
}
return decodeCodePointsArray(res)
}
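// Illustrative sketch (never invoked): invalid UTF-8 sequences decode to the
// replacement character U+FFFD instead of throwing, advancing one byte.
function _exampleUtf8Decode () {
  // 0xC3 announces a 2-byte sequence, but 0x28 is not a continuation byte.
  return Buffer.from([0x61, 0xC3, 0x28, 0x62]).toString('utf8') // 'a\ufffd(b'
}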
// Based on http://stackoverflow.com/a/22747272/680742, the browser with
// the lowest limit is Chrome, with 0x10000 args.
// We go one order of magnitude lower (0x1000), for safety
var MAX_ARGUMENTS_LENGTH = 0x1000
function decodeCodePointsArray (codePoints) {
var len = codePoints.length
if (len <= MAX_ARGUMENTS_LENGTH) {
return String.fromCharCode.apply(String, codePoints) // avoid extra slice()
}
// Decode in chunks to avoid "call stack size exceeded".
var res = ''
var i = 0
while (i < len) {
res += String.fromCharCode.apply(
String,
codePoints.slice(i, i += MAX_ARGUMENTS_LENGTH)
)
}
return res
}
function asciiSlice (buf, start, end) {
var ret = ''
end = Math.min(buf.length, end)
for (var i = start; i < end; ++i) {
ret += String.fromCharCode(buf[i] & 0x7F)
}
return ret
}
function latin1Slice (buf, start, end) {
var ret = ''
end = Math.min(buf.length, end)
for (var i = start; i < end; ++i) {
ret += String.fromCharCode(buf[i])
}
return ret
}
function hexSlice (buf, start, end) {
var len = buf.length
if (!start || start < 0) start = 0
if (!end || end < 0 || end > len) end = len
var out = ''
for (var i = start; i < end; ++i) {
out += toHex(buf[i])
}
return out
}
function utf16leSlice (buf, start, end) {
var bytes = buf.slice(start, end)
var res = ''
for (var i = 0; i < bytes.length; i += 2) {
res += String.fromCharCode(bytes[i] + (bytes[i + 1] * 256))
}
return res
}
Buffer.prototype.slice = function slice (start, end) {
var len = this.length
start = ~~start
end = end === undefined ? len : ~~end
if (start < 0) {
start += len
if (start < 0) start = 0
} else if (start > len) {
start = len
}
if (end < 0) {
end += len
if (end < 0) end = 0
} else if (end > len) {
end = len
}
if (end < start) end = start
var newBuf = this.subarray(start, end)
// Return an augmented `Uint8Array` instance
newBuf.__proto__ = Buffer.prototype
return newBuf
}
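// Illustrative sketch (never invoked): slice returns a view onto the same
// memory (via subarray), not a copy, so writes are visible through both.
function _exampleSlice () {
  var b = Buffer.from('buffer')
  var s = b.slice(0, 3)
  s[0] = 0x42 // 'B'
  return b.toString() // 'Buffer' -- the parent sees the mutation
}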
/*
* Need to make sure that buffer isn't trying to write out of bounds.
*/
function checkOffset (offset, ext, length) {
if ((offset % 1) !== 0 || offset < 0) throw new RangeError('offset is not uint')
if (offset + ext > length) throw new RangeError('Trying to access beyond buffer length')
}
Buffer.prototype.readUIntLE = function readUIntLE (offset, byteLength, noAssert) {
offset = offset >>> 0
byteLength = byteLength >>> 0
if (!noAssert) checkOffset(offset, byteLength, this.length)
var val = this[offset]
var mul = 1
var i = 0
while (++i < byteLength && (mul *= 0x100)) {
val += this[offset + i] * mul
}
return val
}
Buffer.prototype.readUIntBE = function readUIntBE (offset, byteLength, noAssert) {
offset = offset >>> 0
byteLength = byteLength >>> 0
if (!noAssert) {
checkOffset(offset, byteLength, this.length)
}
var val = this[offset + --byteLength]
var mul = 1
while (byteLength > 0 && (mul *= 0x100)) {
val += this[offset + --byteLength] * mul
}
return val
}
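// Illustrative sketch (never invoked): the variable-width readers take the
// number of bytes to consume and differ only in byte order.
function _exampleReadUInt () {
  var b = Buffer.from([0x12, 0x34, 0x56])
  b.readUIntBE(0, 3) // 0x123456: most significant byte first
  return b.readUIntLE(0, 3) // 0x563412: least significant byte first
}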
Buffer.prototype.readUInt8 = function readUInt8 (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 1, this.length)
return this[offset]
}
Buffer.prototype.readUInt16LE = function readUInt16LE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 2, this.length)
return this[offset] | (this[offset + 1] << 8)
}
Buffer.prototype.readUInt16BE = function readUInt16BE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 2, this.length)
return (this[offset] << 8) | this[offset + 1]
}
Buffer.prototype.readUInt32LE = function readUInt32LE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 4, this.length)
return ((this[offset]) |
(this[offset + 1] << 8) |
(this[offset + 2] << 16)) +
(this[offset + 3] * 0x1000000)
}
Buffer.prototype.readUInt32BE = function readUInt32BE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 4, this.length)
return (this[offset] * 0x1000000) +
((this[offset + 1] << 16) |
(this[offset + 2] << 8) |
this[offset + 3])
}
Buffer.prototype.readIntLE = function readIntLE (offset, byteLength, noAssert) {
offset = offset >>> 0
byteLength = byteLength >>> 0
if (!noAssert) checkOffset(offset, byteLength, this.length)
var val = this[offset]
var mul = 1
var i = 0
while (++i < byteLength && (mul *= 0x100)) {
val += this[offset + i] * mul
}
mul *= 0x80
if (val >= mul) val -= Math.pow(2, 8 * byteLength)
return val
}
Buffer.prototype.readIntBE = function readIntBE (offset, byteLength, noAssert) {
offset = offset >>> 0
byteLength = byteLength >>> 0
if (!noAssert) checkOffset(offset, byteLength, this.length)
var i = byteLength
var mul = 1
var val = this[offset + --i]
while (i > 0 && (mul *= 0x100)) {
val += this[offset + --i] * mul
}
mul *= 0x80
if (val >= mul) val -= Math.pow(2, 8 * byteLength)
return val
}
Buffer.prototype.readInt8 = function readInt8 (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 1, this.length)
if (!(this[offset] & 0x80)) return (this[offset])
return ((0xff - this[offset] + 1) * -1)
}
Buffer.prototype.readInt16LE = function readInt16LE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 2, this.length)
var val = this[offset] | (this[offset + 1] << 8)
return (val & 0x8000) ? val | 0xFFFF0000 : val
}
Buffer.prototype.readInt16BE = function readInt16BE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 2, this.length)
var val = this[offset + 1] | (this[offset] << 8)
return (val & 0x8000) ? val | 0xFFFF0000 : val
}
Buffer.prototype.readInt32LE = function readInt32LE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 4, this.length)
return (this[offset]) |
(this[offset + 1] << 8) |
(this[offset + 2] << 16) |
(this[offset + 3] << 24)
}
Buffer.prototype.readInt32BE = function readInt32BE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 4, this.length)
return (this[offset] << 24) |
(this[offset + 1] << 16) |
(this[offset + 2] << 8) |
(this[offset + 3])
}
Buffer.prototype.readFloatLE = function readFloatLE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 4, this.length)
return ieee754.read(this, offset, true, 23, 4)
}
Buffer.prototype.readFloatBE = function readFloatBE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 4, this.length)
return ieee754.read(this, offset, false, 23, 4)
}
Buffer.prototype.readDoubleLE = function readDoubleLE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 8, this.length)
return ieee754.read(this, offset, true, 52, 8)
}
Buffer.prototype.readDoubleBE = function readDoubleBE (offset, noAssert) {
offset = offset >>> 0
if (!noAssert) checkOffset(offset, 8, this.length)
return ieee754.read(this, offset, false, 52, 8)
}
function checkInt (buf, value, offset, ext, max, min) {
if (!Buffer.isBuffer(buf)) throw new TypeError('"buffer" argument must be a Buffer instance')
if (value > max || value < min) throw new RangeError('"value" argument is out of bounds')
if (offset + ext > buf.length) throw new RangeError('Index out of range')
}
Buffer.prototype.writeUIntLE = function writeUIntLE (value, offset, byteLength, noAssert) {
value = +value
offset = offset >>> 0
byteLength = byteLength >>> 0
if (!noAssert) {
var maxBytes = Math.pow(2, 8 * byteLength) - 1
checkInt(this, value, offset, byteLength, maxBytes, 0)
}
var mul = 1
var i = 0
this[offset] = value & 0xFF
while (++i < byteLength && (mul *= 0x100)) {
this[offset + i] = (value / mul) & 0xFF
}
return offset + byteLength
}
Buffer.prototype.writeUIntBE = function writeUIntBE (value, offset, byteLength, noAssert) {
value = +value
offset = offset >>> 0
byteLength = byteLength >>> 0
if (!noAssert) {
var maxBytes = Math.pow(2, 8 * byteLength) - 1
checkInt(this, value, offset, byteLength, maxBytes, 0)
}
var i = byteLength - 1
var mul = 1
this[offset + i] = value & 0xFF
while (--i >= 0 && (mul *= 0x100)) {
this[offset + i] = (value / mul) & 0xFF
}
return offset + byteLength
}
Buffer.prototype.writeUInt8 = function writeUInt8 (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 1, 0xff, 0)
this[offset] = (value & 0xff)
return offset + 1
}
Buffer.prototype.writeUInt16LE = function writeUInt16LE (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0)
this[offset] = (value & 0xff)
this[offset + 1] = (value >>> 8)
return offset + 2
}
Buffer.prototype.writeUInt16BE = function writeUInt16BE (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0)
this[offset] = (value >>> 8)
this[offset + 1] = (value & 0xff)
return offset + 2
}
Buffer.prototype.writeUInt32LE = function writeUInt32LE (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0)
this[offset + 3] = (value >>> 24)
this[offset + 2] = (value >>> 16)
this[offset + 1] = (value >>> 8)
this[offset] = (value & 0xff)
return offset + 4
}
Buffer.prototype.writeUInt32BE = function writeUInt32BE (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0)
this[offset] = (value >>> 24)
this[offset + 1] = (value >>> 16)
this[offset + 2] = (value >>> 8)
this[offset + 3] = (value & 0xff)
return offset + 4
}
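// Illustrative sketch (never invoked): the fixed-width writers return the
// offset just past the bytes written, so sequential calls can be chained.
function _exampleWriteUInt32 () {
  var b = Buffer.from([0, 0, 0, 0])
  var next = b.writeUInt32BE(0xdeadbeef, 0) // b is now <Buffer de ad be ef>
  return next // 4
}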
Buffer.prototype.writeIntLE = function writeIntLE (value, offset, byteLength, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) {
var limit = Math.pow(2, (8 * byteLength) - 1)
checkInt(this, value, offset, byteLength, limit - 1, -limit)
}
var i = 0
var mul = 1
var sub = 0
this[offset] = value & 0xFF
while (++i < byteLength && (mul *= 0x100)) {
if (value < 0 && sub === 0 && this[offset + i - 1] !== 0) {
sub = 1
}
this[offset + i] = ((value / mul) >> 0) - sub & 0xFF
}
return offset + byteLength
}
Buffer.prototype.writeIntBE = function writeIntBE (value, offset, byteLength, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) {
var limit = Math.pow(2, (8 * byteLength) - 1)
checkInt(this, value, offset, byteLength, limit - 1, -limit)
}
var i = byteLength - 1
var mul = 1
var sub = 0
this[offset + i] = value & 0xFF
while (--i >= 0 && (mul *= 0x100)) {
if (value < 0 && sub === 0 && this[offset + i + 1] !== 0) {
sub = 1
}
this[offset + i] = ((value / mul) >> 0) - sub & 0xFF
}
return offset + byteLength
}
Buffer.prototype.writeInt8 = function writeInt8 (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 1, 0x7f, -0x80)
if (value < 0) value = 0xff + value + 1
this[offset] = (value & 0xff)
return offset + 1
}
Buffer.prototype.writeInt16LE = function writeInt16LE (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000)
this[offset] = (value & 0xff)
this[offset + 1] = (value >>> 8)
return offset + 2
}
Buffer.prototype.writeInt16BE = function writeInt16BE (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000)
this[offset] = (value >>> 8)
this[offset + 1] = (value & 0xff)
return offset + 2
}
Buffer.prototype.writeInt32LE = function writeInt32LE (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)
this[offset] = (value & 0xff)
this[offset + 1] = (value >>> 8)
this[offset + 2] = (value >>> 16)
this[offset + 3] = (value >>> 24)
return offset + 4
}
Buffer.prototype.writeInt32BE = function writeInt32BE (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)
if (value < 0) value = 0xffffffff + value + 1
this[offset] = (value >>> 24)
this[offset + 1] = (value >>> 16)
this[offset + 2] = (value >>> 8)
this[offset + 3] = (value & 0xff)
return offset + 4
}
function checkIEEE754 (buf, value, offset, ext, max, min) {
if (offset + ext > buf.length) throw new RangeError('Index out of range')
if (offset < 0) throw new RangeError('Index out of range')
}
function writeFloat (buf, value, offset, littleEndian, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) {
checkIEEE754(buf, value, offset, 4, 3.4028234663852886e+38, -3.4028234663852886e+38)
}
ieee754.write(buf, value, offset, littleEndian, 23, 4)
return offset + 4
}
Buffer.prototype.writeFloatLE = function writeFloatLE (value, offset, noAssert) {
return writeFloat(this, value, offset, true, noAssert)
}
Buffer.prototype.writeFloatBE = function writeFloatBE (value, offset, noAssert) {
return writeFloat(this, value, offset, false, noAssert)
}
function writeDouble (buf, value, offset, littleEndian, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert) {
checkIEEE754(buf, value, offset, 8, 1.7976931348623157E+308, -1.7976931348623157E+308)
}
ieee754.write(buf, value, offset, littleEndian, 52, 8)
return offset + 8
}
Buffer.prototype.writeDoubleLE = function writeDoubleLE (value, offset, noAssert) {
return writeDouble(this, value, offset, true, noAssert)
}
Buffer.prototype.writeDoubleBE = function writeDoubleBE (value, offset, noAssert) {
return writeDouble(this, value, offset, false, noAssert)
}
// copy(targetBuffer, targetStart=0, sourceStart=0, sourceEnd=buffer.length)
Buffer.prototype.copy = function copy (target, targetStart, start, end) {
if (!Buffer.isBuffer(target)) throw new TypeError('argument should be a Buffer')
if (!start) start = 0
if (!end && end !== 0) end = this.length
if (targetStart >= target.length) targetStart = target.length
if (!targetStart) targetStart = 0
if (end > 0 && end < start) end = start
// Copy 0 bytes; we're done
if (end === start) return 0
if (target.length === 0 || this.length === 0) return 0
// Fatal error conditions
if (targetStart < 0) {
throw new RangeError('targetStart out of bounds')
}
if (start < 0 || start >= this.length) throw new RangeError('Index out of range')
if (end < 0) throw new RangeError('sourceEnd out of bounds')
// Are we oob?
if (end > this.length) end = this.length
if (target.length - targetStart < end - start) {
end = target.length - targetStart + start
}
var len = end - start
if (this === target && typeof Uint8Array.prototype.copyWithin === 'function') {
// Use built-in when available, missing from IE11
this.copyWithin(targetStart, start, end)
} else if (this === target && start < targetStart && targetStart < end) {
// descending copy from end
for (var i = len - 1; i >= 0; --i) {
target[i + targetStart] = this[i + start]
}
} else {
Uint8Array.prototype.set.call(
target,
this.subarray(start, end),
targetStart
)
}
return len
}
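// Illustrative sketch (never invoked): copy handles overlapping ranges within
// the same buffer correctly (copyWithin, or the descending loop on IE11).
function _exampleCopy () {
  var b = Buffer.from('abcdef')
  b.copy(b, 2, 0, 4) // returns 4 (bytes copied)
  return b.toString() // 'ababcd'
}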
// Usage:
// buffer.fill(number[, offset[, end]])
// buffer.fill(buffer[, offset[, end]])
// buffer.fill(string[, offset[, end]][, encoding])
Buffer.prototype.fill = function fill (val, start, end, encoding) {
// Handle string cases:
if (typeof val === 'string') {
if (typeof start === 'string') {
encoding = start
start = 0
end = this.length
} else if (typeof end === 'string') {
encoding = end
end = this.length
}
if (encoding !== undefined && typeof encoding !== 'string') {
throw new TypeError('encoding must be a string')
}
if (typeof encoding === 'string' && !Buffer.isEncoding(encoding)) {
throw new TypeError('Unknown encoding: ' + encoding)
}
if (val.length === 1) {
var code = val.charCodeAt(0)
if ((encoding === 'utf8' && code < 128) ||
encoding === 'latin1') {
// Fast path: If `val` fits into a single byte, use that numeric value.
val = code
}
}
} else if (typeof val === 'number') {
val = val & 255
}
  // Invalid ranges are not set to a default, so we can range-check early.
if (start < 0 || this.length < start || this.length < end) {
throw new RangeError('Out of range index')
}
if (end <= start) {
return this
}
start = start >>> 0
end = end === undefined ? this.length : end >>> 0
if (!val) val = 0
var i
if (typeof val === 'number') {
for (i = start; i < end; ++i) {
this[i] = val
}
} else {
var bytes = Buffer.isBuffer(val)
? val
: Buffer.from(val, encoding)
var len = bytes.length
if (len === 0) {
throw new TypeError('The value "' + val +
'" is invalid for argument "value"')
}
for (i = 0; i < end - start; ++i) {
this[i + start] = bytes[i % len]
}
}
return this
}
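// Illustrative sketch (never invoked): numbers fill byte-by-byte; strings and
// buffers repeat as a pattern until end is reached.
function _exampleFill () {
  var b = Buffer.from([0, 0, 0, 0, 0])
  b.fill('ab') // <Buffer 61 62 61 62 61> -- pattern repeats
  b.fill(0, 1, 3) // <Buffer 61 00 00 62 61> -- numeric fill over [1, 3)
  return b
}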
// HELPER FUNCTIONS
// ================
var INVALID_BASE64_RE = /[^+/0-9A-Za-z-_]/g
function base64clean (str) {
// Node takes equal signs as end of the Base64 encoding
str = str.split('=')[0]
// Node strips out invalid characters like \n and \t from the string, base64-js does not
str = str.trim().replace(INVALID_BASE64_RE, '')
// Node converts strings with length < 2 to ''
if (str.length < 2) return ''
// Node allows for non-padded base64 strings (missing trailing ===), base64-js does not
while (str.length % 4 !== 0) {
str = str + '='
}
return str
}
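// Illustrative sketch (never invoked): base64clean normalizes input so that
// the stricter base64-js decoder accepts what Node would accept.
function _exampleBase64Clean () {
  base64clean(' aGk \n') // 'aGk=' -- whitespace stripped, padding restored
  return base64clean('aGk=extra') // 'aGk=' -- '=' terminates the encoding
}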
function toHex (n) {
if (n < 16) return '0' + n.toString(16)
return n.toString(16)
}
function utf8ToBytes (string, units) {
units = units || Infinity
var codePoint
var length = string.length
var leadSurrogate = null
var bytes = []
for (var i = 0; i < length; ++i) {
codePoint = string.charCodeAt(i)
// is surrogate component
if (codePoint > 0xD7FF && codePoint < 0xE000) {
// last char was a lead
if (!leadSurrogate) {
// no lead yet
if (codePoint > 0xDBFF) {
// unexpected trail
if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)
continue
} else if (i + 1 === length) {
// unpaired lead
if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)
continue
}
// valid lead
leadSurrogate = codePoint
continue
}
// 2 leads in a row
if (codePoint < 0xDC00) {
if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)
leadSurrogate = codePoint
continue
}
// valid surrogate pair
codePoint = (leadSurrogate - 0xD800 << 10 | codePoint - 0xDC00) + 0x10000
} else if (leadSurrogate) {
// valid bmp char, but last char was a lead
if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)
}
leadSurrogate = null
// encode utf8
if (codePoint < 0x80) {
if ((units -= 1) < 0) break
bytes.push(codePoint)
} else if (codePoint < 0x800) {
if ((units -= 2) < 0) break
bytes.push(
codePoint >> 0x6 | 0xC0,
codePoint & 0x3F | 0x80
)
} else if (codePoint < 0x10000) {
if ((units -= 3) < 0) break
bytes.push(
codePoint >> 0xC | 0xE0,
codePoint >> 0x6 & 0x3F | 0x80,
codePoint & 0x3F | 0x80
)
} else if (codePoint < 0x110000) {
if ((units -= 4) < 0) break
bytes.push(
codePoint >> 0x12 | 0xF0,
codePoint >> 0xC & 0x3F | 0x80,
codePoint >> 0x6 & 0x3F | 0x80,
codePoint & 0x3F | 0x80
)
} else {
throw new Error('Invalid code point')
}
}
return bytes
}
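// Illustrative sketch (never invoked): lone or mismatched surrogate halves
// encode as U+FFFD (EF BF BD); valid pairs combine into one 4-byte sequence.
function _exampleUtf8Encode () {
  utf8ToBytes('\ud83d\ude00') // [0xF0, 0x9F, 0x98, 0x80] -- U+1F600
  return utf8ToBytes('\ud800x') // [0xEF, 0xBF, 0xBD, 0x78] -- unpaired lead
}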
function asciiToBytes (str) {
var byteArray = []
for (var i = 0; i < str.length; ++i) {
    // Node's code seems to be doing this and not & 0x7F.
byteArray.push(str.charCodeAt(i) & 0xFF)
}
return byteArray
}
function utf16leToBytes (str, units) {
var c, hi, lo
var byteArray = []
for (var i = 0; i < str.length; ++i) {
if ((units -= 2) < 0) break
c = str.charCodeAt(i)
hi = c >> 8
lo = c % 256
byteArray.push(lo)
byteArray.push(hi)
}
return byteArray
}
function base64ToBytes (str) {
return base64.toByteArray(base64clean(str))
}
function blitBuffer (src, dst, offset, length) {
for (var i = 0; i < length; ++i) {
if ((i + offset >= dst.length) || (i >= src.length)) break
dst[i + offset] = src[i]
}
return i
}
// ArrayBuffer or Uint8Array objects from other contexts (i.e. iframes) do not pass
// the `instanceof` check but they should be treated as of that type.
// See: https://github.com/feross/buffer/issues/166
function isInstance (obj, type) {
return obj instanceof type ||
(obj != null && obj.constructor != null && obj.constructor.name != null &&
obj.constructor.name === type.name)
}
function numberIsNaN (obj) {
// For IE11 support
return obj !== obj // eslint-disable-line no-self-compare
}
}).call(this)}).call(this,require("buffer").Buffer)
},{"base64-js":5,"buffer":25,"ieee754":94}],26:[function(require,module,exports){
'use strict';
var GetIntrinsic = require('get-intrinsic');
var callBind = require('./');
var $indexOf = callBind(GetIntrinsic('String.prototype.indexOf'));
module.exports = function callBoundIntrinsic(name, allowMissing) {
var intrinsic = GetIntrinsic(name, !!allowMissing);
if (typeof intrinsic === 'function' && $indexOf(name, '.prototype.') > -1) {
return callBind(intrinsic);
}
return intrinsic;
};
},{"./":27,"get-intrinsic":90}],27:[function(require,module,exports){
'use strict';
var bind = require('function-bind');
var GetIntrinsic = require('get-intrinsic');
var $apply = GetIntrinsic('%Function.prototype.apply%');
var $call = GetIntrinsic('%Function.prototype.call%');
var $reflectApply = GetIntrinsic('%Reflect.apply%', true) || bind.call($call, $apply);
var $gOPD = GetIntrinsic('%Object.getOwnPropertyDescriptor%', true);
var $defineProperty = GetIntrinsic('%Object.defineProperty%', true);
var $max = GetIntrinsic('%Math.max%');
if ($defineProperty) {
try {
$defineProperty({}, 'a', { value: 1 });
} catch (e) {
// IE 8 has a broken defineProperty
$defineProperty = null;
}
}
module.exports = function callBind(originalFunction) {
var func = $reflectApply(bind, $call, arguments);
if ($gOPD && $defineProperty) {
var desc = $gOPD(func, 'length');
if (desc.configurable) {
// original length, plus the receiver, minus any additional arguments (after the receiver)
$defineProperty(
func,
'length',
{ value: 1 + $max(0, originalFunction.length - (arguments.length - 1)) }
);
}
}
return func;
};
var applyBind = function applyBind() {
return $reflectApply(bind, $apply, arguments);
};
if ($defineProperty) {
$defineProperty(module.exports, 'apply', { value: applyBind });
} else {
module.exports.apply = applyBind;
}
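// Illustrative sketch (never invoked): callBind turns a prototype method into
// a plain function whose first argument becomes the receiver.
function _exampleCallBind() {
  var upper = module.exports(String.prototype.toUpperCase);
  return upper('abc'); // 'ABC', i.e. String.prototype.toUpperCase.call('abc')
}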
},{"function-bind":89,"get-intrinsic":90}],28:[function(require,module,exports){
var pSlice = Array.prototype.slice;
var objectKeys = require('./lib/keys.js');
var isArguments = require('./lib/is_arguments.js');
var deepEqual = module.exports = function (actual, expected, opts) {
if (!opts) opts = {};
// 7.1. All identical values are equivalent, as determined by ===.
if (actual === expected) {
return true;
} else if (actual instanceof Date && expected instanceof Date) {
return actual.getTime() === expected.getTime();
// 7.3. Other pairs that do not both pass typeof value == 'object',
// equivalence is determined by ==.
} else if (!actual || !expected || typeof actual != 'object' && typeof expected != 'object') {
return opts.strict ? actual === expected : actual == expected;
// 7.4. For all other Object pairs, including Array objects, equivalence is
// determined by having the same number of owned properties (as verified
// with Object.prototype.hasOwnProperty.call), the same set of keys
// (although not necessarily the same order), equivalent values for every
// corresponding key, and an identical 'prototype' property. Note: this
// accounts for both named and indexed properties on Arrays.
} else {
return objEquiv(actual, expected, opts);
}
}
function isUndefinedOrNull(value) {
return value === null || value === undefined;
}
function isBuffer (x) {
if (!x || typeof x !== 'object' || typeof x.length !== 'number') return false;
if (typeof x.copy !== 'function' || typeof x.slice !== 'function') {
return false;
}
if (x.length > 0 && typeof x[0] !== 'number') return false;
return true;
}
function objEquiv(a, b, opts) {
var i, key;
if (isUndefinedOrNull(a) || isUndefinedOrNull(b))
return false;
// an identical 'prototype' property.
if (a.prototype !== b.prototype) return false;
//~~~I've managed to break Object.keys through screwy arguments passing.
// Converting to array solves the problem.
if (isArguments(a)) {
if (!isArguments(b)) {
return false;
}
a = pSlice.call(a);
b = pSlice.call(b);
return deepEqual(a, b, opts);
}
if (isBuffer(a)) {
if (!isBuffer(b)) {
return false;
}
if (a.length !== b.length) return false;
for (i = 0; i < a.length; i++) {
if (a[i] !== b[i]) return false;
}
return true;
}
try {
var ka = objectKeys(a),
kb = objectKeys(b);
  } catch (e) { // happens when one is a string literal and the other isn't
return false;
}
// having the same number of owned properties (keys incorporates
// hasOwnProperty)
if (ka.length != kb.length)
return false;
//the same set of keys (although not necessarily the same order),
ka.sort();
kb.sort();
//~~~cheap key test
for (i = ka.length - 1; i >= 0; i--) {
if (ka[i] != kb[i])
return false;
}
//equivalent values for every corresponding key, and
//~~~possibly expensive deep test
for (i = ka.length - 1; i >= 0; i--) {
key = ka[i];
if (!deepEqual(a[key], b[key], opts)) return false;
}
return typeof a === typeof b;
}
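// Illustrative sketch (never invoked): structural equality with optional
// strict (===) comparison of primitives.
function _exampleDeepEqual() {
  deepEqual({ a: [1, 2] }, { a: [1, 2] }); // true: same keys, equivalent values
  deepEqual(1, '1'); // true: loose == comparison by default
  return deepEqual(1, '1', { strict: true }); // false under strict mode
}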
},{"./lib/is_arguments.js":29,"./lib/keys.js":30}],29:[function(require,module,exports){
var supportsArgumentsClass = (function(){
return Object.prototype.toString.call(arguments)
})() == '[object Arguments]';
exports = module.exports = supportsArgumentsClass ? supported : unsupported;
exports.supported = supported;
function supported(object) {
return Object.prototype.toString.call(object) == '[object Arguments]';
};
exports.unsupported = unsupported;
function unsupported(object){
return object &&
typeof object == 'object' &&
typeof object.length == 'number' &&
Object.prototype.hasOwnProperty.call(object, 'callee') &&
!Object.prototype.propertyIsEnumerable.call(object, 'callee') ||
false;
};
},{}],30:[function(require,module,exports){
exports = module.exports = typeof Object.keys === 'function'
? Object.keys : shim;
exports.shim = shim;
function shim (obj) {
var keys = [];
for (var key in obj) keys.push(key);
return keys;
}
},{}],31:[function(require,module,exports){
'use strict';
var keys = require('object-keys');
var hasSymbols = typeof Symbol === 'function' && typeof Symbol('foo') === 'symbol';
var toStr = Object.prototype.toString;
var concat = Array.prototype.concat;
var origDefineProperty = Object.defineProperty;
var isFunction = function (fn) {
return typeof fn === 'function' && toStr.call(fn) === '[object Function]';
};
var arePropertyDescriptorsSupported = function () {
var obj = {};
try {
origDefineProperty(obj, 'x', { enumerable: false, value: obj });
// eslint-disable-next-line no-unused-vars, no-restricted-syntax
for (var _ in obj) { // jscs:ignore disallowUnusedVariables
return false;
}
return obj.x === obj;
} catch (e) { /* this is IE 8. */
return false;
}
};
var supportsDescriptors = origDefineProperty && arePropertyDescriptorsSupported();
var defineProperty = function (object, name, value, predicate) {
if (name in object && (!isFunction(predicate) || !predicate())) {
return;
}
if (supportsDescriptors) {
origDefineProperty(object, name, {
configurable: true,
enumerable: false,
value: value,
writable: true
});
} else {
object[name] = value;
}
};
var defineProperties = function (object, map) {
var predicates = arguments.length > 2 ? arguments[2] : {};
var props = keys(map);
if (hasSymbols) {
props = concat.call(props, Object.getOwnPropertySymbols(map));
}
for (var i = 0; i < props.length; i += 1) {
defineProperty(object, props[i], map[props[i]], predicates[props[i]]);
}
};
defineProperties.supportsDescriptors = !!supportsDescriptors;
module.exports = defineProperties;
},{"object-keys":98}],32:[function(require,module,exports){
module.exports = function () {
for (var i = 0; i < arguments.length; i++) {
if (arguments[i] !== undefined) return arguments[i];
}
};
},{}],33:[function(require,module,exports){
'use strict';
var ToNumber = require('./ToNumber');
var ToPrimitive = require('./ToPrimitive');
var Type = require('./Type');
// https://262.ecma-international.org/5.1/#sec-11.9.3
module.exports = function AbstractEqualityComparison(x, y) {
var xType = Type(x);
var yType = Type(y);
if (xType === yType) {
    return x === y; // ES6+ specified this shortcut anyway.
}
if (x == null && y == null) {
return true;
}
if (xType === 'Number' && yType === 'String') {
return AbstractEqualityComparison(x, ToNumber(y));
}
if (xType === 'String' && yType === 'Number') {
return AbstractEqualityComparison(ToNumber(x), y);
}
if (xType === 'Boolean') {
return AbstractEqualityComparison(ToNumber(x), y);
}
if (yType === 'Boolean') {
return AbstractEqualityComparison(x, ToNumber(y));
}
if ((xType === 'String' || xType === 'Number') && yType === 'Object') {
return AbstractEqualityComparison(x, ToPrimitive(y));
}
if (xType === 'Object' && (yType === 'String' || yType === 'Number')) {
return AbstractEqualityComparison(ToPrimitive(x), y);
}
return false;
};
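// Illustrative sketch (never invoked): the == coercion ladder in action.
function _exampleAbstractEquality() {
  var eq = module.exports;
  eq('1', 1); // true: the string side is passed through ToNumber
  eq(true, 1); // true: booleans are converted to numbers first
  return eq(null, undefined); // true: handled by the x == null shortcut
}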
},{"./ToNumber":63,"./ToPrimitive":65,"./Type":70}],34:[function(require,module,exports){
'use strict';
var GetIntrinsic = require('get-intrinsic');
var $Number = GetIntrinsic('%Number%');
var $TypeError = GetIntrinsic('%TypeError%');
var $isNaN = require('../helpers/isNaN');
var $isFinite = require('../helpers/isFinite');
var isPrefixOf = require('../helpers/isPrefixOf');
var ToNumber = require('./ToNumber');
var ToPrimitive = require('./ToPrimitive');
var Type = require('./Type');
// https://262.ecma-international.org/5.1/#sec-11.8.5
// eslint-disable-next-line max-statements
module.exports = function AbstractRelationalComparison(x, y, LeftFirst) {
if (Type(LeftFirst) !== 'Boolean') {
throw new $TypeError('Assertion failed: LeftFirst argument must be a Boolean');
}
var px;
var py;
if (LeftFirst) {
px = ToPrimitive(x, $Number);
py = ToPrimitive(y, $Number);
} else {
py = ToPrimitive(y, $Number);
px = ToPrimitive(x, $Number);
}
var bothStrings = Type(px) === 'String' && Type(py) === 'String';
if (!bothStrings) {
var nx = ToNumber(px);
var ny = ToNumber(py);
if ($isNaN(nx) || $isNaN(ny)) {
return undefined;
}
if ($isFinite(nx) && $isFinite(ny) && nx === ny) {
return false;
}
if (nx === Infinity) {
return false;
}
if (ny === Infinity) {
return true;
}
if (ny === -Infinity) {
return false;
}
if (nx === -Infinity) {
return true;
}
return nx < ny; // by now, these are both nonzero, finite, and not equal
}
if (isPrefixOf(py, px)) {
return false;
}
if (isPrefixOf(px, py)) {
return true;
}
return px < py; // both strings, neither a prefix of the other. shortcut for steps c-f
};
},{"../helpers/isFinite":79,"../helpers/isNaN":80,"../helpers/isPrefixOf":81,"./ToNumber":63,"./ToPrimitive":65,"./Type":70,"get-intrinsic":90}],35:[function(require,module,exports){
'use strict';
var GetIntrinsic = require('get-intrinsic');
var $TypeError = GetIntrinsic('%TypeError%');
// http://262.ecma-international.org/5.1/#sec-9.10
module.exports = function CheckObjectCoercible(value, optMessage) {
if (value == null) {
throw new $TypeError(optMessage || ('Cannot call method on ' + value));
}
return value;
};
},{"get-intrinsic":90}],36:[function(require,module,exports){
'use strict';
var GetIntrinsic = require('get-intrinsic');
var $EvalError = GetIntrinsic('%EvalError%');
var DayWithinYear = require('./DayWithinYear');
var InLeapYear = require('./InLeapYear');
var MonthFromTime = require('./MonthFromTime');
// https://262.ecma-international.org/5.1/#sec-15.9.1.5
module.exports = function DateFromTime(t) {
var m = MonthFromTime(t);
var d = DayWithinYear(t);
if (m === 0) {
return d + 1;
}
if (m === 1) {
return d - 30;
}
var leap = InLeapYear(t);
if (m === 2) {
return d - 58 - leap;
}
if (m === 3) {
return d - 89 - leap;
}
if (m === 4) {
return d - 119 - leap;
}
if (m === 5) {
return d - 150 - leap;
}
if (m === 6) {
return d - 180 - leap;
}
if (m === 7) {
return d - 211 - leap;
}
if (m === 8) {
return d - 242 - leap;
}
if (m === 9) {
return d - 272 - leap;
}
if (m === 10) {
return d - 303 - leap;
}
if (m === 11) {
return d - 333 - leap;
}
throw new $EvalError('Assertion failed: MonthFromTime returned an impossible value: ' + m);
};
},{"./DayWithinYear":39,"./InLeapYear":43,"./MonthFromTime":53,"get-intrinsic":90}],37:[function(require,module,exports){
'use strict';
var floor = require('./floor');
var msPerDay = require('../helpers/timeConstants').msPerDay;
// https://262.ecma-international.org/5.1/#sec-15.9.1.2
module.exports = function Day(t) {
return floor(t / msPerDay);
};
},{"../helpers/timeConstants":85,"./floor":74}],38:[function(require,module,exports){
'use strict';
var floor = require('./floor');
// https://262.ecma-international.org/5.1/#sec-15.9.1.3
module.exports = function DayFromYear(y) {
return (365 * (y - 1970)) + floor((y - 1969) / 4) - floor((y - 1901) / 100) + floor((y - 1601) / 400);
};
},{"./floor":74}],39:[function(require,module,exports){
'use strict';
var Day = require('./Day');
var DayFromYear = require('./DayFromYear');
var YearFromTime = require('./YearFromTime');
// https://262.ecma-international.org/5.1/#sec-15.9.1.4
module.exports = function DayWithinYear(t) {
return Day(t) - DayFromYear(YearFromTime(t));
};
},{"./Day":37,"./DayFromYear":38,"./YearFromTime":72}],40:[function(require,module,exports){
'use strict';
var modulo = require('./modulo');
// https://262.ecma-international.org/5.1/#sec-15.9.1.3
module.exports = function DaysInYear(y) {
if (modulo(y, 4) !== 0) {
return 365;
}
if (modulo(y, 100) !== 0) {
return 366;
}
if (modulo(y, 400) !== 0) {
return 365;
}
return 366;
};
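// Illustrative sketch (never invoked): the Gregorian leap-year rules.
function _exampleDaysInYear() {
  var days = module.exports;
  days(1900); // 365: divisible by 100 but not by 400
  days(2000); // 366: divisible by 400
  return days(2004); // 366: divisible by 4 but not by 100
}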
},{"./modulo":75}],41:[function(require,module,exports){
'use strict';
var GetIntrinsic = require('get-intrinsic');
var $TypeError = GetIntrinsic('%TypeError%');
var Type = require('./Type');
var IsDataDescriptor = require('./IsDataDescriptor');
var IsAccessorDescriptor = require('./IsAccessorDescriptor');
var assertRecord = require('../helpers/assertRecord');
// https://262.ecma-international.org/5.1/#sec-8.10.4
module.exports = function FromPropertyDescriptor(Desc) {
if (typeof Desc === 'undefined') {
return Desc;
}
assertRecord(Type, 'Property Descriptor', 'Desc', Desc);
if (IsDataDescriptor(Desc)) {
return {
value: Desc['[[Value]]'],
writable: !!Desc['[[Writable]]'],
enumerable: !!Desc['[[Enumerable]]'],
configurable: !!Desc['[[Configurable]]']
};
} else if (IsAccessorDescriptor(Desc)) {
return {
get: Desc['[[Get]]'],
set: Desc['[[Set]]'],
enumerable: !!Desc['[[Enumerable]]'],
configurable: !!Desc['[[Configurable]]']
};
} else {
throw new $TypeError('FromPropertyDescriptor must be called with a fully populated Property Descriptor');
}
};
},{"../helpers/assertRecord":78,"./IsAccessorDescriptor":44,"./IsDataDescriptor":46,"./Type":70,"get-intrinsic":90}],42:[function(require,module,exports){
'use strict';
var floor = require('./floor');
var modulo = require('./modulo');
var timeConstants = require('../helpers/timeConstants');
var msPerHour = timeConstants.msPerHour;
var HoursPerDay = timeConstants.HoursPerDay;
// https://262.ecma-international.org/5.1/#sec-15.9.1.10
module.exports = function HourFromTime(t) {
return modulo(floor(t / msPerHour), HoursPerDay);
};
},{"../helpers/timeConstants":85,"./floor":74,"./modulo":75}],43:[function(require,module,exports){
'use strict';
var GetIntrinsic = require('get-intrinsic');
var $EvalError = GetIntrinsic('%EvalError%');
var DaysInYear = require('./DaysInYear');
var YearFromTime = require('./YearFromTime');
// https://262.ecma-international.org/5.1/#sec-15.9.1.3
module.exports = function InLeapYear(t) {
var days = DaysInYear(YearFromTime(t));
if (days === 365) {
return 0;
}
if (days === 366) {
return 1;
}
throw new $EvalError('Assertion failed: there are not 365 or 366 days in a year, got: ' + days);
};
},{"./DaysInYear":40,"./YearFromTime":72,"get-intrinsic":90}],44:[function(require,module,exports){
'use strict';
var has = require('has');
var Type = require('./Type');
var assertRecord = require('../helpers/assertRecord');
// https://262.ecma-international.org/5.1/#sec-8.10.1
module.exports = function IsAccessorDescriptor(Desc) {
if (typeof Desc === 'undefined') {
return false;
}
assertRecord(Type, 'Property Descriptor', 'Desc', Desc);
if (!has(Desc, '[[Get]]') && !has(Desc, '[[Set]]')) {
return false;
}
return true;
};
},{"../helpers/assertRecord":78,"./Type":70,"has":93}],45:[function(require,module,exports){
'use strict';
// http://262.ecma-international.org/5.1/#sec-9.11
module.exports = require('is-callable');
},{"is-callable":96}],46:[function(require,module,exports){
'use strict';
var has = require('has');
var Type = require('./Type');
var assertRecord = require('../helpers/assertRecord');
// https://262.ecma-international.org/5.1/#sec-8.10.2
module.exports = function IsDataDescriptor(Desc) {
if (typeof Desc === 'undefined') {
return false;
}
assertRecord(Type, 'Property Descriptor', 'Desc', Desc);
if (!has(Desc, '[[Value]]') && !has(Desc, '[[Writable]]')) {
return false;
}
return true;
};
},{"../helpers/assertRecord":78,"./Type":70,"has":93}],47:[function(require,module,exports){
'use strict';
var IsAccessorDescriptor = require('./IsAccessorDescriptor');
var IsDataDescriptor = require('./IsDataDescriptor');
var Type = require('./Type');
var assertRecord = require('../helpers/assertRecord');
// https://262.ecma-international.org/5.1/#sec-8.10.3
module.exports = function IsGenericDescriptor(Desc) {
if (typeof Desc === 'undefined') {
return false;
}
assertRecord(Type, 'Property Descriptor', 'Desc', Desc);
if (!IsAccessorDescriptor(Desc) && !IsDataDescriptor(Desc)) {
return true;
}
return false;
};
},{"../helpers/assertRecord":78,"./IsAccessorDescriptor":44,"./IsDataDescriptor":46,"./Type":70}],48:[function(require,module,exports){
'use strict';
// TODO, semver-major: delete this
var isPropertyDescriptor = require('../helpers/isPropertyDescriptor');
var Type = require('./Type');
var IsDataDescriptor = require('./IsDataDescriptor');
var IsAccessorDescriptor = require('./IsAccessorDescriptor');
// https://262.ecma-international.org/6.0/#sec-property-descriptor-specification-type
module.exports = function IsPropertyDescriptor(Desc) {
return isPropertyDescriptor({
IsDataDescriptor: IsDataDescriptor,
IsAccessorDescriptor: IsAccessorDescriptor,
Type: Type
}, Desc);
};
},{"../helpers/isPropertyDescriptor":82,"./IsAccessorDescriptor":44,"./IsDataDescriptor":46,"./Type":70}],49:[function(require,module,exports){
'use strict';
var $isFinite = require('../helpers/isFinite');
var msPerDay = require('../helpers/timeConstants').msPerDay;
// https://262.ecma-international.org/5.1/#sec-15.9.1.13
module.exports = function MakeDate(day, time) {
if (!$isFinite(day) || !$isFinite(time)) {
return NaN;
}
return (day * msPerDay) + time;
};
},{"../helpers/isFinite":79,"../helpers/timeConstants":85}],50:[function(require,module,exports){
'use strict';
var GetIntrinsic = require('get-intrinsic');
var $DateUTC = GetIntrinsic('%Date.UTC%');
var $isFinite = require('../helpers/isFinite');
var DateFromTime = require('./DateFromTime');
var Day = require('./Day');
var floor = require('./floor');
var modulo = require('./modulo');
var MonthFromTime = require('./MonthFromTime');
var ToInteger = require('./ToInteger');
var YearFromTime = require('./YearFromTime');
// https://262.ecma-international.org/5.1/#sec-15.9.1.12
module.exports = function MakeDay(year, month, date) {
if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) {
return NaN;
}
var y = ToInteger(year);
var m = ToInteger(month);
var dt = ToInteger(date);
var ym = y + floor(m / 12);
var mn = modulo(m, 12);
var t = $DateUTC(ym, mn, 1);
if (YearFromTime(t) !== ym || MonthFromTime(t) !== mn || DateFromTime(t) !== 1) {
return NaN;
}
return Day(t) + dt - 1;
};
},{"../helpers/isFinite":79,"./DateFromTime":36,"./Day":37,"./MonthFromTime":53,"./ToInteger":62,"./YearFromTime":72,"./floor":74,"./modulo":75,"get-intrinsic":90}],51:[function(require,module,exports){
'use strict';
var $isFinite = require('../helpers/isFinite');
var timeConstants = require('../helpers/timeConstants');
var msPerSecond = timeConstants.msPerSecond;
var msPerMinute = timeConstants.msPerMinute;
var msPerHour = timeConstants.msPerHour;
var ToInteger = require('./ToInteger');
// https://262.ecma-international.org/5.1/#sec-15.9.1.11
module.exports = function MakeTime(hour, min, sec, ms) {
if (!$isFinite(hour) || !$isFinite(min) || !$isFinite(sec) || !$isFinite(ms)) {
return NaN;
}
var h = ToInteger(hour);
var m = ToInteger(min);
var s = ToInteger(sec);
var milli = ToInteger(ms);
var t = (h * msPerHour) + (m * msPerMinute) + (s * msPerSecond) + milli;
return t;
};
},{"../helpers/isFinite":79,"../helpers/timeConstants":85,"./ToInteger":62}],52:[function(require,module,exports){
'use strict';
var floor = require('./floor');
var modulo = require('./modulo');
var timeConstants = require('../helpers/timeConstants');
var msPerMinute = timeConstants.msPerMinute;
var MinutesPerHour = timeConstants.MinutesPerHour;
// https://262.ecma-international.org/5.1/#sec-15.9.1.10
module.exports = function MinFromTime(t) {
return modulo(floor(t / msPerMinute), MinutesPerHour);
};
},{"../helpers/timeConstants":85,"./floor":74,"./modulo":75}],53:[function(require,module,exports){
'use strict';
var DayWithinYear = require('./DayWithinYear');
var InLeapYear = require('./InLeapYear');
// https://262.ecma-international.org/5.1/#sec-15.9.1.4
module.exports = function MonthFromTime(t) {
var day = DayWithinYear(t);
if (0 <= day && day < 31) {
return 0;
}
var leap = InLeapYear(t);
if (31 <= day && day < (59 + leap)) {
return 1;
}
if ((59 + leap) <= day && day < (90 + leap)) {
return 2;
}
if ((90 + leap) <= day && day < (120 + leap)) {
return 3;
}
if ((120 + leap) <= day && day < (151 + leap)) {
return 4;
}
if ((151 + leap) <= day && day < (181 + leap)) {
return 5;
}
if ((181 + leap) <= day && day < (212 + leap)) {
return 6;
}
if ((212 + leap) <= day && day < (243 + leap)) {
return 7;
}
if ((243 + leap) <= day && day < (273 + leap)) {
return 8;
}
if ((273 + leap) <= day && day < (304 + leap)) {
return 9;
}
if ((304 + leap) <= day && day < (334 + leap)) {
return 10;
}
if ((334 + leap) <= day && day < (365 + leap)) {
return 11;
}
};
},{"./DayWithinYear":39,"./InLeapYear":43}],54:[function(require,module,exports){
'use strict';
var $isNaN = require('../helpers/isNaN');
// http://262.ecma-international.org/5.1/#sec-9.12
module.exports = function SameValue(x, y) {
if (x === y) { // 0 === -0, but they are not identical.
if (x === 0) { return 1 / x === 1 / y; }
return true;
}
return $isNaN(x) && $isNaN(y);
};
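// Illustrative sketch (never invoked): where SameValue differs from ===.
function _exampleSameValue() {
  var sv = module.exports;
  sv(0, -0); // false: signed zeros are distinguished, unlike ===
  return sv(NaN, NaN); // true: NaN is identical to itself here, unlike ===
}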
},{"../helpers/isNaN":80}],55:[function(require,module,exports){
'use strict';
var floor = require('./floor');
var modulo = require('./modulo');
var timeConstants = require('../helpers/timeConstants');
var msPerSecond = timeConstants.msPerSecond;
var SecondsPerMinute = timeConstants.SecondsPerMinute;
// https://262.ecma-international.org/5.1/#sec-15.9.1.10
module.exports = function SecFromTime(t) {
return modulo(floor(t / msPerSecond), SecondsPerMinute);
};
},{"../helpers/timeConstants":85,"./floor":74,"./modulo":75}],56:[function(require,module,exports){
'use strict';
var Type = require('./Type');
// https://262.ecma-international.org/5.1/#sec-11.9.6
module.exports = function StrictEqualityComparison(x, y) {
var xType = Type(x);
var yType = Type(y);
if (xType !== yType) {
return false;
}
if (xType === 'Undefined' || xType === 'Null') {
return true;
}
return x === y; // shortcut for steps 4-7
};
},{"./Type":70}],57:[function(require,module,exports){
'use strict';
var GetIntrinsic = require('get-intrinsic');
var $Date = GetIntrinsic('%Date%');
var $Number = GetIntrinsic('%Number%');
var $isFinite = require('../helpers/isFinite');
var abs = require('./abs');
var ToNumber = require('./ToNumber');
// https://262.ecma-international.org/5.1/#sec-15.9.1.14
module.exports = function TimeClip(time) {
if (!$isFinite(time) || abs(time) > 8.64e15) {
return NaN;
}
return $Number(new $Date(ToNumber(time)));
};
},{"../helpers/isFinite":79,"./ToNumber":63,"./abs":73,"get-intrinsic":90}],58:[function(require,module,exports){
'use strict';
var msPerDay = require('../helpers/timeConstants').msPerDay;
var DayFromYear = require('./DayFromYear');
// https://262.ecma-international.org/5.1/#sec-15.9.1.3
module.exports = function TimeFromYear(y) {
return msPerDay * DayFromYear(y);
};
},{"../helpers/timeConstants":85,"./DayFromYear":38}],59:[function(require,module,exports){
'use strict';
var modulo = require('./modulo');
var msPerDay = require('../helpers/timeConstants').msPerDay;
// https://262.ecma-international.org/5.1/#sec-15.9.1.2
module.exports = function TimeWithinDay(t) {
return modulo(t, msPerDay);
};
},{"../helpers/timeConstants":85,"./modulo":75}],60:[function(require,module,exports){
'use strict';
// http://262.ecma-international.org/5.1/#sec-9.2
module.exports = function ToBoolean(value) { return !!value; };
},{}],61:[function(require,module,exports){
'use strict';
var ToNumber = require('./ToNumber');
// http://262.ecma-international.org/5.1/#sec-9.5
module.exports = function ToInt32(x) {
return ToNumber(x) >> 0;
};
},{"./ToNumber":63}],62:[function(require,module,exports){
'use strict';
var abs = require('./abs');
var floor = require('./floor');
var ToNumber = require('./ToNumber');
var $isNaN = require('../helpers/isNaN');
var $isFinite = require('../helpers/isFinite');
var $sign = require('../helpers/sign');
// http://262.ecma-international.org/5.1/#sec-9.4
module.exports = function ToInteger(value) {
var number = ToNumber(value);
if ($isNaN(number)) { return 0; }
if (number === 0 || !$isFinite(number)) { return number; }
return $sign(number) * floor(abs(number));
};
},{"../helpers/isFinite":79,"../helpers/isNaN":80,"../helpers/sign":84,"./ToNumber":63,"./abs":73,"./floor":74}],63:[function(require,module,exports){
'use strict';
var ToPrimitive = require('./ToPrimitive');
// http://262.ecma-international.org/5.1/#sec-9.3
module.exports = function ToNumber(value) {
var prim = ToPrimitive(value, Number);
if (typeof prim !== 'string') {
return +prim; // eslint-disable-line no-implicit-coercion
}
// eslint-disable-next-line no-control-regex
var trimmed = prim.replace(/^[ \t\x0b\f\xa0\ufeff\n\r\u2028\u2029\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u0085]+|[ \t\x0b\f\xa0\ufeff\n\r\u2028\u2029\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u0085]+$/g, '');
if ((/^0[ob]|^[+-]0x/).test(trimmed)) {
return NaN;
}
return +trimmed; // eslint-disable-line no-implicit-coercion
};
},{"./ToPrimitive":65}],64:[function(require,module,exports){
'use strict';
var GetIntrinsic = require('get-intrinsic');
var $Object = GetIntrinsic('%Object%');
var CheckObjectCoercible = require('./CheckObjectCoercible');
// http://262.ecma-international.org/5.1/#sec-9.9
module.exports = function ToObject(value) {
CheckObjectCoercible(value);
return $Object(value);
};
},{"./CheckObjectCoercible":35,"get-intrinsic":90}],65:[function(require,module,exports){
'use strict';
// http://262.ecma-international.org/5.1/#sec-9.1
module.exports = require('es-to-primitive/es5');
},{"es-to-primitive/es5":86}],66:[function(require,module,exports){
'use strict';
var has = require('has');
var GetIntrinsic = require('get-intrinsic');
var $TypeError = GetIntrinsic('%TypeError%');
var Type = require('./Type');
var ToBoolean = require('./ToBoolean');
var IsCallable = require('./IsCallable');
// https://262.ecma-international.org/5.1/#sec-8.10.5
module.exports = function ToPropertyDescriptor(Obj) {
if (Type(Obj) !== 'Object') {
throw new $TypeError('ToPropertyDescriptor requires an object');
}
var desc = {};
if (has(Obj, 'enumerable')) {
desc['[[Enumerable]]'] = ToBoolean(Obj.enumerable);
}
if (has(Obj, 'configurable')) {
desc['[[Configurable]]'] = ToBoolean(Obj.configurable);
}
if (has(Obj, 'value')) {
desc['[[Value]]'] = Obj.value;
}
if (has(Obj, 'writable')) {
desc['[[Writable]]'] = ToBoolean(Obj.writable);
}
if (has(Obj, 'get')) {
var getter = Obj.get;
if (typeof getter !== 'undefined' && !IsCallable(getter)) {
throw new $TypeError('getter must be a function');
}
desc['[[Get]]'] = getter;
}
if (has(Obj, 'set')) {
var setter = Obj.set;
if (typeof setter !== 'undefined' && !IsCallable(setter)) {
throw new $TypeError('setter must be a function');
}
desc['[[Set]]'] = setter;
}
if ((has(desc, '[[Get]]') || has(desc, '[[Set]]')) && (has(desc, '[[Value]]') || has(desc, '[[Writable]]'))) {
throw new $TypeError('Invalid property descriptor. Cannot both specify accessors and a value or writable attribute');
}
return desc;
};
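// For illustration:
//   ToPropertyDescriptor({ value: 1, enumerable: 1 })
//   // => { '[[Enumerable]]': true, '[[Value]]': 1 }
//   ToPropertyDescriptor({ value: 1, get: function () {} }) // throws a $TypeError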
},{"./IsCallable":45,"./ToBoolean":60,"./Type":70,"get-intrinsic":90,"has":93}],67:[function(require,module,exports){
'use strict';
var GetIntrinsic = require('get-intrinsic');
var $String = GetIntrinsic('%String%');
// http://262.ecma-international.org/5.1/#sec-9.8
module.exports = function ToString(value) {
return $String(value);
};
},{"get-intrinsic":90}],68:[function(require,module,exports){
'use strict';
var abs = require('./abs');
var floor = require('./floor');
var modulo = require('./modulo');
var ToNumber = require('./ToNumber');
var $isNaN = require('../helpers/isNaN');
var $isFinite = require('../helpers/isFinite');
var $sign = require('../helpers/sign');
// http://262.ecma-international.org/5.1/#sec-9.7
module.exports = function ToUint16(value) {
var number = ToNumber(value);
if ($isNaN(number) || number === 0 || !$isFinite(number)) { return 0; }
var posInt = $sign(number) * floor(abs(number));
return modulo(posInt, 0x10000);
};
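// For illustration: the result wraps modulo 2^16, e.g.
//   ToUint16(-1) === 65535
//   ToUint16(0x1FFFF) === 0xFFFF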
},{"../helpers/isFinite":79,"../helpers/isNaN":80,"../helpers/sign":84,"./ToNumber":63,"./abs":73,"./floor":74,"./modulo":75}],69:[function(require,module,exports){
'use strict';
var ToNumber = require('./ToNumber');
// http://262.ecma-international.org/5.1/#sec-9.6
module.exports = function ToUint32(x) {
return ToNumber(x) >>> 0;
};
},{"./ToNumber":63}],70:[function(require,module,exports){
'use strict';
// https://262.ecma-international.org/5.1/#sec-8
module.exports = function Type(x) {
if (x === null) {
return 'Null';
}
if (typeof x === 'undefined') {
return 'Undefined';
}
if (typeof x === 'function' || typeof x === 'object') {
return 'Object';
}
if (typeof x === 'number') {
return 'Number';
}
if (typeof x === 'boolean') {
return 'Boolean';
}
if (typeof x === 'string') {
return 'String';
}
};
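// For illustration: per the ES5 Type table, callable values are still 'Object', e.g.
//   Type(null) === 'Null'
//   Type(function () {}) === 'Object'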
},{}],71:[function(require,module,exports){
'use strict';
var Day = require('./Day');
var modulo = require('./modulo');
// https://262.ecma-international.org/5.1/#sec-15.9.1.6
module.exports = function WeekDay(t) {
return modulo(Day(t) + 4, 7);
};
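// For illustration: day 0 (1970-01-01) was a Thursday, and the spec numbers
// Sunday as 0, so WeekDay(0) === 4.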
},{"./Day":37,"./modulo":75}],72:[function(require,module,exports){
'use strict';
var GetIntrinsic = require('get-intrinsic');
var $Date = GetIntrinsic('%Date%');
var callBound = require('call-bind/callBound');
var $getUTCFullYear = callBound('Date.prototype.getUTCFullYear');
// https://262.ecma-international.org/5.1/#sec-15.9.1.3
module.exports = function YearFromTime(t) {
// largest y such that this.TimeFromYear(y) <= t
return $getUTCFullYear(new $Date(t));
};
},{"call-bind/callBound":26,"get-intrinsic":90}],73:[function(require,module,exports){
'use strict';
var GetIntrinsic = require('get-intrinsic');
var $abs = GetIntrinsic('%Math.abs%');
// http://262.ecma-international.org/5.1/#sec-5.2
module.exports = function abs(x) {
return $abs(x);
};
},{"get-intrinsic":90}],74:[function(require,module,exports){
'use strict';
// var modulo = require('./modulo');
var $floor = Math.floor;
// http://262.ecma-international.org/5.1/#sec-5.2
module.exports = function floor(x) {
// return x - modulo(x, 1);
return $floor(x);
};
},{}],75:[function(require,module,exports){
'use strict';
var mod = require('../helpers/mod');
// https://262.ecma-international.org/5.1/#sec-5.2
module.exports = function modulo(x, y) {
return mod(x, y);
};
},{"../helpers/mod":83}],76:[function(require,module,exports){
'use strict';
var modulo = require('./modulo');
var msPerSecond = require('../helpers/timeConstants').msPerSecond;
// https://262.ecma-international.org/5.1/#sec-15.9.1.10
module.exports = function msFromTime(t) {
return modulo(t, msPerSecond);
};
},{"../helpers/timeConstants":85,"./modulo":75}],77:[function(require,module,exports){
'use strict';
/* eslint global-require: 0 */
// https://es5.github.io/#x9
module.exports = {
'Abstract Equality Comparison': require('./5/AbstractEqualityComparison'),
'Abstract Relational Comparison': require('./5/AbstractRelationalComparison'),
'Strict Equality Comparison': require('./5/StrictEqualityComparison'),
abs: require('./5/abs'),
CheckObjectCoercible: require('./5/CheckObjectCoercible'),
DateFromTime: require('./5/DateFromTime'),
Day: require('./5/Day'),
DayFromYear: require('./5/DayFromYear'),
DaysInYear: require('./5/DaysInYear'),
DayWithinYear: require('./5/DayWithinYear'),
floor: require('./5/floor'),
FromPropertyDescriptor: require('./5/FromPropertyDescriptor'),
HourFromTime: require('./5/HourFromTime'),
InLeapYear: require('./5/InLeapYear'),
IsAccessorDescriptor: require('./5/IsAccessorDescriptor'),
IsCallable: require('./5/IsCallable'),
IsDataDescriptor: require('./5/IsDataDescriptor'),
IsGenericDescriptor: require('./5/IsGenericDescriptor'),
IsPropertyDescriptor: require('./5/IsPropertyDescriptor'),
MakeDate: require('./5/MakeDate'),
MakeDay: require('./5/MakeDay'),
MakeTime: require('./5/MakeTime'),
MinFromTime: require('./5/MinFromTime'),
modulo: require('./5/modulo'),
MonthFromTime: require('./5/MonthFromTime'),
msFromTime: require('./5/msFromTime'),
SameValue: require('./5/SameValue'),
SecFromTime: require('./5/SecFromTime'),
TimeClip: require('./5/TimeClip'),
TimeFromYear: require('./5/TimeFromYear'),
TimeWithinDay: require('./5/TimeWithinDay'),
ToBoolean: require('./5/ToBoolean'),
ToInt32: require('./5/ToInt32'),
ToInteger: require('./5/ToInteger'),
ToNumber: require('./5/ToNumber'),
ToObject: require('./5/ToObject'),
ToPrimitive: require('./5/ToPrimitive'),
ToPropertyDescriptor: require('./5/ToPropertyDescriptor'),
ToString: require('./5/ToString'),
ToUint16: require('./5/ToUint16'),
ToUint32: require('./5/ToUint32'),
Type: require('./5/Type'),
WeekDay: require('./5/WeekDay'),
YearFromTime: require('./5/YearFromTime')
};
},{"./5/AbstractEqualityComparison":33,"./5/AbstractRelationalComparison":34,"./5/CheckObjectCoercible":35,"./5/DateFromTime":36,"./5/Day":37,"./5/DayFromYear":38,"./5/DayWithinYear":39,"./5/DaysInYear":40,"./5/FromPropertyDescriptor":41,"./5/HourFromTime":42,"./5/InLeapYear":43,"./5/IsAccessorDescriptor":44,"./5/IsCallable":45,"./5/IsDataDescriptor":46,"./5/IsGenericDescriptor":47,"./5/IsPropertyDescriptor":48,"./5/MakeDate":49,"./5/MakeDay":50,"./5/MakeTime":51,"./5/MinFromTime":52,"./5/MonthFromTime":53,"./5/SameValue":54,"./5/SecFromTime":55,"./5/StrictEqualityComparison":56,"./5/TimeClip":57,"./5/TimeFromYear":58,"./5/TimeWithinDay":59,"./5/ToBoolean":60,"./5/ToInt32":61,"./5/ToInteger":62,"./5/ToNumber":63,"./5/ToObject":64,"./5/ToPrimitive":65,"./5/ToPropertyDescriptor":66,"./5/ToString":67,"./5/ToUint16":68,"./5/ToUint32":69,"./5/Type":70,"./5/WeekDay":71,"./5/YearFromTime":72,"./5/abs":73,"./5/floor":74,"./5/modulo":75,"./5/msFromTime":76}],78:[function(require,module,exports){
'use strict';
var GetIntrinsic = require('get-intrinsic');
var $TypeError = GetIntrinsic('%TypeError%');
var $SyntaxError = GetIntrinsic('%SyntaxError%');
var has = require('has');
var predicates = {
// https://262.ecma-international.org/6.0/#sec-property-descriptor-specification-type
'Property Descriptor': function isPropertyDescriptor(Type, Desc) {
if (Type(Desc) !== 'Object') {
return false;
}
var allowed = {
'[[Configurable]]': true,
'[[Enumerable]]': true,
'[[Get]]': true,
'[[Set]]': true,
'[[Value]]': true,
'[[Writable]]': true
};
for (var key in Desc) { // eslint-disable-line
if (has(Desc, key) && !allowed[key]) {
return false;
}
}
var isData = has(Desc, '[[Value]]');
var IsAccessor = has(Desc, '[[Get]]') || has(Desc, '[[Set]]');
if (isData && IsAccessor) {
throw new $TypeError('Property Descriptors may not be both accessor and data descriptors');
}
return true;
}
};
module.exports = function assertRecord(Type, recordType, argumentName, value) {
var predicate = predicates[recordType];
if (typeof predicate !== 'function') {
throw new $SyntaxError('unknown record type: ' + recordType);
}
if (!predicate(Type, value)) {
throw new $TypeError(argumentName + ' must be a ' + recordType);
}
};
},{"get-intrinsic":90,"has":93}],79:[function(require,module,exports){
'use strict';
var $isNaN = Number.isNaN || function (a) { return a !== a; };
module.exports = Number.isFinite || function (x) { return typeof x === 'number' && !$isNaN(x) && x !== Infinity && x !== -Infinity; };
},{}],80:[function(require,module,exports){
'use strict';
module.exports = Number.isNaN || function isNaN(a) {
return a !== a;
};
},{}],81:[function(require,module,exports){
'use strict';
var $strSlice = require('call-bind/callBound')('String.prototype.slice');
module.exports = function isPrefixOf(prefix, string) {
if (prefix === string) {
return true;
}
if (prefix.length > string.length) {
return false;
}
return $strSlice(string, 0, prefix.length) === prefix;
};
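// For illustration:
//   isPrefixOf('abc', 'abcdef') === true
//   isPrefixOf('abc', 'ab') === false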
},{"call-bind/callBound":26}],82:[function(require,module,exports){
'use strict';
var GetIntrinsic = require('get-intrinsic');
var has = require('has');
var $TypeError = GetIntrinsic('%TypeError%');
module.exports = function IsPropertyDescriptor(ES, Desc) {
if (ES.Type(Desc) !== 'Object') {
return false;
}
var allowed = {
'[[Configurable]]': true,
'[[Enumerable]]': true,
'[[Get]]': true,
'[[Set]]': true,
'[[Value]]': true,
'[[Writable]]': true
};
for (var key in Desc) { // eslint-disable-line no-restricted-syntax
if (has(Desc, key) && !allowed[key]) {
return false;
}
}
if (ES.IsDataDescriptor(Desc) && ES.IsAccessorDescriptor(Desc)) {
throw new $TypeError('Property Descriptors may not be both accessor and data descriptors');
}
return true;
};
},{"get-intrinsic":90,"has":93}],83:[function(require,module,exports){
'use strict';
var $floor = Math.floor;
module.exports = function mod(number, modulo) {
var remain = number % modulo;
return $floor(remain >= 0 ? remain : remain + modulo);
};
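// For illustration: unlike the `%` operator, the result takes the sign of the
// modulus, e.g.
//   mod(-1, 3) === 2
//   (-1 % 3) === -1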
},{}],84:[function(require,module,exports){
'use strict';
module.exports = function sign(number) {
return number >= 0 ? 1 : -1;
};
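// Note: sign(0) is 1; the ES5 callers in this bundle (ToInteger, ToUint16)
// check for zero before calling, so that case never matters in practice.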
},{}],85:[function(require,module,exports){
'use strict';
var HoursPerDay = 24;
var MinutesPerHour = 60;
var SecondsPerMinute = 60;
var msPerSecond = 1e3;
var msPerMinute = msPerSecond * SecondsPerMinute;
var msPerHour = msPerMinute * MinutesPerHour;
var msPerDay = 86400000;
module.exports = {
HoursPerDay: HoursPerDay,
MinutesPerHour: MinutesPerHour,
SecondsPerMinute: SecondsPerMinute,
msPerSecond: msPerSecond,
msPerMinute: msPerMinute,
msPerHour: msPerHour,
msPerDay: msPerDay
};
},{}],86:[function(require,module,exports){
'use strict';
var toStr = Object.prototype.toString;
var isPrimitive = require('./helpers/isPrimitive');
var isCallable = require('is-callable');
// http://ecma-international.org/ecma-262/5.1/#sec-8.12.8
var ES5internalSlots = {
'[[DefaultValue]]': function (O) {
var actualHint;
if (arguments.length > 1) {
actualHint = arguments[1];
} else {
actualHint = toStr.call(O) === '[object Date]' ? String : Number;
}
if (actualHint === String || actualHint === Number) {
var methods = actualHint === String ? ['toString', 'valueOf'] : ['valueOf', 'toString'];
var value, i;
for (i = 0; i < methods.length; ++i) {
if (isCallable(O[methods[i]])) {
value = O[methods[i]]();
if (isPrimitive(value)) {
return value;
}
}
}
throw new TypeError('No default value');
}
throw new TypeError('invalid [[DefaultValue]] hint supplied');
}
};
// http://ecma-international.org/ecma-262/5.1/#sec-9.1
module.exports = function ToPrimitive(input) {
if (isPrimitive(input)) {
return input;
}
if (arguments.length > 1) {
return ES5internalSlots['[[DefaultValue]]'](input, arguments[1]);
}
return ES5internalSlots['[[DefaultValue]]'](input);
};
},{"./helpers/isPrimitive":87,"is-callable":96}],87:[function(require,module,exports){
'use strict';
module.exports = function isPrimitive(value) {
return value === null || (typeof value !== 'function' && typeof value !== 'object');
};
},{}],88:[function(require,module,exports){
'use strict';
/* eslint no-invalid-this: 1 */
var ERROR_MESSAGE = 'Function.prototype.bind called on incompatible ';
var slice = Array.prototype.slice;
var toStr = Object.prototype.toString;
var funcType = '[object Function]';
module.exports = function bind(that) {
var target = this;
if (typeof target !== 'function' || toStr.call(target) !== funcType) {
throw new TypeError(ERROR_MESSAGE + target);
}
var args = slice.call(arguments, 1);
var bound;
var binder = function () {
if (this instanceof bound) {
var result = target.apply(
this,
args.concat(slice.call(arguments))
);
if (Object(result) === result) {
return result;
}
return this;
} else {
return target.apply(
that,
args.concat(slice.call(arguments))
);
}
};
var boundLength = Math.max(0, target.length - args.length);
var boundArgs = [];
for (var i = 0; i < boundLength; i++) {
boundArgs.push('$' + i);
}
bound = Function('binder', 'return function (' + boundArgs.join(',') + '){ return binder.apply(this,arguments); }')(binder);
if (target.prototype) {
var Empty = function Empty() {};
Empty.prototype = target.prototype;
bound.prototype = new Empty();
Empty.prototype = null;
}
return bound;
};
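// For illustration, a sketch of typical usage (the target function is the
// receiver, as with native bind):
//   var add = function (a, b) { return this.base + a + b; };
//   var bound = bind.call(add, { base: 1 }, 2);
//   bound(3) === 6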
},{}],89:[function(require,module,exports){
'use strict';
var implementation = require('./implementation');
module.exports = Function.prototype.bind || implementation;
},{"./implementation":88}],90:[function(require,module,exports){
'use strict';
var undefined;
var $SyntaxError = SyntaxError;
var $Function = Function;
var $TypeError = TypeError;
// eslint-disable-next-line consistent-return
var getEvalledConstructor = function (expressionSyntax) {
try {
return $Function('"use strict"; return (' + expressionSyntax + ').constructor;')();
} catch (e) {}
};
var $gOPD = Object.getOwnPropertyDescriptor;
if ($gOPD) {
try {
$gOPD({}, '');
} catch (e) {
$gOPD = null; // this is IE 8, which has a broken gOPD
}
}
var throwTypeError = function () {
throw new $TypeError();
};
var ThrowTypeError = $gOPD
? (function () {
try {
// eslint-disable-next-line no-unused-expressions, no-caller, no-restricted-properties
arguments.callee; // IE 8 does not throw here
return throwTypeError;
} catch (calleeThrows) {
try {
// IE 8 throws on Object.getOwnPropertyDescriptor(arguments, '')
return $gOPD(arguments, 'callee').get;
} catch (gOPDthrows) {
return throwTypeError;
}
}
}())
: throwTypeError;
var hasSymbols = require('has-symbols')();
var getProto = Object.getPrototypeOf || function (x) { return x.__proto__; }; // eslint-disable-line no-proto
var needsEval = {};
var TypedArray = typeof Uint8Array === 'undefined' ? undefined : getProto(Uint8Array);
var INTRINSICS = {
'%AggregateError%': typeof AggregateError === 'undefined' ? undefined : AggregateError,
'%Array%': Array,
'%ArrayBuffer%': typeof ArrayBuffer === 'undefined' ? undefined : ArrayBuffer,
'%ArrayIteratorPrototype%': hasSymbols ? getProto([][Symbol.iterator]()) : undefined,
'%AsyncFromSyncIteratorPrototype%': undefined,
'%AsyncFunction%': needsEval,
'%AsyncGenerator%': needsEval,
'%AsyncGeneratorFunction%': needsEval,
'%AsyncIteratorPrototype%': needsEval,
'%Atomics%': typeof Atomics === 'undefined' ? undefined : Atomics,
'%BigInt%': typeof BigInt === 'undefined' ? undefined : BigInt,
'%Boolean%': Boolean,
'%DataView%': typeof DataView === 'undefined' ? undefined : DataView,
'%Date%': Date,
'%decodeURI%': decodeURI,
'%decodeURIComponent%': decodeURIComponent,
'%encodeURI%': encodeURI,
'%encodeURIComponent%': encodeURIComponent,
'%Error%': Error,
'%eval%': eval, // eslint-disable-line no-eval
'%EvalError%': EvalError,
'%Float32Array%': typeof Float32Array === 'undefined' ? undefined : Float32Array,
'%Float64Array%': typeof Float64Array === 'undefined' ? undefined : Float64Array,
'%FinalizationRegistry%': typeof FinalizationRegistry === 'undefined' ? undefined : FinalizationRegistry,
'%Function%': $Function,
'%GeneratorFunction%': needsEval,
'%Int8Array%': typeof Int8Array === 'undefined' ? undefined : Int8Array,
'%Int16Array%': typeof Int16Array === 'undefined' ? undefined : Int16Array,
'%Int32Array%': typeof Int32Array === 'undefined' ? undefined : Int32Array,
'%isFinite%': isFinite,
'%isNaN%': isNaN,
'%IteratorPrototype%': hasSymbols ? getProto(getProto([][Symbol.iterator]())) : undefined,
'%JSON%': typeof JSON === 'object' ? JSON : undefined,
'%Map%': typeof Map === 'undefined' ? undefined : Map,
'%MapIteratorPrototype%': typeof Map === 'undefined' || !hasSymbols ? undefined : getProto(new Map()[Symbol.iterator]()),
'%Math%': Math,
'%Number%': Number,
'%Object%': Object,
'%parseFloat%': parseFloat,
'%parseInt%': parseInt,
'%Promise%': typeof Promise === 'undefined' ? undefined : Promise,
'%Proxy%': typeof Proxy === 'undefined' ? undefined : Proxy,
'%RangeError%': RangeError,
'%ReferenceError%': ReferenceError,
'%Reflect%': typeof Reflect === 'undefined' ? undefined : Reflect,
'%RegExp%': RegExp,
'%Set%': typeof Set === 'undefined' ? undefined : Set,
'%SetIteratorPrototype%': typeof Set === 'undefined' || !hasSymbols ? undefined : getProto(new Set()[Symbol.iterator]()),
'%SharedArrayBuffer%': typeof SharedArrayBuffer === 'undefined' ? undefined : SharedArrayBuffer,
'%String%': String,
'%StringIteratorPrototype%': hasSymbols ? getProto(''[Symbol.iterator]()) : undefined,
'%Symbol%': hasSymbols ? Symbol : undefined,
'%SyntaxError%': $SyntaxError,
'%ThrowTypeError%': ThrowTypeError,
'%TypedArray%': TypedArray,
'%TypeError%': $TypeError,
'%Uint8Array%': typeof Uint8Array === 'undefined' ? undefined : Uint8Array,
'%Uint8ClampedArray%': typeof Uint8ClampedArray === 'undefined' ? undefined : Uint8ClampedArray,
'%Uint16Array%': typeof Uint16Array === 'undefined' ? undefined : Uint16Array,
'%Uint32Array%': typeof Uint32Array === 'undefined' ? undefined : Uint32Array,
'%URIError%': URIError,
'%WeakMap%': typeof WeakMap === 'undefined' ? undefined : WeakMap,
'%WeakRef%': typeof WeakRef === 'undefined' ? undefined : WeakRef,
'%WeakSet%': typeof WeakSet === 'undefined' ? undefined : WeakSet
};
var doEval = function doEval(name) {
var value;
if (name === '%AsyncFunction%') {
value = getEvalledConstructor('async function () {}');
} else if (name === '%GeneratorFunction%') {
value = getEvalledConstructor('function* () {}');
} else if (name === '%AsyncGeneratorFunction%') {
value = getEvalledConstructor('async function* () {}');
} else if (name === '%AsyncGenerator%') {
var fn = doEval('%AsyncGeneratorFunction%');
if (fn) {
value = fn.prototype;
}
} else if (name === '%AsyncIteratorPrototype%') {
var gen = doEval('%AsyncGenerator%');
if (gen) {
value = getProto(gen.prototype);
}
}
INTRINSICS[name] = value;
return value;
};
var LEGACY_ALIASES = {
'%ArrayBufferPrototype%': ['ArrayBuffer', 'prototype'],
'%ArrayPrototype%': ['Array', 'prototype'],
'%ArrayProto_entries%': ['Array', 'prototype', 'entries'],
'%ArrayProto_forEach%': ['Array', 'prototype', 'forEach'],
'%ArrayProto_keys%': ['Array', 'prototype', 'keys'],
'%ArrayProto_values%': ['Array', 'prototype', 'values'],
'%AsyncFunctionPrototype%': ['AsyncFunction', 'prototype'],
'%AsyncGenerator%': ['AsyncGeneratorFunction', 'prototype'],
'%AsyncGeneratorPrototype%': ['AsyncGeneratorFunction', 'prototype', 'prototype'],
'%BooleanPrototype%': ['Boolean', 'prototype'],
'%DataViewPrototype%': ['DataView', 'prototype'],
'%DatePrototype%': ['Date', 'prototype'],
'%ErrorPrototype%': ['Error', 'prototype'],
'%EvalErrorPrototype%': ['EvalError', 'prototype'],
'%Float32ArrayPrototype%': ['Float32Array', 'prototype'],
'%Float64ArrayPrototype%': ['Float64Array', 'prototype'],
'%FunctionPrototype%': ['Function', 'prototype'],
'%Generator%': ['GeneratorFunction', 'prototype'],
'%GeneratorPrototype%': ['GeneratorFunction', 'prototype', 'prototype'],
'%Int8ArrayPrototype%': ['Int8Array', 'prototype'],
'%Int16ArrayPrototype%': ['Int16Array', 'prototype'],
'%Int32ArrayPrototype%': ['Int32Array', 'prototype'],
'%JSONParse%': ['JSON', 'parse'],
'%JSONStringify%': ['JSON', 'stringify'],
'%MapPrototype%': ['Map', 'prototype'],
'%NumberPrototype%': ['Number', 'prototype'],
'%ObjectPrototype%': ['Object', 'prototype'],
'%ObjProto_toString%': ['Object', 'prototype', 'toString'],
'%ObjProto_valueOf%': ['Object', 'prototype', 'valueOf'],
'%PromisePrototype%': ['Promise', 'prototype'],
'%PromiseProto_then%': ['Promise', 'prototype', 'then'],
'%Promise_all%': ['Promise', 'all'],
'%Promise_reject%': ['Promise', 'reject'],
'%Promise_resolve%': ['Promise', 'resolve'],
'%RangeErrorPrototype%': ['RangeError', 'prototype'],
'%ReferenceErrorPrototype%': ['ReferenceError', 'prototype'],
'%RegExpPrototype%': ['RegExp', 'prototype'],
'%SetPrototype%': ['Set', 'prototype'],
'%SharedArrayBufferPrototype%': ['SharedArrayBuffer', 'prototype'],
'%StringPrototype%': ['String', 'prototype'],
'%SymbolPrototype%': ['Symbol', 'prototype'],
'%SyntaxErrorPrototype%': ['SyntaxError', 'prototype'],
'%TypedArrayPrototype%': ['TypedArray', 'prototype'],
'%TypeErrorPrototype%': ['TypeError', 'prototype'],
'%Uint8ArrayPrototype%': ['Uint8Array', 'prototype'],
'%Uint8ClampedArrayPrototype%': ['Uint8ClampedArray', 'prototype'],
'%Uint16ArrayPrototype%': ['Uint16Array', 'prototype'],
'%Uint32ArrayPrototype%': ['Uint32Array', 'prototype'],
'%URIErrorPrototype%': ['URIError', 'prototype'],
'%WeakMapPrototype%': ['WeakMap', 'prototype'],
'%WeakSetPrototype%': ['WeakSet', 'prototype']
};
var bind = require('function-bind');
var hasOwn = require('has');
var $concat = bind.call(Function.call, Array.prototype.concat);
var $spliceApply = bind.call(Function.apply, Array.prototype.splice);
var $replace = bind.call(Function.call, String.prototype.replace);
var $strSlice = bind.call(Function.call, String.prototype.slice);
/* adapted from https://github.com/lodash/lodash/blob/4.17.15/dist/lodash.js#L6735-L6744 */
var rePropName = /[^%.[\]]+|\[(?:(-?\d+(?:\.\d+)?)|(["'])((?:(?!\2)[^\\]|\\.)*?)\2)\]|(?=(?:\.|\[\])(?:\.|\[\]|%$))/g;
var reEscapeChar = /\\(\\)?/g; /** Used to match backslashes in property paths. */
var stringToPath = function stringToPath(string) {
var first = $strSlice(string, 0, 1);
var last = $strSlice(string, -1);
if (first === '%' && last !== '%') {
throw new $SyntaxError('invalid intrinsic syntax, expected closing `%`');
} else if (last === '%' && first !== '%') {
throw new $SyntaxError('invalid intrinsic syntax, expected opening `%`');
}
var result = [];
$replace(string, rePropName, function (match, number, quote, subString) {
result[result.length] = quote ? $replace(subString, reEscapeChar, '$1') : number || match;
});
return result;
};
/* end adaptation */
var getBaseIntrinsic = function getBaseIntrinsic(name, allowMissing) {
var intrinsicName = name;
var alias;
if (hasOwn(LEGACY_ALIASES, intrinsicName)) {
alias = LEGACY_ALIASES[intrinsicName];
intrinsicName = '%' + alias[0] + '%';
}
if (hasOwn(INTRINSICS, intrinsicName)) {
var value = INTRINSICS[intrinsicName];
if (value === needsEval) {
value = doEval(intrinsicName);
}
if (typeof value === 'undefined' && !allowMissing) {
throw new $TypeError('intrinsic ' + name + ' exists, but is not available. Please file an issue!');
}
return {
alias: alias,
name: intrinsicName,
value: value
};
}
throw new $SyntaxError('intrinsic ' + name + ' does not exist!');
};
module.exports = function GetIntrinsic(name, allowMissing) {
if (typeof name !== 'string' || name.length === 0) {
throw new $TypeError('intrinsic name must be a non-empty string');
}
if (arguments.length > 1 && typeof allowMissing !== 'boolean') {
throw new $TypeError('"allowMissing" argument must be a boolean');
}
var parts = stringToPath(name);
var intrinsicBaseName = parts.length > 0 ? parts[0] : '';
var intrinsic = getBaseIntrinsic('%' + intrinsicBaseName + '%', allowMissing);
var intrinsicRealName = intrinsic.name;
var value = intrinsic.value;
var skipFurtherCaching = false;
var alias = intrinsic.alias;
if (alias) {
intrinsicBaseName = alias[0];
$spliceApply(parts, $concat([0, 1], alias));
}
for (var i = 1, isOwn = true; i < parts.length; i += 1) {
var part = parts[i];
var first = $strSlice(part, 0, 1);
var last = $strSlice(part, -1);
if (
(
(first === '"' || first === "'" || first === '`')
|| (last === '"' || last === "'" || last === '`')
)
&& first !== last
) {
throw new $SyntaxError('property names with quotes must have matching quotes');
}
if (part === 'constructor' || !isOwn) {
skipFurtherCaching = true;
}
intrinsicBaseName += '.' + part;
intrinsicRealName = '%' + intrinsicBaseName + '%';
if (hasOwn(INTRINSICS, intrinsicRealName)) {
value = INTRINSICS[intrinsicRealName];
} else if (value != null) {
if (!(part in value)) {
if (!allowMissing) {
throw new $TypeError('base intrinsic for ' + name + ' exists, but the property is not available.');
}
return void undefined;
}
if ($gOPD && (i + 1) >= parts.length) {
var desc = $gOPD(value, part);
isOwn = !!desc;
// By convention, when a data property is converted to an accessor
// property to emulate a data property that does not suffer from
// the override mistake, that accessor's getter is marked with
// an `originalValue` property. Here, when we detect this, we
// uphold the illusion by pretending to see that original data
// property, i.e., returning the value rather than the getter
// itself.
if (isOwn && 'get' in desc && !('originalValue' in desc.get)) {
value = desc.get;
} else {
value = value[part];
}
} else {
isOwn = hasOwn(value, part);
value = value[part];
}
if (isOwn && !skipFurtherCaching) {
INTRINSICS[intrinsicRealName] = value;
}
}
}
return value;
};
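// For illustration:
//   GetIntrinsic('%Math.abs%') === Math.abs
//   GetIntrinsic('%Array.prototype.push%') === Array.prototype.push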
},{"function-bind":89,"has":93,"has-symbols":91}],91:[function(require,module,exports){
'use strict';
var origSymbol = typeof Symbol !== 'undefined' && Symbol;
var hasSymbolSham = require('./shams');
module.exports = function hasNativeSymbols() {
if (typeof origSymbol !== 'function') { return false; }
if (typeof Symbol !== 'function') { return false; }
if (typeof origSymbol('foo') !== 'symbol') { return false; }
if (typeof Symbol('bar') !== 'symbol') { return false; }
return hasSymbolSham();
};
},{"./shams":92}],92:[function(require,module,exports){
'use strict';
/* eslint complexity: [2, 18], max-statements: [2, 33] */
module.exports = function hasSymbols() {
if (typeof Symbol !== 'function' || typeof Object.getOwnPropertySymbols !== 'function') { return false; }
if (typeof Symbol.iterator === 'symbol') { return true; }
var obj = {};
var sym = Symbol('test');
var symObj = Object(sym);
if (typeof sym === 'string') { return false; }
if (Object.prototype.toString.call(sym) !== '[object Symbol]') { return false; }
if (Object.prototype.toString.call(symObj) !== '[object Symbol]') { return false; }
// temp disabled per https://github.com/ljharb/object.assign/issues/17
// if (sym instanceof Symbol) { return false; }
// temp disabled per https://github.com/WebReflection/get-own-property-symbols/issues/4
// if (!(symObj instanceof Symbol)) { return false; }
// if (typeof Symbol.prototype.toString !== 'function') { return false; }
// if (String(sym) !== Symbol.prototype.toString.call(sym)) { return false; }
var symVal = 42;
obj[sym] = symVal;
for (sym in obj) { return false; } // eslint-disable-line no-restricted-syntax, no-unreachable-loop
if (typeof Object.keys === 'function' && Object.keys(obj).length !== 0) { return false; }
if (typeof Object.getOwnPropertyNames === 'function' && Object.getOwnPropertyNames(obj).length !== 0) { return false; }
var syms = Object.getOwnPropertySymbols(obj);
if (syms.length !== 1 || syms[0] !== sym) { return false; }
if (!Object.prototype.propertyIsEnumerable.call(obj, sym)) { return false; }
if (typeof Object.getOwnPropertyDescriptor === 'function') {
var descriptor = Object.getOwnPropertyDescriptor(obj, sym);
if (descriptor.value !== symVal || descriptor.enumerable !== true) { return false; }
}
return true;
};
},{}],93:[function(require,module,exports){
'use strict';
var bind = require('function-bind');
module.exports = bind.call(Function.call, Object.prototype.hasOwnProperty);
},{"function-bind":89}],94:[function(require,module,exports){
/*! ieee754. BSD-3-Clause License. Feross Aboukhadijeh <https://feross.org/opensource> */
exports.read = function (buffer, offset, isLE, mLen, nBytes) {
var e, m
var eLen = (nBytes * 8) - mLen - 1
var eMax = (1 << eLen) - 1
var eBias = eMax >> 1
var nBits = -7
var i = isLE ? (nBytes - 1) : 0
var d = isLE ? -1 : 1
var s = buffer[offset + i]
i += d
e = s & ((1 << (-nBits)) - 1)
s >>= (-nBits)
nBits += eLen
for (; nBits > 0; e = (e * 256) + buffer[offset + i], i += d, nBits -= 8) {}
m = e & ((1 << (-nBits)) - 1)
e >>= (-nBits)
nBits += mLen
for (; nBits > 0; m = (m * 256) + buffer[offset + i], i += d, nBits -= 8) {}
if (e === 0) {
e = 1 - eBias
} else if (e === eMax) {
return m ? NaN : ((s ? -1 : 1) * Infinity)
} else {
m = m + Math.pow(2, mLen)
e = e - eBias
}
return (s ? -1 : 1) * m * Math.pow(2, e - mLen)
}
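// For illustration, a round-trip sketch: a 32-bit float has mLen 23 and
// nBytes 4, so an exactly representable value survives write-then-read:
//   var buf = []
//   exports.write(buf, 1.5, 0, true, 23, 4)
//   exports.read(buf, 0, true, 23, 4) === 1.5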
exports.write = function (buffer, value, offset, isLE, mLen, nBytes) {
var e, m, c
var eLen = (nBytes * 8) - mLen - 1
var eMax = (1 << eLen) - 1
var eBias = eMax >> 1
var rt = (mLen === 23 ? Math.pow(2, -24) - Math.pow(2, -77) : 0)
var i = isLE ? 0 : (nBytes - 1)
var d = isLE ? 1 : -1
var s = value < 0 || (value === 0 && 1 / value < 0) ? 1 : 0
value = Math.abs(value)
if (isNaN(value) || value === Infinity) {
m = isNaN(value) ? 1 : 0
e = eMax
} else {
e = Math.floor(Math.log(value) / Math.LN2)
if (value * (c = Math.pow(2, -e)) < 1) {
e--
c *= 2
}
if (e + eBias >= 1) {
value += rt / c
} else {
value += rt * Math.pow(2, 1 - eBias)
}
if (value * c >= 2) {
e++
c /= 2
}
if (e + eBias >= eMax) {
m = 0
e = eMax
} else if (e + eBias >= 1) {
m = ((value * c) - 1) * Math.pow(2, mLen)
e = e + eBias
} else {
m = value * Math.pow(2, eBias - 1) * Math.pow(2, mLen)
e = 0
}
}
for (; mLen >= 8; buffer[offset + i] = m & 0xff, i += d, m /= 256, mLen -= 8) {}
e = (e << mLen) | m
eLen += mLen
for (; eLen > 0; buffer[offset + i] = e & 0xff, i += d, e /= 256, eLen -= 8) {}
buffer[offset + i - d] |= s * 128
}
},{}],95:[function(require,module,exports){
if (typeof Object.create === 'function') {
// implementation from standard node.js 'util' module
module.exports = function inherits(ctor, superCtor) {
if (superCtor) {
ctor.super_ = superCtor
ctor.prototype = Object.create(superCtor.prototype, {
constructor: {
value: ctor,
enumerable: false,
writable: true,
configurable: true
}
})
}
};
} else {
// old school shim for old browsers
module.exports = function inherits(ctor, superCtor) {
if (superCtor) {
ctor.super_ = superCtor
var TempCtor = function () {}
TempCtor.prototype = superCtor.prototype
ctor.prototype = new TempCtor()
ctor.prototype.constructor = ctor
}
}
}
},{}],96:[function(require,module,exports){
'use strict';
var fnToStr = Function.prototype.toString;
var reflectApply = typeof Reflect === 'object' && Reflect !== null && Reflect.apply;
var badArrayLike;
var isCallableMarker;
if (typeof reflectApply === 'function' && typeof Object.defineProperty === 'function') {
try {
badArrayLike = Object.defineProperty({}, 'length', {
get: function () {
throw isCallableMarker;
}
});
isCallableMarker = {};
// eslint-disable-next-line no-throw-literal
reflectApply(function () { throw 42; }, null, badArrayLike);
} catch (_) {
if (_ !== isCallableMarker) {
reflectApply = null;
}
}
} else {
reflectApply = null;
}
var constructorRegex = /^\s*class\b/;
var isES6ClassFn = function isES6ClassFunction(value) {
try {
var fnStr = fnToStr.call(value);
return constructorRegex.test(fnStr);
} catch (e) {
return false; // not a function
}
};
var tryFunctionObject = function tryFunctionToStr(value) {
try {
if (isES6ClassFn(value)) { return false; }
fnToStr.call(value);
return true;
} catch (e) {
return false;
}
};
var toStr = Object.prototype.toString;
var fnClass = '[object Function]';
var genClass = '[object GeneratorFunction]';
var hasToStringTag = typeof Symbol === 'function' && !!Symbol.toStringTag; // better: use `has-tostringtag`
/* globals document: false */
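// In browsers, `document.all` is falsy and has typeof 'undefined' yet is not
// actually undefined; it is the one widely known such value, hence this check.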
var documentDotAll = typeof document === 'object' && typeof document.all === 'undefined' && document.all !== undefined ? document.all : {};
module.exports = reflectApply
? function isCallable(value) {
if (value === documentDotAll) { return true; }
if (!value) { return false; }
if (typeof value !== 'function' && typeof value !== 'object') { return false; }
if (typeof value === 'function' && !value.prototype) { return true; }
try {
reflectApply(value, null, badArrayLike);
} catch (e) {
if (e !== isCallableMarker) { return false; }
}
return !isES6ClassFn(value);
}
: function isCallable(value) {
if (value === documentDotAll) { return true; }
if (!value) { return false; }
if (typeof value !== 'function' && typeof value !== 'object') { return false; }
if (typeof value === 'function' && !value.prototype) { return true; }
if (hasToStringTag) { return tryFunctionObject(value); }
if (isES6ClassFn(value)) { return false; }
var strClass = toStr.call(value);
return strClass === fnClass || strClass === genClass;
};
},{}],97:[function(require,module,exports){
'use strict';
var keysShim;
if (!Object.keys) {
// modified from https://github.com/es-shims/es5-shim
var has = Object.prototype.hasOwnProperty;
var toStr = Object.prototype.toString;
var isArgs = require('./isArguments'); // eslint-disable-line global-require
var isEnumerable = Object.prototype.propertyIsEnumerable;
var hasDontEnumBug = !isEnumerable.call({ toString: null }, 'toString');
var hasProtoEnumBug = isEnumerable.call(function () {}, 'prototype');
var dontEnums = [
'toString',
'toLocaleString',
'valueOf',
'hasOwnProperty',
'isPrototypeOf',
'propertyIsEnumerable',
'constructor'
];
var equalsConstructorPrototype = function (o) {
var ctor = o.constructor;
return ctor && ctor.prototype === o;
};
var excludedKeys = {
$applicationCache: true,
$console: true,
$external: true,
$frame: true,
$frameElement: true,
$frames: true,
$innerHeight: true,
$innerWidth: true,
$onmozfullscreenchange: true,
$onmozfullscreenerror: true,
$outerHeight: true,
$outerWidth: true,
$pageXOffset: true,
$pageYOffset: true,
$parent: true,
$scrollLeft: true,
$scrollTop: true,
$scrollX: true,
$scrollY: true,
$self: true,
$webkitIndexedDB: true,
$webkitStorageInfo: true,
$window: true
};
var hasAutomationEqualityBug = (function () {
/* global window */
if (typeof window === 'undefined') { return false; }
for (var k in window) {
try {
if (!excludedKeys['$' + k] && has.call(window, k) && window[k] !== null && typeof window[k] === 'object') {
try {
equalsConstructorPrototype(window[k]);
} catch (e) {
return true;
}
}
} catch (e) {
return true;
}
}
return false;
}());
var equalsConstructorPrototypeIfNotBuggy = function (o) {
/* global window */
if (typeof window === 'undefined' || !hasAutomationEqualityBug) {
return equalsConstructorPrototype(o);
}
try {
return equalsConstructorPrototype(o);
} catch (e) {
return false;
}
};
keysShim = function keys(object) {
var isObject = object !== null && typeof object === 'object';
var isFunction = toStr.call(object) === '[object Function]';
var isArguments = isArgs(object);
var isString = isObject && toStr.call(object) === '[object String]';
var theKeys = [];
if (!isObject && !isFunction && !isArguments) {
throw new TypeError('Object.keys called on a non-object');
}
var skipProto = hasProtoEnumBug && isFunction;
if (isString && object.length > 0 && !has.call(object, 0)) {
for (var i = 0; i < object.length; ++i) {
theKeys.push(String(i));
}
}
if (isArguments && object.length > 0) {
for (var j = 0; j < object.length; ++j) {
theKeys.push(String(j));
}
} else {
for (var name in object) {
if (!(skipProto && name === 'prototype') && has.call(object, name)) {
theKeys.push(String(name));
}
}
}
if (hasDontEnumBug) {
var skipConstructor = equalsConstructorPrototypeIfNotBuggy(object);
for (var k = 0; k < dontEnums.length; ++k) {
if (!(skipConstructor && dontEnums[k] === 'constructor') && has.call(object, dontEnums[k])) {
theKeys.push(dontEnums[k]);
}
}
}
return theKeys;
};
}
module.exports = keysShim;
},{"./isArguments":99}],98:[function(require,module,exports){
'use strict';
var slice = Array.prototype.slice;
var isArgs = require('./isArguments');
var origKeys = Object.keys;
var keysShim = origKeys ? function keys(o) { return origKeys(o); } : require('./implementation');
var originalKeys = Object.keys;
keysShim.shim = function shimObjectKeys() {
if (Object.keys) {
var keysWorksWithArguments = (function () {
// Safari 5.0 bug
var args = Object.keys(arguments);
return args && args.length === arguments.length;
}(1, 2));
if (!keysWorksWithArguments) {
Object.keys = function keys(object) { // eslint-disable-line func-name-matching
if (isArgs(object)) {
return originalKeys(slice.call(object));
}
return originalKeys(object);
};
}
} else {
Object.keys = keysShim;
}
return Object.keys || keysShim;
};
module.exports = keysShim;
},{"./implementation":97,"./isArguments":99}],99:[function(require,module,exports){
'use strict';
var toStr = Object.prototype.toString;
module.exports = function isArguments(value) {
var str = toStr.call(value);
var isArgs = str === '[object Arguments]';
if (!isArgs) {
isArgs = str !== '[object Array]' &&
value !== null &&
typeof value === 'object' &&
typeof value.length === 'number' &&
value.length >= 0 &&
toStr.call(value.callee) === '[object Function]';
}
return isArgs;
};
},{}],100:[function(require,module,exports){
// shim for using process in browser
var process = module.exports = {};
// cached from whatever global is present so that test runners that stub it
// don't break things. But we need to wrap it in a try catch in case it is
// wrapped in strict mode code which doesn't define any globals. It's inside a
// function because try/catches deoptimize in certain engines.
var cachedSetTimeout;
var cachedClearTimeout;
function defaultSetTimeout() {
throw new Error('setTimeout has not been defined');
}
function defaultClearTimeout () {
throw new Error('clearTimeout has not been defined');
}
(function () {
try {
if (typeof setTimeout === 'function') {
cachedSetTimeout = setTimeout;
} else {
cachedSetTimeout = defaultSetTimeout;
}
} catch (e) {
cachedSetTimeout = defaultSetTimeout;
}
try {
if (typeof clearTimeout === 'function') {
cachedClearTimeout = clearTimeout;
} else {
cachedClearTimeout = defaultClearTimeout;
}
} catch (e) {
cachedClearTimeout = defaultClearTimeout;
}
} ())
function runTimeout(fun) {
if (cachedSetTimeout === setTimeout) {
// normal environments in sane situations
return setTimeout(fun, 0);
}
// if setTimeout wasn't available but was defined later
if ((cachedSetTimeout === defaultSetTimeout || !cachedSetTimeout) && setTimeout) {
cachedSetTimeout = setTimeout;
return setTimeout(fun, 0);
}
try {
// when somebody has screwed with setTimeout but there's no I.E. madness
return cachedSetTimeout(fun, 0);
} catch(e){
try {
// when we are in I.E. and the script has been evaled, I.E. doesn't trust the global object when called normally
return cachedSetTimeout.call(null, fun, 0);
} catch(e){
// same as above, but for versions of I.E. that require the global object for 'this'; hopefully our context is correct, otherwise it will throw a global error
return cachedSetTimeout.call(this, fun, 0);
}
}
}
function runClearTimeout(marker) {
if (cachedClearTimeout === clearTimeout) {
// normal environments in sane situations
return clearTimeout(marker);
}
// if clearTimeout wasn't available but was defined later
if ((cachedClearTimeout === defaultClearTimeout || !cachedClearTimeout) && clearTimeout) {
cachedClearTimeout = clearTimeout;
return clearTimeout(marker);
}
try {
// when somebody has screwed with clearTimeout but there's no I.E. madness
return cachedClearTimeout(marker);
} catch (e){
try {
// when we are in I.E. and the script has been evaled, I.E. doesn't trust the global object when called normally
return cachedClearTimeout.call(null, marker);
} catch (e){
// same as above, but for versions of I.E. that require the global object for 'this'; hopefully our context is correct, otherwise it will throw a global error.
// Some versions of I.E. have different rules for clearTimeout vs setTimeout.
return cachedClearTimeout.call(this, marker);
}
}
}
var queue = [];
var draining = false;
var currentQueue;
var queueIndex = -1;
function cleanUpNextTick() {
if (!draining || !currentQueue) {
return;
}
draining = false;
if (currentQueue.length) {
queue = currentQueue.concat(queue);
} else {
queueIndex = -1;
}
if (queue.length) {
drainQueue();
}
}
function drainQueue() {
if (draining) {
return;
}
var timeout = runTimeout(cleanUpNextTick);
draining = true;
var len = queue.length;
while(len) {
currentQueue = queue;
queue = [];
while (++queueIndex < len) {
if (currentQueue) {
currentQueue[queueIndex].run();
}
}
queueIndex = -1;
len = queue.length;
}
currentQueue = null;
draining = false;
runClearTimeout(timeout);
}
process.nextTick = function (fun) {
var args = new Array(arguments.length - 1);
if (arguments.length > 1) {
for (var i = 1; i < arguments.length; i++) {
args[i - 1] = arguments[i];
}
}
queue.push(new Item(fun, args));
if (queue.length === 1 && !draining) {
runTimeout(drainQueue);
}
};
// v8 likes predictable objects
function Item(fun, array) {
this.fun = fun;
this.array = array;
}
Item.prototype.run = function () {
this.fun.apply(null, this.array);
};
process.title = 'browser';
process.browser = true;
process.env = {};
process.argv = [];
process.version = ''; // empty string to avoid regexp issues
process.versions = {};
function noop() {}
process.on = noop;
process.addListener = noop;
process.once = noop;
process.off = noop;
process.removeListener = noop;
process.removeAllListeners = noop;
process.emit = noop;
process.prependListener = noop;
process.prependOnceListener = noop;
process.listeners = function (name) { return [] }
process.binding = function (name) {
throw new Error('process.binding is not supported');
};
process.cwd = function () { return '/' };
process.chdir = function (dir) {
throw new Error('process.chdir is not supported');
};
process.umask = function() { return 0; };
},{}],101:[function(require,module,exports){
(function (process,setImmediate){(function (){
var through = require('through');
var nextTick = typeof setImmediate !== 'undefined'
? setImmediate
: process.nextTick
;
module.exports = function (write, end) {
var tr = through(write, end);
tr.pause();
var resume = tr.resume;
var pause = tr.pause;
var paused = false;
tr.pause = function () {
paused = true;
return pause.apply(this, arguments);
};
tr.resume = function () {
paused = false;
return resume.apply(this, arguments);
};
nextTick(function () {
if (!paused) tr.resume();
});
return tr;
};
}).call(this)}).call(this,require('_process'),require("timers").setImmediate)
},{"_process":100,"through":114,"timers":115}],102:[function(require,module,exports){
/* eslint-disable node/no-deprecated-api */
var buffer = require('buffer')
var Buffer = buffer.Buffer
// alternative to using Object.keys for old browsers
function copyProps (src, dst) {
for (var key in src) {
dst[key] = src[key]
}
}
if (Buffer.from && Buffer.alloc && Buffer.allocUnsafe && Buffer.allocUnsafeSlow) {
module.exports = buffer
} else {
// Copy properties from require('buffer')
copyProps(buffer, exports)
exports.Buffer = SafeBuffer
}
function SafeBuffer (arg, encodingOrOffset, length) {
return Buffer(arg, encodingOrOffset, length)
}
// Copy static methods from Buffer
copyProps(Buffer, SafeBuffer)
SafeBuffer.from = function (arg, encodingOrOffset, length) {
if (typeof arg === 'number') {
throw new TypeError('Argument must not be a number')
}
return Buffer(arg, encodingOrOffset, length)
}
SafeBuffer.alloc = function (size, fill, encoding) {
if (typeof size !== 'number') {
throw new TypeError('Argument must be a number')
}
var buf = Buffer(size)
if (fill !== undefined) {
if (typeof encoding === 'string') {
buf.fill(fill, encoding)
} else {
buf.fill(fill)
}
} else {
buf.fill(0)
}
return buf
}
SafeBuffer.allocUnsafe = function (size) {
if (typeof size !== 'number') {
throw new TypeError('Argument must be a number')
}
return Buffer(size)
}
SafeBuffer.allocUnsafeSlow = function (size) {
if (typeof size !== 'number') {
throw new TypeError('Argument must be a number')
}
return buffer.SlowBuffer(size)
}
},{"buffer":25}],103:[function(require,module,exports){
'use strict';
var bind = require('function-bind');
var ES = require('es-abstract/es5');
var replace = bind.call(Function.call, String.prototype.replace);
var leftWhitespace = /^[\x09\x0A\x0B\x0C\x0D\x20\xA0\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u3000\u2028\u2029\uFEFF]+/;
var rightWhitespace = /[\x09\x0A\x0B\x0C\x0D\x20\xA0\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u3000\u2028\u2029\uFEFF]+$/;
module.exports = function trim() {
var S = ES.ToString(ES.CheckObjectCoercible(this));
return replace(replace(S, leftWhitespace, ''), rightWhitespace, '');
};
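// For illustration: all ES5 WhiteSpace and LineTerminator code points are
// stripped from both ends, e.g.
//   trim.call('\u00a0 foo \u3000') === 'foo'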
},{"es-abstract/es5":77,"function-bind":89}],104:[function(require,module,exports){
'use strict';
var bind = require('function-bind');
var define = require('define-properties');
var implementation = require('./implementation');
var getPolyfill = require('./polyfill');
var shim = require('./shim');
var boundTrim = bind.call(Function.call, getPolyfill());
define(boundTrim, {
getPolyfill: getPolyfill,
implementation: implementation,
shim: shim
});
module.exports = boundTrim;
},{"./implementation":103,"./polyfill":105,"./shim":106,"define-properties":31,"function-bind":89}],105:[function(require,module,exports){
'use strict';
var implementation = require('./implementation');
var zeroWidthSpace = '\u200b';
module.exports = function getPolyfill() {
if (String.prototype.trim && zeroWidthSpace.trim() === zeroWidthSpace) {
return String.prototype.trim;
}
return implementation;
};
},{"./implementation":103}],106:[function(require,module,exports){
'use strict';
var define = require('define-properties');
var getPolyfill = require('./polyfill');
module.exports = function shimStringTrim() {
var polyfill = getPolyfill();
define(String.prototype, { trim: polyfill }, { trim: function () { return String.prototype.trim !== polyfill; } });
return polyfill;
};
},{"./polyfill":105,"define-properties":31}],107:[function(require,module,exports){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
'use strict';
/*<replacement>*/
var Buffer = require('safe-buffer').Buffer;
/*</replacement>*/
var isEncoding = Buffer.isEncoding || function (encoding) {
encoding = '' + encoding;
switch (encoding && encoding.toLowerCase()) {
case 'hex':
case 'utf8':
case 'utf-8':
case 'ascii':
case 'binary':
case 'base64':
case 'ucs2':
case 'ucs-2':
case 'utf16le':
case 'utf-16le':
case 'raw':
return true;
default:
return false;
}
};
function _normalizeEncoding(enc) {
if (!enc) return 'utf8';
var retried;
while (true) {
switch (enc) {
case 'utf8':
case 'utf-8':
return 'utf8';
case 'ucs2':
case 'ucs-2':
case 'utf16le':
case 'utf-16le':
return 'utf16le';
case 'latin1':
case 'binary':
return 'latin1';
case 'base64':
case 'ascii':
case 'hex':
return enc;
default:
if (retried) return; // undefined
enc = ('' + enc).toLowerCase();
retried = true;
}
}
}
// Do not cache `Buffer.isEncoding` when checking encoding names as some
// modules monkey-patch it to support additional encodings
function normalizeEncoding(enc) {
var nenc = _normalizeEncoding(enc);
if (typeof nenc !== 'string' && (Buffer.isEncoding === isEncoding || !isEncoding(enc))) throw new Error('Unknown encoding: ' + enc);
return nenc || enc;
}
// StringDecoder provides an interface for efficiently splitting a series of
// buffers into a series of JS strings without breaking apart multi-byte
// characters.
exports.StringDecoder = StringDecoder;
function StringDecoder(encoding) {
this.encoding = normalizeEncoding(encoding);
var nb;
switch (this.encoding) {
case 'utf16le':
this.text = utf16Text;
this.end = utf16End;
nb = 4;
break;
case 'utf8':
this.fillLast = utf8FillLast;
nb = 4;
break;
case 'base64':
this.text = base64Text;
this.end = base64End;
nb = 3;
break;
default:
this.write = simpleWrite;
this.end = simpleEnd;
return;
}
this.lastNeed = 0;
this.lastTotal = 0;
this.lastChar = Buffer.allocUnsafe(nb);
}
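// For illustration, a sketch of typical usage: multi-byte characters split
// across writes are buffered until complete, e.g.
//   var d = new StringDecoder('utf8');
//   d.write(Buffer.from([0xe2, 0x82])) === '' // partial U+20AC, buffered
//   d.write(Buffer.from([0xac])) === '\u20ac'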
StringDecoder.prototype.write = function (buf) {
if (buf.length === 0) return '';
var r;
var i;
if (this.lastNeed) {
r = this.fillLast(buf);
if (r === undefined) return '';
i = this.lastNeed;
this.lastNeed = 0;
} else {
i = 0;
}
if (i < buf.length) return r ? r + this.text(buf, i) : this.text(buf, i);
return r || '';
};
StringDecoder.prototype.end = utf8End;
// Returns only complete characters in a Buffer
StringDecoder.prototype.text = utf8Text;
// Attempts to complete a partial non-UTF-8 character using bytes from a Buffer
StringDecoder.prototype.fillLast = function (buf) {
if (this.lastNeed <= buf.length) {
buf.copy(this.lastChar, this.lastTotal - this.lastNeed, 0, this.lastNeed);
return this.lastChar.toString(this.encoding, 0, this.lastTotal);
}
buf.copy(this.lastChar, this.lastTotal - this.lastNeed, 0, buf.length);
this.lastNeed -= buf.length;
};
// Checks the type of a UTF-8 byte, whether it's ASCII, a leading byte, or a
// continuation byte. If an invalid byte is detected, -2 is returned.
function utf8CheckByte(byte) {
if (byte <= 0x7F) return 0;
else if (byte >> 5 === 0x06) return 2;
else if (byte >> 4 === 0x0E) return 3;
else if (byte >> 3 === 0x1E) return 4;
return byte >> 6 === 0x02 ? -1 : -2;
}
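// For illustration, how the classification maps bytes:
//   utf8CheckByte(0x41) === 0 (ASCII), utf8CheckByte(0xc3) === 2,
//   utf8CheckByte(0xe2) === 3, utf8CheckByte(0xf0) === 4,
//   utf8CheckByte(0x82) === -1 (continuation), utf8CheckByte(0xff) === -2 (invalid)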
// Checks at most 3 bytes at the end of a Buffer in order to detect an
// incomplete multi-byte UTF-8 character. The total number of bytes (2, 3, or 4)
// needed to complete the UTF-8 character (if applicable) is returned.
function utf8CheckIncomplete(self, buf, i) {
var j = buf.length - 1;
if (j < i) return 0;
var nb = utf8CheckByte(buf[j]);
if (nb >= 0) {
if (nb > 0) self.lastNeed = nb - 1;
return nb;
}
if (--j < i || nb === -2) return 0;
nb = utf8CheckByte(buf[j]);
if (nb >= 0) {
if (nb > 0) self.lastNeed = nb - 2;
return nb;
}
if (--j < i || nb === -2) return 0;
nb = utf8CheckByte(buf[j]);
if (nb >= 0) {
if (nb > 0) {
if (nb === 2) nb = 0;
else self.lastNeed = nb - 3;
}
return nb;
}
return 0;
}
// Validates as many continuation bytes for a multi-byte UTF-8 character as
// needed or are available. If we see a non-continuation byte where we expect
// one, we "replace" the validated continuation bytes we've seen so far with
// a single UTF-8 replacement character ('\ufffd'), to match v8's UTF-8 decoding
// behavior. The continuation byte check is included three times in the case
// where all of the continuation bytes for a character exist in the same buffer.
// It is also done this way as a slight performance increase instead of using a
// loop.
function utf8CheckExtraBytes(self, buf, p) {
if ((buf[0] & 0xC0) !== 0x80) {
self.lastNeed = 0;
return '\ufffd';
}
if (self.lastNeed > 1 && buf.length > 1) {
if ((buf[1] & 0xC0) !== 0x80) {
self.lastNeed = 1;
return '\ufffd';
}
if (self.lastNeed > 2 && buf.length > 2) {
if ((buf[2] & 0xC0) !== 0x80) {
self.lastNeed = 2;
return '\ufffd';
}
}
}
}
// Attempts to complete a multi-byte UTF-8 character using bytes from a Buffer.
function utf8FillLast(buf) {
var p = this.lastTotal - this.lastNeed;
var r = utf8CheckExtraBytes(this, buf, p);
if (r !== undefined) return r;
if (this.lastNeed <= buf.length) {
buf.copy(this.lastChar, p, 0, this.lastNeed);
return this.lastChar.toString(this.encoding, 0, this.lastTotal);
}
buf.copy(this.lastChar, p, 0, buf.length);
this.lastNeed -= buf.length;
}
// Returns all complete UTF-8 characters in a Buffer. If the Buffer ended on a
// partial character, the character's bytes are buffered until the required
// number of bytes are available.
function utf8Text(buf, i) {
var total = utf8CheckIncomplete(this, buf, i);
if (!this.lastNeed) return buf.toString('utf8', i);
this.lastTotal = total;
var end = buf.length - (total - this.lastNeed);
buf.copy(this.lastChar, 0, end);
return buf.toString('utf8', i, end);
}
// For UTF-8, a replacement character is added when ending on a partial
// character.
function utf8End(buf) {
var r = buf && buf.length ? this.write(buf) : '';
if (this.lastNeed) return r + '\ufffd';
return r;
}
// UTF-16LE typically needs two bytes per character, but even if we have an even
// number of bytes available, we need to check if we end on a leading/high
// surrogate. In that case, we need to wait for the next two bytes in order to
// decode the last character properly.
function utf16Text(buf, i) {
if ((buf.length - i) % 2 === 0) {
var r = buf.toString('utf16le', i);
if (r) {
var c = r.charCodeAt(r.length - 1);
if (c >= 0xD800 && c <= 0xDBFF) {
this.lastNeed = 2;
this.lastTotal = 4;
this.lastChar[0] = buf[buf.length - 2];
this.lastChar[1] = buf[buf.length - 1];
return r.slice(0, -1);
}
}
return r;
}
this.lastNeed = 1;
this.lastTotal = 2;
this.lastChar[0] = buf[buf.length - 1];
return buf.toString('utf16le', i, buf.length - 1);
}
// For UTF-16LE we do not explicitly append special replacement characters if we
// end on a partial character; we simply let v8 handle that.
function utf16End(buf) {
var r = buf && buf.length ? this.write(buf) : '';
if (this.lastNeed) {
var end = this.lastTotal - this.lastNeed;
return r + this.lastChar.toString('utf16le', 0, end);
}
return r;
}
function base64Text(buf, i) {
var n = (buf.length - i) % 3;
if (n === 0) return buf.toString('base64', i);
this.lastNeed = 3 - n;
this.lastTotal = 3;
if (n === 1) {
this.lastChar[0] = buf[buf.length - 1];
} else {
this.lastChar[0] = buf[buf.length - 2];
this.lastChar[1] = buf[buf.length - 1];
}
return buf.toString('base64', i, buf.length - n);
}
function base64End(buf) {
var r = buf && buf.length ? this.write(buf) : '';
if (this.lastNeed) return r + this.lastChar.toString('base64', 0, 3 - this.lastNeed);
return r;
}
// Pass bytes on through for single-byte encodings (e.g. ascii, latin1, hex)
function simpleWrite(buf) {
return buf.toString(this.encoding);
}
function simpleEnd(buf) {
return buf && buf.length ? this.write(buf) : '';
}
},{"safe-buffer":102}],108:[function(require,module,exports){
(function (process,setImmediate){(function (){
var defined = require('defined');
var createDefaultStream = require('./lib/default_stream');
var Test = require('./lib/test');
var createResult = require('./lib/results');
var through = require('through');
var canEmitExit = typeof process !== 'undefined' && process
&& typeof process.on === 'function' && process.browser !== true
;
var canExit = typeof process !== 'undefined' && process
&& typeof process.exit === 'function'
;
var nextTick = typeof setImmediate !== 'undefined'
? setImmediate
: process.nextTick
;
exports = module.exports = (function () {
var harness;
var lazyLoad = function () {
return getHarness().apply(this, arguments);
};
lazyLoad.only = function () {
return getHarness().only.apply(this, arguments);
};
lazyLoad.createStream = function (opts) {
if (!opts) opts = {};
if (!harness) {
var output = through();
getHarness({ stream: output, objectMode: opts.objectMode });
return output;
}
return harness.createStream(opts);
};
lazyLoad.onFinish = function () {
return getHarness().onFinish.apply(this, arguments);
};
lazyLoad.getHarness = getHarness
return lazyLoad
function getHarness (opts) {
if (!opts) opts = {};
opts.autoclose = !canEmitExit;
if (!harness) harness = createExitHarness(opts);
return harness;
}
})();
function createExitHarness (conf) {
if (!conf) conf = {};
var harness = createHarness({
autoclose: defined(conf.autoclose, false)
});
var stream = harness.createStream({ objectMode: conf.objectMode });
var es = stream.pipe(conf.stream || createDefaultStream());
if (canEmitExit) {
es.on('error', function (err) { harness._exitCode = 1 });
}
var ended = false;
stream.on('end', function () { ended = true });
if (conf.exit === false) return harness;
if (!canEmitExit || !canExit) return harness;
var inErrorState = false;
process.on('exit', function (code) {
// let the process exit cleanly.
if (code !== 0) {
return
}
if (!ended) {
var only = harness._results._only;
for (var i = 0; i < harness._tests.length; i++) {
var t = harness._tests[i];
if (only && t.name !== only) continue;
t._exit();
}
}
harness.close();
process.exit(code || harness._exitCode);
});
return harness;
}
exports.createHarness = createHarness;
exports.Test = Test;
exports.test = exports; // tap compat
exports.test.skip = Test.skip;
var exitInterval;
function createHarness (conf_) {
if (!conf_) conf_ = {};
var results = createResult();
if (conf_.autoclose !== false) {
results.once('done', function () { results.close() });
}
var test = function (name, conf, cb) {
var t = new Test(name, conf, cb);
test._tests.push(t);
(function inspectCode (st) {
st.on('test', function sub (st_) {
inspectCode(st_);
});
st.on('result', function (r) {
if (!r.ok && typeof r !== 'string') test._exitCode = 1
});
})(t);
results.push(t);
return t;
};
test._results = results;
test._tests = [];
test.createStream = function (opts) {
return results.createStream(opts);
};
test.onFinish = function (cb) {
results.on('done', cb);
};
var only = false;
test.only = function (name) {
if (only) throw new Error('there can only be one only test');
results.only(name);
only = true;
return test.apply(null, arguments);
};
test._exitCode = 0;
test.close = function () { results.close() };
return test;
}
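// Usage sketch (assumption - mirrors tape's documented public API, which the
// exports above wire up):
//
//   var test = require('tape');
//   test('addition', function (t) {
//       t.plan(1);
//       t.equal(1 + 1, 2);
//   });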
}).call(this)}).call(this,require('_process'),require("timers").setImmediate)
},{"./lib/default_stream":109,"./lib/results":111,"./lib/test":112,"_process":100,"defined":32,"through":114,"timers":115}],109:[function(require,module,exports){
(function (process){(function (){
var through = require('through');
var fs = require('fs');
module.exports = function () {
var line = '';
var stream = through(write, flush);
return stream;
function write (buf) {
for (var i = 0; i < buf.length; i++) {
var c = typeof buf === 'string'
? buf.charAt(i)
: String.fromCharCode(buf[i])
;
if (c === '\n') flush();
else line += c;
}
}
function flush () {
if (fs.writeSync && /^win/.test(process.platform)) {
try { fs.writeSync(1, line + '\n'); }
catch (e) { stream.emit('error', e) }
}
else {
try { console.log(line) }
catch (e) { stream.emit('error', e) }
}
line = '';
}
};
}).call(this)}).call(this,require('_process'))
},{"_process":100,"fs":7,"through":114}],110:[function(require,module,exports){
(function (process,setImmediate){(function (){
module.exports = typeof setImmediate !== 'undefined'
? setImmediate
: process.nextTick
;
}).call(this)}).call(this,require('_process'),require("timers").setImmediate)
},{"_process":100,"timers":115}],111:[function(require,module,exports){
(function (process,setImmediate){(function (){
var EventEmitter = require('events').EventEmitter;
var inherits = require('inherits');
var through = require('through');
var resumer = require('resumer');
var inspect = require('object-inspect');
var bind = require('function-bind');
var has = require('has');
var regexpTest = bind.call(Function.call, RegExp.prototype.test);
var yamlIndicators = /\:|\-|\?/;
var nextTick = typeof setImmediate !== 'undefined'
? setImmediate
: process.nextTick
;
module.exports = Results;
inherits(Results, EventEmitter);
function Results () {
if (!(this instanceof Results)) return new Results;
this.count = 0;
this.fail = 0;
this.pass = 0;
this._stream = through();
this.tests = [];
}
Results.prototype.createStream = function (opts) {
if (!opts) opts = {};
var self = this;
var output, testId = 0;
if (opts.objectMode) {
output = through();
self.on('_push', function ontest (t, extra) {
if (!extra) extra = {};
var id = testId++;
t.once('prerun', function () {
var row = {
type: 'test',
name: t.name,
id: id
};
if (has(extra, 'parent')) {
row.parent = extra.parent;
}
output.queue(row);
});
t.on('test', function (st) {
ontest(st, { parent: id });
});
t.on('result', function (res) {
res.test = id;
res.type = 'assert';
output.queue(res);
});
t.on('end', function () {
output.queue({ type: 'end', test: id });
});
});
self.on('done', function () { output.queue(null) });
}
else {
output = resumer();
output.queue('TAP version 13\n');
self._stream.pipe(output);
}
nextTick(function next() {
var t;
while (t = getNextTest(self)) {
t.run();
if (!t.ended) return t.once('end', function(){ nextTick(next); });
}
self.emit('done');
});
return output;
};
Results.prototype.push = function (t) {
var self = this;
self.tests.push(t);
self._watch(t);
self.emit('_push', t);
};
Results.prototype.only = function (name) {
this._only = name;
};
Results.prototype._watch = function (t) {
var self = this;
var write = function (s) { self._stream.queue(s) };
t.once('prerun', function () {
write('# ' + t.name + '\n');
});
t.on('result', function (res) {
if (typeof res === 'string') {
write('# ' + res + '\n');
return;
}
write(encodeResult(res, self.count + 1));
self.count ++;
if (res.ok) self.pass ++
else self.fail ++
});
t.on('test', function (st) { self._watch(st) });
};
Results.prototype.close = function () {
var self = this;
if (self.closed) self._stream.emit('error', new Error('ALREADY CLOSED'));
self.closed = true;
var write = function (s) { self._stream.queue(s) };
write('\n1..' + self.count + '\n');
write('# tests ' + self.count + '\n');
write('# pass ' + self.pass + '\n');
if (self.fail) write('# fail ' + self.fail + '\n')
else write('\n# ok\n')
self._stream.queue(null);
};
function encodeResult (res, count) {
var output = '';
output += (res.ok ? 'ok ' : 'not ok ') + count;
output += res.name ? ' ' + res.name.toString().replace(/\s+/g, ' ') : '';
if (res.skip) output += ' # SKIP';
else if (res.todo) output += ' # TODO';
output += '\n';
if (res.ok) return output;
var outer = ' ';
var inner = outer + ' ';
output += outer + '---\n';
output += inner + 'operator: ' + res.operator + '\n';
if (has(res, 'expected') || has(res, 'actual')) {
var ex = inspect(res.expected);
var ac = inspect(res.actual);
if (Math.max(ex.length, ac.length) > 65 || invalidYaml(ex) || invalidYaml(ac)) {
output += inner + 'expected: |-\n' + inner + ' ' + ex + '\n';
output += inner + 'actual: |-\n' + inner + ' ' + ac + '\n';
}
else {
output += inner + 'expected: ' + ex + '\n';
output += inner + 'actual: ' + ac + '\n';
}
}
if (res.at) {
output += inner + 'at: ' + res.at + '\n';
}
if (res.operator === 'error' && res.actual && res.actual.stack) {
var lines = String(res.actual.stack).split('\n');
output += inner + 'stack: |-\n';
for (var i = 0; i < lines.length; i++) {
output += inner + ' ' + lines[i] + '\n';
}
}
output += outer + '...\n';
return output;
}
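// Illustrative sketch (assumption): for a failing equality assert, the function
// above emits a TAP line followed by an indented YAML diagnostic, roughly:
//
//   not ok 1 should be equal
//     ---
//       operator: equal
//       expected: 2
//       actual: 3
//     ...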
function getNextTest (results) {
if (!results._only) {
return results.tests.shift();
}
do {
var t = results.tests.shift();
if (!t) continue;
if (results._only === t.name) {
return t;
}
} while (results.tests.length !== 0)
}
function invalidYaml (str) {
return regexpTest(yamlIndicators, str);
}
}).call(this)}).call(this,require('_process'),require("timers").setImmediate)
},{"_process":100,"events":8,"function-bind":89,"has":93,"inherits":95,"object-inspect":113,"resumer":101,"through":114,"timers":115}],112:[function(require,module,exports){
(function (__dirname){(function (){
var deepEqual = require('deep-equal');
var defined = require('defined');
var path = require('path');
var inherits = require('inherits');
var EventEmitter = require('events').EventEmitter;
var has = require('has');
var trim = require('string.prototype.trim');
var nextTick = require('./next_tick');
module.exports = Test;
inherits(Test, EventEmitter);
var getTestArgs = function (name_, opts_, cb_) {
var name = '(anonymous)';
var opts = {};
var cb;
for (var i = 0; i < arguments.length; i++) {
var arg = arguments[i];
var t = typeof arg;
if (t === 'string') {
name = arg;
}
else if (t === 'object') {
opts = arg || opts;
}
else if (t === 'function') {
cb = arg;
}
}
return { name: name, opts: opts, cb: cb };
};
function Test (name_, opts_, cb_) {
if (! (this instanceof Test)) {
return new Test(name_, opts_, cb_);
}
var args = getTestArgs(name_, opts_, cb_);
this.readable = true;
this.name = args.name || '(anonymous)';
this.assertCount = 0;
this.pendingCount = 0;
this._skip = args.opts.skip || false;
this._timeout = args.opts.timeout;
this._plan = undefined;
this._cb = args.cb;
this._progeny = [];
this._ok = true;
for (var prop in this) {
this[prop] = (function bind(self, val) {
if (typeof val === 'function') {
return function bound() {
return val.apply(self, arguments);
};
}
else return val;
})(this, this[prop]);
}
}
Test.prototype.run = function () {
if (this._skip) {
this.comment('SKIP ' + this.name);
}
if (!this._cb || this._skip) {
return this._end();
}
if (this._timeout != null) {
this.timeoutAfter(this._timeout);
}
this.emit('prerun');
this._cb(this);
this.emit('run');
};
Test.prototype.test = function (name, opts, cb) {
var self = this;
var t = new Test(name, opts, cb);
this._progeny.push(t);
this.pendingCount++;
this.emit('test', t);
t.on('prerun', function () {
self.assertCount++;
})
if (!self._pendingAsserts()) {
nextTick(function () {
self._end();
});
}
nextTick(function() {
if (!self._plan && self.pendingCount == self._progeny.length) {
self._end();
}
});
};
Test.prototype.comment = function (msg) {
var that = this;
trim(msg).split('\n').forEach(function (aMsg) {
that.emit('result', trim(aMsg).replace(/^#\s*/, ''));
});
};
Test.prototype.plan = function (n) {
this._plan = n;
this.emit('plan', n);
};
Test.prototype.timeoutAfter = function(ms) {
if (!ms) throw new Error('timeoutAfter requires a timespan');
var self = this;
var timeout = setTimeout(function() {
self.fail('test timed out after ' + ms + 'ms');
self.end();
}, ms);
this.once('end', function() {
clearTimeout(timeout);
});
}
Test.prototype.end = function (err) {
var self = this;
if (arguments.length >= 1 && !!err) {
this.ifError(err);
}
if (this.calledEnd) {
this.fail('.end() called twice');
}
this.calledEnd = true;
this._end();
};
Test.prototype._end = function (err) {
var self = this;
if (this._progeny.length) {
var t = this._progeny.shift();
t.on('end', function () { self._end() });
t.run();
return;
}
if (!this.ended) this.emit('end');
var pendingAsserts = this._pendingAsserts();
if (!this._planError && this._plan !== undefined && pendingAsserts) {
this._planError = true;
this.fail('plan != count', {
expected : this._plan,
actual : this.assertCount
});
}
this.ended = true;
};
Test.prototype._exit = function () {
if (this._plan !== undefined &&
!this._planError && this.assertCount !== this._plan) {
this._planError = true;
this.fail('plan != count', {
expected : this._plan,
actual : this.assertCount,
exiting : true
});
}
else if (!this.ended) {
this.fail('test exited without ending', {
exiting: true
});
}
};
Test.prototype._pendingAsserts = function () {
if (this._plan === undefined) {
return 1;
}
else {
return this._plan - (this._progeny.length + this.assertCount);
}
};
Test.prototype._assert = function assert (ok, opts) {
var self = this;
var extra = opts.extra || {};
var res = {
id : self.assertCount ++,
ok : Boolean(ok),
skip : defined(extra.skip, opts.skip),
name : defined(extra.message, opts.message, '(unnamed assert)'),
operator : defined(extra.operator, opts.operator)
};
if (has(opts, 'actual') || has(extra, 'actual')) {
res.actual = defined(extra.actual, opts.actual);
}
if (has(opts, 'expected') || has(extra, 'expected')) {
res.expected = defined(extra.expected, opts.expected);
}
this._ok = Boolean(this._ok && ok);
if (!ok) {
res.error = defined(extra.error, opts.error, new Error(res.name));
}
if (!ok) {
var e = new Error('exception');
var err = (e.stack || '').split('\n');
var dir = path.dirname(__dirname) + '/';
for (var i = 0; i < err.length; i++) {
var m = /^[^\s]*\s*\bat\s+(.+)/.exec(err[i]);
if (!m) {
continue;
}
var s = m[1].split(/\s+/);
var filem = /(\/[^:\s]+:(\d+)(?::(\d+))?)/.exec(s[1]);
if (!filem) {
filem = /(\/[^:\s]+:(\d+)(?::(\d+))?)/.exec(s[2]);
if (!filem) {
filem = /(\/[^:\s]+:(\d+)(?::(\d+))?)/.exec(s[3]);
if (!filem) {
continue;
}
}
}
if (filem[1].slice(0, dir.length) === dir) {
continue;
}
res.functionName = s[0];
res.file = filem[1];
res.line = Number(filem[2]);
if (filem[3]) res.column = filem[3];
res.at = m[1];
break;
}
}
self.emit('result', res);
var pendingAsserts = self._pendingAsserts();
if (!pendingAsserts) {
if (extra.exiting) {
self._end();
} else {
nextTick(function () {
self._end();
});
}
}
if (!self._planError && pendingAsserts < 0) {
self._planError = true;
self.fail('plan != count', {
expected : self._plan,
actual : self._plan - pendingAsserts
});
}
};
Test.prototype.fail = function (msg, extra) {
this._assert(false, {
message : msg,
operator : 'fail',
extra : extra
});
};
Test.prototype.pass = function (msg, extra) {
this._assert(true, {
message : msg,
operator : 'pass',
extra : extra
});
};
Test.prototype.skip = function (msg, extra) {
this._assert(true, {
message : msg,
operator : 'skip',
skip : true,
extra : extra
});
};
Test.prototype.ok
= Test.prototype['true']
= Test.prototype.assert
= function (value, msg, extra) {
this._assert(value, {
message : msg,
operator : 'ok',
expected : true,
actual : value,
extra : extra
});
};
Test.prototype.notOk
= Test.prototype['false']
= Test.prototype.notok
= function (value, msg, extra) {
this._assert(!value, {
message : msg,
operator : 'notOk',
expected : false,
actual : value,
extra : extra
});
};
Test.prototype.error
= Test.prototype.ifError
= Test.prototype.ifErr
= Test.prototype.iferror
= function (err, msg, extra) {
this._assert(!err, {
message : defined(msg, String(err)),
operator : 'error',
actual : err,
extra : extra
});
};
Test.prototype.equal
= Test.prototype.equals
= Test.prototype.isEqual
= Test.prototype.is
= Test.prototype.strictEqual
= Test.prototype.strictEquals
= function (a, b, msg, extra) {
this._assert(a === b, {
message : defined(msg, 'should be equal'),
operator : 'equal',
actual : a,
expected : b,
extra : extra
});
};
Test.prototype.notEqual
= Test.prototype.notEquals
= Test.prototype.notStrictEqual
= Test.prototype.notStrictEquals
= Test.prototype.isNotEqual
= Test.prototype.isNot
= Test.prototype.not
= Test.prototype.doesNotEqual
= Test.prototype.isInequal
= function (a, b, msg, extra) {
this._assert(a !== b, {
message : defined(msg, 'should not be equal'),
operator : 'notEqual',
actual : a,
notExpected : b,
extra : extra
});
};
Test.prototype.deepEqual
= Test.prototype.deepEquals
= Test.prototype.isEquivalent
= Test.prototype.same
= function (a, b, msg, extra) {
this._assert(deepEqual(a, b, { strict: true }), {
message : defined(msg, 'should be equivalent'),
operator : 'deepEqual',
actual : a,
expected : b,
extra : extra
});
};
Test.prototype.deepLooseEqual
= Test.prototype.looseEqual
= Test.prototype.looseEquals
= function (a, b, msg, extra) {
this._assert(deepEqual(a, b), {
message : defined(msg, 'should be equivalent'),
operator : 'deepLooseEqual',
actual : a,
expected : b,
extra : extra
});
};
Test.prototype.notDeepEqual
= Test.prototype.notEquivalent
= Test.prototype.notDeeply
= Test.prototype.notSame
= Test.prototype.isNotDeepEqual
= Test.prototype.isNotDeeply
= Test.prototype.isNotEquivalent
= Test.prototype.isInequivalent
= function (a, b, msg, extra) {
this._assert(!deepEqual(a, b, { strict: true }), {
message : defined(msg, 'should not be equivalent'),
operator : 'notDeepEqual',
actual : a,
notExpected : b,
extra : extra
});
};
Test.prototype.notDeepLooseEqual
= Test.prototype.notLooseEqual
= Test.prototype.notLooseEquals
= function (a, b, msg, extra) {
this._assert(!deepEqual(a, b), {
message : defined(msg, 'should be equivalent'),
operator : 'notDeepLooseEqual',
actual : a,
expected : b,
extra : extra
});
};
Test.prototype['throws'] = function (fn, expected, msg, extra) {
if (typeof expected === 'string') {
msg = expected;
expected = undefined;
}
var caught = undefined;
try {
fn();
} catch (err) {
caught = { error : err };
var message = err.message;
delete err.message;
err.message = message;
}
var passed = caught;
if (expected instanceof RegExp) {
passed = expected.test(caught && caught.error);
expected = String(expected);
}
if (typeof expected === 'function' && caught) {
passed = caught.error instanceof expected;
caught.error = caught.error.constructor;
}
this._assert(typeof fn === 'function' && passed, {
message : defined(msg, 'should throw'),
operator : 'throws',
actual : caught && caught.error,
expected : expected,
error: !passed && caught && caught.error,
extra : extra
});
};
Test.prototype.doesNotThrow = function (fn, expected, msg, extra) {
if (typeof expected === 'string') {
msg = expected;
expected = undefined;
}
var caught = undefined;
try {
fn();
}
catch (err) {
caught = { error : err };
}
this._assert(!caught, {
message : defined(msg, 'should not throw'),
operator : 'throws',
actual : caught && caught.error,
expected : expected,
error : caught && caught.error,
extra : extra
});
};
Test.skip = function (name_, _opts, _cb) {
var args = getTestArgs.apply(null, arguments);
args.opts.skip = true;
return Test(args.name, args.opts, args.cb);
};
// vim: set softtabstop=4 shiftwidth=4:
}).call(this)}).call(this,"/node_modules/tape/lib")
},{"./next_tick":110,"deep-equal":28,"defined":32,"events":8,"has":93,"inherits":95,"path":9,"string.prototype.trim":104}],113:[function(require,module,exports){
var hasMap = typeof Map === 'function' && Map.prototype;
var mapSizeDescriptor = Object.getOwnPropertyDescriptor && hasMap ? Object.getOwnPropertyDescriptor(Map.prototype, 'size') : null;
var mapSize = hasMap && mapSizeDescriptor && typeof mapSizeDescriptor.get === 'function' ? mapSizeDescriptor.get : null;
var mapForEach = hasMap && Map.prototype.forEach;
var hasSet = typeof Set === 'function' && Set.prototype;
var setSizeDescriptor = Object.getOwnPropertyDescriptor && hasSet ? Object.getOwnPropertyDescriptor(Set.prototype, 'size') : null;
var setSize = hasSet && setSizeDescriptor && typeof setSizeDescriptor.get === 'function' ? setSizeDescriptor.get : null;
var setForEach = hasSet && Set.prototype.forEach;
module.exports = function inspect_ (obj, opts, depth, seen) {
if (!opts) opts = {};
var maxDepth = opts.depth === undefined ? 5 : opts.depth;
if (depth === undefined) depth = 0;
if (depth >= maxDepth && maxDepth > 0
&& obj && typeof obj === 'object') {
return '[Object]';
}
if (seen === undefined) seen = [];
else if (indexOf(seen, obj) >= 0) {
return '[Circular]';
}
function inspect (value, from) {
if (from) {
seen = seen.slice();
seen.push(from);
}
return inspect_(value, opts, depth + 1, seen);
}
if (typeof obj === 'string') {
return inspectString(obj);
}
else if (typeof obj === 'function') {
var name = nameOf(obj);
return '[Function' + (name ? ': ' + name : '') + ']';
}
else if (obj === null) {
return 'null';
}
else if (isSymbol(obj)) {
var symString = Symbol.prototype.toString.call(obj);
return typeof obj === 'object' ? 'Object(' + symString + ')' : symString;
}
else if (isElement(obj)) {
var s = '<' + String(obj.nodeName).toLowerCase();
var attrs = obj.attributes || [];
for (var i = 0; i < attrs.length; i++) {
s += ' ' + attrs[i].name + '="' + quote(attrs[i].value) + '"';
}
s += '>';
if (obj.childNodes && obj.childNodes.length) s += '...';
s += '</' + String(obj.nodeName).toLowerCase() + '>';
return s;
}
else if (isArray(obj)) {
if (obj.length === 0) return '[]';
var xs = Array(obj.length);
for (var i = 0; i < obj.length; i++) {
xs[i] = has(obj, i) ? inspect(obj[i], obj) : '';
}
return '[ ' + xs.join(', ') + ' ]';
}
else if (isError(obj)) {
var parts = [];
for (var key in obj) {
if (!has(obj, key)) continue;
if (/[^\w$]/.test(key)) {
parts.push(inspect(key) + ': ' + inspect(obj[key]));
}
else {
parts.push(key + ': ' + inspect(obj[key]));
}
}
if (parts.length === 0) return '[' + obj + ']';
return '{ [' + obj + '] ' + parts.join(', ') + ' }';
}
else if (typeof obj === 'object' && typeof obj.inspect === 'function') {
return obj.inspect();
}
else if (isMap(obj)) {
var parts = [];
mapForEach.call(obj, function (value, key) {
parts.push(inspect(key, obj) + ' => ' + inspect(value, obj));
});
return 'Map (' + mapSize.call(obj) + ') {' + parts.join(', ') + '}';
}
else if (isSet(obj)) {
var parts = [];
setForEach.call(obj, function (value ) {
parts.push(inspect(value, obj));
});
return 'Set (' + setSize.call(obj) + ') {' + parts.join(', ') + '}';
}
else if (typeof obj === 'object' && !isDate(obj) && !isRegExp(obj)) {
var xs = [], keys = [];
for (var key in obj) {
if (has(obj, key)) keys.push(key);
}
keys.sort();
for (var i = 0; i < keys.length; i++) {
var key = keys[i];
if (/[^\w$]/.test(key)) {
xs.push(inspect(key) + ': ' + inspect(obj[key], obj));
}
else xs.push(key + ': ' + inspect(obj[key], obj));
}
if (xs.length === 0) return '{}';
return '{ ' + xs.join(', ') + ' }';
}
else return String(obj);
};
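// Usage sketch (assumption - exercises the exported function above):
//
//   var inspect = require('object-inspect');
//   var o = { a: 1 };
//   o.self = o;
//   inspect(o); // '{ a: 1, self: [Circular] }'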
function quote (s) {
return String(s).replace(/"/g, '&quot;');
}
function isArray (obj) { return toStr(obj) === '[object Array]' }
function isDate (obj) { return toStr(obj) === '[object Date]' }
function isRegExp (obj) { return toStr(obj) === '[object RegExp]' }
function isError (obj) { return toStr(obj) === '[object Error]' }
function isSymbol (obj) { return toStr(obj) === '[object Symbol]' }
var hasOwn = Object.prototype.hasOwnProperty || function (key) { return key in this; };
function has (obj, key) {
return hasOwn.call(obj, key);
}
function toStr (obj) {
return Object.prototype.toString.call(obj);
}
function nameOf (f) {
if (f.name) return f.name;
var m = f.toString().match(/^function\s*([\w$]+)/);
if (m) return m[1];
}
function indexOf (xs, x) {
if (xs.indexOf) return xs.indexOf(x);
for (var i = 0, l = xs.length; i < l; i++) {
if (xs[i] === x) return i;
}
return -1;
}
function isMap (x) {
if (!mapSize) {
return false;
}
try {
mapSize.call(x);
return true;
} catch (e) {}
return false;
}
function isSet (x) {
if (!setSize) {
return false;
}
try {
setSize.call(x);
return true;
} catch (e) {}
return false;
}
function isElement (x) {
if (!x || typeof x !== 'object') return false;
if (typeof HTMLElement !== 'undefined' && x instanceof HTMLElement) {
return true;
}
return typeof x.nodeName === 'string'
&& typeof x.getAttribute === 'function'
;
}
function inspectString (str) {
var s = str.replace(/(['\\])/g, '\\$1').replace(/[\x00-\x1f]/g, lowbyte);
return "'" + s + "'";
function lowbyte (c) {
var n = c.charCodeAt(0);
var x = { 8: 'b', 9: 't', 10: 'n', 12: 'f', 13: 'r' }[n];
if (x) return '\\' + x;
return '\\x' + (n < 0x10 ? '0' : '') + n.toString(16);
}
}
},{}],114:[function(require,module,exports){
(function (process){(function (){
var Stream = require('stream')
// through
//
// a stream that does nothing but re-emit the input.
// useful for aggregating a series of changing but not ending streams into one stream.
exports = module.exports = through
through.through = through
//create a readable writable stream.
function through (write, end, opts) {
write = write || function (data) { this.queue(data) }
end = end || function () { this.queue(null) }
var ended = false, destroyed = false, buffer = [], _ended = false
var stream = new Stream()
stream.readable = stream.writable = true
stream.paused = false
// stream.autoPause = !(opts && opts.autoPause === false)
stream.autoDestroy = !(opts && opts.autoDestroy === false)
stream.write = function (data) {
write.call(this, data)
return !stream.paused
}
function drain() {
while(buffer.length && !stream.paused) {
var data = buffer.shift()
if(null === data)
return stream.emit('end')
else
stream.emit('data', data)
}
}
stream.queue = stream.push = function (data) {
// console.error(ended)
if(_ended) return stream
if(data === null) _ended = true
buffer.push(data)
drain()
return stream
}
//this will be registered as the first 'end' listener
//must call destroy next tick, to make sure we're after any
//stream piped from here.
//this is only a problem if end is not emitted synchronously.
//a nicer way to do this is to make sure this is the last listener for 'end'
stream.on('end', function () {
stream.readable = false
if(!stream.writable && stream.autoDestroy)
process.nextTick(function () {
stream.destroy()
})
})
function _end () {
stream.writable = false
end.call(stream)
if(!stream.readable && stream.autoDestroy)
stream.destroy()
}
stream.end = function (data) {
if(ended) return
ended = true
if(arguments.length) stream.write(data)
_end() // will emit or queue
return stream
}
stream.destroy = function () {
if(destroyed) return
destroyed = true
ended = true
buffer.length = 0
stream.writable = stream.readable = false
stream.emit('close')
return stream
}
stream.pause = function () {
if(stream.paused) return
stream.paused = true
return stream
}
stream.resume = function () {
if(stream.paused) {
stream.paused = false
stream.emit('resume')
}
drain()
//may have become paused again,
//as drain emits 'data'.
if(!stream.paused)
stream.emit('drain')
return stream
}
return stream
}
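// Usage sketch (assumption): transform each chunk before re-emitting it.
//
//   var tr = through(function (data) {
//       this.queue(String(data).toUpperCase())
//   })
//   tr.pipe(process.stdout)
//   tr.write('hello\n') // prints HELLO
//   tr.end()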
}).call(this)}).call(this,require('_process'))
},{"_process":100,"stream":10}],115:[function(require,module,exports){
(function (setImmediate,clearImmediate){(function (){
var nextTick = require('process/browser.js').nextTick;
var apply = Function.prototype.apply;
var slice = Array.prototype.slice;
var immediateIds = {};
var nextImmediateId = 0;
// DOM APIs, for completeness
exports.setTimeout = function() {
return new Timeout(apply.call(setTimeout, window, arguments), clearTimeout);
};
exports.setInterval = function() {
return new Timeout(apply.call(setInterval, window, arguments), clearInterval);
};
exports.clearTimeout =
exports.clearInterval = function(timeout) { timeout.close(); };
function Timeout(id, clearFn) {
this._id = id;
this._clearFn = clearFn;
}
Timeout.prototype.unref = Timeout.prototype.ref = function() {};
Timeout.prototype.close = function() {
this._clearFn.call(window, this._id);
};
// Does not start the time, just sets up the members needed.
exports.enroll = function(item, msecs) {
clearTimeout(item._idleTimeoutId);
item._idleTimeout = msecs;
};
exports.unenroll = function(item) {
clearTimeout(item._idleTimeoutId);
item._idleTimeout = -1;
};
exports._unrefActive = exports.active = function(item) {
clearTimeout(item._idleTimeoutId);
var msecs = item._idleTimeout;
if (msecs >= 0) {
item._idleTimeoutId = setTimeout(function onTimeout() {
if (item._onTimeout)
item._onTimeout();
}, msecs);
}
};
// That's not how node.js implements it but the exposed api is the same.
exports.setImmediate = typeof setImmediate === "function" ? setImmediate : function(fn) {
var id = nextImmediateId++;
var args = arguments.length < 2 ? false : slice.call(arguments, 1);
immediateIds[id] = true;
nextTick(function onNextTick() {
if (immediateIds[id]) {
// fn.call() is faster so we optimize for the common use-case
// @see http://jsperf.com/call-apply-segu
if (args) {
fn.apply(null, args);
} else {
fn.call(null);
}
// Prevent ids from leaking
exports.clearImmediate(id);
}
});
return id;
};
exports.clearImmediate = typeof clearImmediate === "function" ? clearImmediate : function(id) {
delete immediateIds[id];
};
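// Illustrative sketch (assumption): the shim is call-compatible with Node's
// timers module, so clearing before the queued tick runs suppresses the call.
//
//   var id = exports.setImmediate(function () { console.log('never runs') });
//   exports.clearImmediate(id); // id is dropped from immediateIds first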
}).call(this)}).call(this,require("timers").setImmediate,require("timers").clearImmediate)
},{"process/browser.js":100,"timers":115}],116:[function(require,module,exports){
(function (global){(function (){
/**
* Module exports.
*/
module.exports = deprecate;
/**
* Mark that a method should not be used.
* Returns a modified function which warns once by default.
*
* If `localStorage.noDeprecation = true` is set, then it is a no-op.
*
* If `localStorage.throwDeprecation = true` is set, then deprecated functions
* will throw an Error when invoked.
*
* If `localStorage.traceDeprecation = true` is set, then deprecated functions
* will invoke `console.trace()` instead of `console.error()`.
*
* @param {Function} fn - the function to deprecate
* @param {String} msg - the string to print to the console when `fn` is invoked
* @returns {Function} a new "deprecated" version of `fn`
* @api public
*/
function deprecate (fn, msg) {
if (config('noDeprecation')) {
return fn;
}
var warned = false;
function deprecated() {
if (!warned) {
if (config('throwDeprecation')) {
throw new Error(msg);
} else if (config('traceDeprecation')) {
console.trace(msg);
} else {
console.warn(msg);
}
warned = true;
}
return fn.apply(this, arguments);
}
return deprecated;
}
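// Usage sketch (assumption): wrap a legacy function so the first call warns
// once and later calls stay silent.
//
//   var oldSum = deprecate(function (a, b) { return a + b; },
//       'oldSum() is deprecated, use sum() instead');
//   oldSum(1, 2); // warns via console.warn, returns 3
//   oldSum(3, 4); // silent, returns 7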
/**
* Checks `localStorage` for boolean values for the given `name`.
*
* @param {String} name
* @returns {Boolean}
* @api private
*/
function config (name) {
// accessing global.localStorage can trigger a DOMException in sandboxed iframes
try {
if (!global.localStorage) return false;
} catch (_) {
return false;
}
var val = global.localStorage[name];
if (null == val) return false;
return String(val).toLowerCase() === 'true';
}
}).call(this)}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{}]},{},[2]);
| NodeError |
log.rs | //! Mod of types for ethereum logs
use ethers_core::{
abi::{Error, RawLog},
types::{Address, Log, TxHash, H256, U256, U64},
};
/// A trait for types (events) that can be decoded from a `RawLog`
pub trait EthLogDecode: Send + Sync {
/// decode from a `RawLog`
fn decode_log(log: &RawLog) -> Result<Self, Error>
where
Self: Sized;
}
/// Decodes a series of logs into a vector
pub fn decode_logs<T: EthLogDecode>(logs: &[RawLog]) -> Result<Vec<T>, Error> {
logs.iter().map(T::decode_log).collect()
}
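// Illustrative sketch (assumption: `TransferEvent` is a hypothetical type whose
// `EthLogDecode` impl would normally come from a derive macro):
//
//     let raw_logs: Vec<RawLog> = vec![]; // fetched elsewhere
//     let events: Result<Vec<TransferEvent>, Error> = decode_logs(&raw_logs);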
/// Metadata inside a log
#[derive(Clone, Debug, PartialEq)]
pub struct LogMeta {
/// Address from which this log originated
pub address: Address,
/// The block in which the log was emitted
pub block_number: U64,
/// The block hash in which the log was emitted
pub block_hash: H256,
/// The transaction hash in which the log was emitted
pub transaction_hash: TxHash,
/// Transaction index position from which the log was created
pub transaction_index: U64,
/// Log index position in the block
pub log_index: U256,
}
impl From<&Log> for LogMeta {
fn from(src: &Log) -> Self {
LogMeta {
address: src.address,
block_number: src.block_number.expect("should have a block number"),
block_hash: src.block_hash.expect("should have a block hash"),
transaction_hash: src.transaction_hash.expect("should have a tx hash"),
transaction_index: src.transaction_index.expect("should have a tx index"),
log_index: src.log_index.expect("should have a log index"),
}
}
}
app-routing.module.ts | import { Routes, RouterModule } from "@angular/router";
import { NgModule } from "@angular/core";
import { CommonModule } from "@angular/common";
import { BrowserModule } from "@angular/platform-browser";
import { AdminLayoutComponent } from "./layouts/admin-layout/admin-layout.component";
import { AuthLayoutComponent } from './layouts/auth-layout/auth-layout.component';
import { AuthGuard } from './security/auth.guard';
import { LoginComponent } from './login/login.component';
import { RegisterComponent } from "./register/register.component";
const routes: Routes = [
/* {
path: "",
redirectTo: "dashboard",
pathMatch: "full"
}, */
{
path: "",
component: AdminLayoutComponent,
canActivate: [AuthGuard],
children: [
{path: "", redirectTo: "dashboard", pathMatch: "full"},
{path: "", loadChildren: "./layouts/admin-layout/admin-layout.module#AdminLayoutModule"}
]
}, /* {
path: '',
component: AuthLayoutComponent,
children: [
{
path: '',
loadChildren: './layouts/auth-layout/auth-layout.module#AuthLayoutModule'
}
]
}, */
{path: "login", component: LoginComponent},
{path: "register", component: RegisterComponent},
{
path: "**",
redirectTo: "dashboard" // if a 404 page is ever added, it should be wired up here
}
];
@NgModule({
imports: [
CommonModule,
BrowserModule,
RouterModule.forRoot(routes)
],
exports: [RouterModule]
})
export class AppRoutingModule {}
getPrivateEndpointConnection.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20200313
import (
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
func LookupPrivateEndpointConnection(ctx *pulumi.Context, args *LookupPrivateEndpointConnectionArgs, opts ...pulumi.InvokeOption) (*LookupPrivateEndpointConnectionResult, error) {
	var rv LookupPrivateEndpointConnectionResult
	err := ctx.Invoke("azure-nextgen:search/v20200313:getPrivateEndpointConnection", args, &rv, opts...)
	if err != nil {
		return nil, err
	}
	return &rv, nil
}
type LookupPrivateEndpointConnectionArgs struct {
// The name of the private endpoint connection to the Azure Cognitive Search service with the specified resource group.
PrivateEndpointConnectionName string `pulumi:"privateEndpointConnectionName"`
// The name of the resource group within the current subscription. You can obtain this value from the Azure Resource Manager API or the portal.
ResourceGroupName string `pulumi:"resourceGroupName"`
// The name of the Azure Cognitive Search service associated with the specified resource group.
SearchServiceName string `pulumi:"searchServiceName"`
}
// Describes an existing Private Endpoint connection to the Azure Cognitive Search service.
type LookupPrivateEndpointConnectionResult struct {
// The name of the private endpoint connection.
Name string `pulumi:"name"`
// Describes the properties of an existing Private Endpoint connection to the Azure Cognitive Search service.
Properties PrivateEndpointConnectionPropertiesResponse `pulumi:"properties"`
// The resource type.
Type string `pulumi:"type"`
}
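// Usage sketch (assumption - resource names below are placeholders):
//
//	conn, err := LookupPrivateEndpointConnection(ctx, &LookupPrivateEndpointConnectionArgs{
//		PrivateEndpointConnectionName: "example-connection",
//		ResourceGroupName:             "example-rg",
//		SearchServiceName:             "example-search",
//	})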
build.rs | #![cfg_attr(all(feature = "alloc", not(feature = "std")), feature(alloc))]
#![cfg_attr(feature = "nightly", feature(doc_cfg))]
#![cfg_attr(feature = "simd_backend", feature(stdsimd))]
#![allow(unused_variables)]
#![allow(non_snake_case)]
#![allow(dead_code)]
//#![cfg_attr(all(feature = "mesalock_sgx", not(target_env = "sgx")), no_std)]
//#![cfg_attr(all(target_env = "sgx", target_vendor = "mesalock"), feature(rustc_private))]
//
//#[cfg(all(feature = "mesalock_sgx", not(target_env = "sgx")))]
//#[macro_use]
//extern crate sgx_tstd as std;
//use std::prelude::v1::*;
//#[cfg(all(feature = "alloc", not(feature = "std")))]
//extern crate alloc;
//extern crate byteorder;
//extern crate clear_on_drop;
//extern crate core;
//extern crate digest;
//extern crate rand_core;
//extern crate subtle;
//
//#[cfg(all(feature = "nightly", feature = "packed_simd"))]
//extern crate packed_simd;
//
//use std::env;
////use std::untrusted::fs::File;
//use std::fs::File;
//use std::io::Write;
//use std::path::Path;
//
//// Replicate lib.rs in the build.rs, since we're effectively building the whole crate twice.
////
//// This should be fixed up by refactoring our code to separate the "minimal" parts from the rest.
////
//// For instance, this shouldn't exist here at all, but it does.
//#[cfg(feature = "serde")]
//extern crate serde;
//
//// Macros come first!
//#[path = "src/macros.rs"]
//#[macro_use]
//mod macros;
//
//// Public modules
//
//#[path = "src/constants.rs"]
//mod constants;
//#[path = "src/edwards.rs"]
//mod edwards;
//#[path = "src/montgomery.rs"]
//mod montgomery;
//#[path = "src/ristretto.rs"]
//mod ristretto;
//#[path = "src/scalar.rs"]
//mod scalar;
//#[path = "src/traits.rs"]
//mod traits;
//
//// Internal modules
//
//#[path = "src/backend/mod.rs"]
//mod backend;
//#[path = "src/field.rs"]
//mod field;
//#[path = "src/prelude.rs"]
//mod prelude;
//#[path = "src/window.rs"]
//mod window;
//
//use edwards::EdwardsBasepointTable;
fn main() {
// // Enable the "stage2_build" feature in the main build stage
// println!("cargo:rustc-cfg=feature=\"stage2_build\"\n");
//
// let out_dir = env::var("OUT_DIR").unwrap();
// let dest_path = Path::new(&out_dir).join("basepoint_table.rs");
// let mut f = File::create(&dest_path).unwrap();
//
// // Generate a table of precomputed multiples of the basepoint
// let table = EdwardsBasepointTable::create(&constants::ED25519_BASEPOINT_POINT);
//
// f.write_all(
// format!(
// "\n
//#[cfg(feature = \"u32_backend\")]
//use backend::serial::u32::field::FieldElement2625;
//
//#[cfg(feature = \"u64_backend\")]
//use backend::serial::u64::field::FieldElement51;
//
//use edwards::EdwardsBasepointTable;
//
//use backend::serial::curve_models::AffineNielsPoint;
//
//use window::LookupTable;
//
///// Table containing precomputed multiples of the Ed25519 basepoint \\\\(B = (x, 4/5)\\\\).
//pub const ED25519_BASEPOINT_TABLE: EdwardsBasepointTable = ED25519_BASEPOINT_TABLE_INNER_DOC_HIDDEN;
//
///// Inner constant, used to avoid filling the docs with precomputed points.
//#[doc(hidden)]
//pub const ED25519_BASEPOINT_TABLE_INNER_DOC_HIDDEN: EdwardsBasepointTable = {:?};
//\n\n",
// &table
// )
// .as_bytes(),
// )
// .unwrap();
//
// // Now generate AFFINE_ODD_MULTIPLES_OF_BASEPOINT
// // if we are going to build the serial scalar_mul backend
// #[cfg(not(all(
// feature = "simd_backend",
// any(target_feature = "avx2", target_feature = "avx512ifma")
// )))]
// {
// use backend::serial::curve_models::AffineNielsPoint;
// use window::NafLookupTable8;
//
// let B = &constants::ED25519_BASEPOINT_POINT;
// let odd_multiples = NafLookupTable8::<AffineNielsPoint>::from(B);
//
// f.write_all(
// format!(
// "\n
//use window::NafLookupTable8;
///// Odd multiples of the basepoint `[B, 3B, 5B, 7B, 9B, 11B, 13B, 15B, ..., 127B]`.
//pub(crate) const AFFINE_ODD_MULTIPLES_OF_BASEPOINT: NafLookupTable8<AffineNielsPoint> = {:?};
//\n\n",
// &odd_multiples
// )
// .as_bytes(),
// )
// .unwrap();
// }
}
visexp.py | # -*- coding: utf-8 -*-
#Name: Fractal Example - Exponential Curves
#Author: Sean Pope
#Example use of the fractal engine and coefficient block.
#Creates random coefficient blocks and draws frames to create a simple animation.
#This one is optimized for the exponential variation.
import matplotlib.pyplot as plt
import PyFrac as pf
plt.style.use('dark_background') #Mostly just used for the black background.
ax = plt.subplot(111,frameon=False) #Create a figure and axes for drawing.
ax.axes.get_xaxis().set_visible(False) #Hide axis
ax.axes.get_yaxis().set_visible(False)
plt.xlim(-1,1) #This function looks best in the biunit square.
plt.ylim(-1,1)
def quitloop(*args): #Closes the event loop when no longer needed.
global run
run = 0
return
fig = plt.gcf() #Get the figure that pyplot spawned.
fig.canvas.mpl_connect('close_event', quitloop) #If the window is closed, exit loop.
fig.canvas.mpl_connect('key_press_event', quitloop) #If a button is pressed, close.
mng = plt.get_current_fig_manager() #Grab the figure window
mng.full_screen_toggle() #Maximize the image to fill the screen.
""" Runtime variables """
run = 1 #Set to continue drawing frames, unset to terminate
framecount = 0 #Used to set frames drawn per coefficient block
frameclear = 0 #Starts deleting frames when set
coeffs = pf.coeffs.rand(0.9,0.2)
""" Main event loop. """
while(run):
framecount += 1
if framecount == 40: #Draws a new coefficient set if the current image is done.
frameclear = 1
coeffs = pf.coeffs.rand(0.9,0.2)
framecount -= 40 #Reset frame counter.
fractal = pf.engine.fractpoints(coeffs, 200, pf.variations.exponential) #Run the engine to get a figure.
plt.scatter(fractal['x'], fractal['y'], #Get the x,y coordinates for each point
marker='.', alpha=0.8, #Use small pixel markers with low opacity
c=fractal['color'], cmap='plasma', #Map the color row to this colormap.
s=25, edgecolor='none'
)
if frameclear:
del ax.collections[0] #Remove the oldest frame.
plt.pause(.01) #This pause draws the frame before looping.
plt.close(fig)
CacheModel.ts | import { Table, Field, DBType, PrimaryKey } from "fastcar-core/annotation";
import "reflect-metadata";
@Table("cache")
class CacheModel {
@Field("key")
@DBType("varchar")
@PrimaryKey
key!: string;
@Field("value")
@DBType("varchar")
value!: string;
@Field("ttl")
@DBType("int")
ttl!: number;
@Field("update_time")
updateTime!: Date;
constructor(...args: any) {
Object.assign(this, ...args);
}
}
export default CacheModel;
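// Usage sketch (assumption): the constructor above merges any plain objects
// passed to it via Object.assign.
//
//   const entry = new CacheModel({ key: "session:1", value: "{}", ttl: 3600 });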
from_into.rs | // The From trait is used for value-to-value conversions.
// If From is implemented correctly for a type, the Into trait should work conversely.
// You can read more about it at https://doc.rust-lang.org/std/convert/trait.From.html
#[derive(Debug)]
struct Person {
name: String,
age: usize,
}
// We implement the Default trait to use it as a fallback
// when the provided string is not convertible into a Person object
impl Default for Person {
fn default() -> Person {
Person {
name: String::from("John"),
age: 30,
}
}
}
// Your task is to complete this implementation
// in order for the line `let p = Person::from("Mark,20")` to compile
// Please note that you'll need to parse the age component into a `usize`
// with something like `"4".parse::<usize>()`. The outcome of this needs to
// be handled appropriately.
//
// Steps:
// 1. If the length of the provided string is 0, then return the default of Person
// 2. Split the given string on the commas present in it
// 3. Extract the first element from the split operation and use it as the name
// 4. If the name is empty, then return the default of Person
// 5. Extract the other element from the split operation and parse it into a `usize` as the age
// If while parsing the age, something goes wrong, then return the default of Person
// Otherwise, then return an instantiated Person object with the results
impl From<&str> for Person {
fn from(s: &str) -> Person {
let parts: Vec<&str> = s.trim().split(',').collect();
if parts.len() != 2 {
Self::default()
} else if parts[0].is_empty() || parts[1].is_empty() {
Self::default()
} else {
let name = parts[0].to_string();
if let Ok(age) = parts[1].parse::<usize>() {
Person { name, age }
} else {
Self::default()
}
}
}
}
fn main() {
// Use the `from` function
let p1 = Person::from("Mark,20");
// Since From is implemented for Person, we should be able to use Into
let p2: Person = "Gerald,70".into();
println!("{:?}", p1);
println!("{:?}", p2);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_default() {
// Test that the default person is 30 year old John
let dp = Person::default();
assert_eq!(dp.name, "John");
assert_eq!(dp.age, 30);
}
#[test]
fn test_bad_convert() {
// Test that John is returned when bad string is provided
let p = Person::from("");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_good_convert() {
// Test that "Mark,20" works
let p = Person::from("Mark,20");
assert_eq!(p.name, "Mark");
assert_eq!(p.age, 20);
}
#[test]
fn test_bad_age() {
// Test that "Mark,twenty" will return the default person due to an error in parsing age
let p = Person::from("Mark,twenty");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_missing_comma_and_age() {
let p: Person = Person::from("Mark");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_missing_age() {
let p: Person = Person::from("Mark,");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_missing_name() {
let p: Person = Person::from(",1");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_missing_name_and_age() {
let p: Person = Person::from(",");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_missing_name_and_invalid_age() {
let p: Person = Person::from(",one");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_trailing_comma() {
let p: Person = Person::from("Mike,32,");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_trailing_comma_and_some_string() {
let p: Person = Person::from("Mike,32,man");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
}
visualize.py | from src.data.datasets import FlickrDataset
from src.config import config
import matplotlib.pyplot as plt
import torch
from PIL import Image
def display_img_FlickrDataset(dataset, index=0, predicted_caption=None):
image = Image.open(dataset.images_directory / dataset.image_ids[index])
caption_txt = "\n".join(dataset.img_caption_dict[dataset.image_ids[index]])
fig = plt.figure(figsize=(30, 12))
ax = fig.add_subplot(1, 2, 1)
ax.imshow(image)
ax.axis("off")
ax = fig.add_subplot(1, 2, 2)
ax.text(0,0.1,"Actual:", fontsize=15, verticalalignment="top", weight="bold")
ax.text(0,0.15,caption_txt, fontsize=15, verticalalignment="top", weight="bold")
ax.text(0,0.4,"Predicted:", fontsize=15, verticalalignment="top", weight="bold")
ax.text(0,0.45,predicted_caption or "", fontsize=15, verticalalignment="top", weight="bold")
ax.axis("off")
ax.invert_yaxis()
if __name__ == "__main__":
training_dataset = FlickrDataset(file_name=config.CAPTIONS_TRAIN_FILE, dtype="train")
display_img_FlickrDataset(training_dataset, 100)
score-display.tsx | import { Score } from 'app/models/Score';
import React from 'react';
export type ScoreProps = {
scores: Score[];
};
const ScoreDisplay: React.FC<ScoreProps> = (props: ScoreProps) => {
const scores = props.scores;
const orderedScores = [...scores].sort((a, b) => b.score - a.score); // copy before sorting so props aren't mutated
return <div className="score-table">
<div className="table-header">
<div className="table-entry">Scores</div>
        </div>
        {orderedScores.map((score: Score) => {
            return <div key={score.player} className="table-row">
                <div className="table-entry">{score.player}</div>
                <div className="table-entry">{score.score}</div>
            </div>;
        })}
    </div>
}
export default ScoreDisplay;
mod.rs | mod wireframe;
pub use wireframe::{DebugWireframePlugin, EnableWireframe};
common.py | """Common IO api utilities"""
from __future__ import annotations
import bz2
import codecs
from collections import abc
import dataclasses
import gzip
from io import BufferedIOBase, BytesIO, RawIOBase, StringIO, TextIOWrapper
import mmap
import os
from typing import IO, Any, AnyStr, Dict, List, Mapping, Optional, Tuple, Union, cast
from urllib.parse import (
urljoin,
urlparse as parse_url,
uses_netloc,
uses_params,
uses_relative,
)
import warnings
import zipfile
from pandas._typing import (
Buffer,
CompressionDict,
CompressionOptions,
FileOrBuffer,
FilePathOrBuffer,
StorageOptions,
)
from pandas.compat import get_lzma_file, import_lzma
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.common import is_file_like
lzma = import_lzma()
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard("")
@dataclasses.dataclass
class IOArgs:
"""
Return value of io/common.py:_get_filepath_or_buffer.
Note (copied from io/parsers):
filepath_or_buffer can be Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile]
though mypy handling of conditional imports is difficult.
See https://github.com/python/mypy/issues/1297
"""
filepath_or_buffer: FileOrBuffer
encoding: str
mode: str
compression: CompressionDict
should_close: bool = False
@dataclasses.dataclass
class IOHandles:
"""
Return value of io/common.py:get_handle
Can be used as a context manager.
This is used to easily close created buffers and to handle corner cases when
TextIOWrapper is inserted.
handle: The file handle to be used.
created_handles: All file handles that are created by get_handle
is_wrapped: Whether a TextIOWrapper needs to be detached.
"""
handle: Buffer
compression: CompressionDict
created_handles: List[Buffer] = dataclasses.field(default_factory=list)
is_wrapped: bool = False
is_mmap: bool = False
def close(self) -> None:
"""
Close all created buffers.
Note: If a TextIOWrapper was inserted, it is flushed and detached to
avoid closing the potentially user-created buffer.
"""
if self.is_wrapped:
assert isinstance(self.handle, TextIOWrapper)
self.handle.flush()
self.handle.detach()
self.created_handles.remove(self.handle)
try:
for handle in self.created_handles:
handle.close()
except (OSError, ValueError):
pass
self.created_handles = []
self.is_wrapped = False
def __enter__(self) -> IOHandles:
return self
def __exit__(self, *args: Any) -> None:
self.close()
def is_url(url) -> bool:
"""
Check to see if a URL has a valid protocol.
Parameters
----------
url : str or unicode
Returns
-------
isurl : bool
If `url` has a valid protocol return True otherwise False.
"""
if not isinstance(url, str):
return False
return parse_url(url).scheme in _VALID_URLS
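# Illustrative sketch (assumption, not part of the module):
#
#   >>> is_url("https://example.com/data.csv")
#   True
#   >>> is_url("relative/path.csv")
#   False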
def _expand_user(filepath_or_buffer: FileOrBuffer[AnyStr]) -> FileOrBuffer[AnyStr]:
"""
Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
Parameters
----------
filepath_or_buffer : object to be converted if possible
Returns
-------
expanded_filepath_or_buffer : an expanded filepath or the
input if not expandable
"""
if isinstance(filepath_or_buffer, str):
return os.path.expanduser(filepath_or_buffer)
return filepath_or_buffer
def validate_header_arg(header) -> None:
if isinstance(header, bool):
raise TypeError(
"Passing a bool to header is invalid. Use header=None for no header or "
"header=int or list-like of ints to specify "
"the row(s) making up the column names"
)
def stringify_path(
filepath_or_buffer: FilePathOrBuffer[AnyStr],
convert_file_like: bool = False,
) -> FileOrBuffer[AnyStr]:
"""
Attempt to convert a path-like object to a string.
Parameters
----------
filepath_or_buffer : object to be converted
Returns
-------
str_filepath_or_buffer : maybe a string version of the object
Notes
-----
Objects supporting the fspath protocol (python 3.6+) are coerced
according to its __fspath__ method.
Any other object is passed through unchanged, which includes bytes,
strings, buffers, or anything else that's not even path-like.
"""
if not convert_file_like and is_file_like(filepath_or_buffer):
# GH 38125: some fsspec objects implement os.PathLike but have already opened a
# file. This prevents opening the file a second time. infer_compression calls
# this function with convert_file_like=True to infer the compression.
return cast(FileOrBuffer[AnyStr], filepath_or_buffer)
if isinstance(filepath_or_buffer, os.PathLike):
filepath_or_buffer = filepath_or_buffer.__fspath__()
return _expand_user(filepath_or_buffer)
def urlopen(*args, **kwargs):
"""
Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of
the stdlib.
"""
import urllib.request
return urllib.request.urlopen(*args, **kwargs)
def is_fsspec_url(url: FilePathOrBuffer) -> bool:
"""
Returns true if the given URL looks like
something fsspec can handle
"""
return (
isinstance(url, str)
and "://" in url
and not url.startswith(("http://", "https://"))
)
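# Illustrative sketch (assumption): protocol-prefixed paths are routed through
# fsspec, while plain http(s) URLs keep the urllib code path.
#
#   >>> is_fsspec_url("s3://bucket/key.csv")
#   True
#   >>> is_fsspec_url("https://example.com/data.csv")
#   False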
def _get_filepath_or_buffer(
filepath_or_buffer: FilePathOrBuffer,
encoding: str = "utf-8",
compression: CompressionOptions = None,
mode: str = "r",
storage_options: StorageOptions = None,
) -> IOArgs:
"""
If the filepath_or_buffer is a url, translate and return the buffer.
Otherwise passthrough.
Parameters
----------
filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
or buffer
compression : {{'gzip', 'bz2', 'zip', 'xz', None}}, optional
encoding : the encoding to use to decode bytes, default is 'utf-8'
mode : str, optional
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc., if using a URL that will
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
docs for the set of allowed keys and values
.. versionadded:: 1.2.0
.. versionchanged:: 1.2.0
Returns the dataclass IOArgs.
"""
filepath_or_buffer = stringify_path(filepath_or_buffer)
# handle compression dict
compression_method, compression = get_compression_method(compression)
compression_method = infer_compression(filepath_or_buffer, compression_method)
# GH21227 internal compression is not used for non-binary handles.
if compression_method and hasattr(filepath_or_buffer, "write") and "b" not in mode:
warnings.warn(
"compression has no effect when passing a non-binary object as input.",
RuntimeWarning,
stacklevel=2,
)
compression_method = None
compression = dict(compression, method=compression_method)
# uniform encoding names
if encoding is not None:
encoding = encoding.replace("_", "-").lower()
# bz2 and xz do not write the byte order mark for utf-16 and utf-32
# print a warning when writing such files
if (
"w" in mode
and compression_method in ["bz2", "xz"]
and encoding in ["utf-16", "utf-32"]
):
warnings.warn(
f"{compression} will not write the byte order mark for {encoding}",
UnicodeWarning,
)
# Use binary mode when converting path-like objects to file-like objects (fsspec)
# except when text mode is explicitly requested. The original mode is returned if
# fsspec is not used.
fsspec_mode = mode
if "t" not in fsspec_mode and "b" not in fsspec_mode:
fsspec_mode += "b"
if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer):
# TODO: fsspec can also handle HTTP via requests, but leaving this
# unchanged. using fsspec appears to break the ability to infer if the
# server responded with gzipped data
storage_options = storage_options or {}
# waiting until now for importing to match intended lazy logic of
# urlopen function defined elsewhere in this module
import urllib.request
# assuming storage_options is to be interpreted as headers
req_info = urllib.request.Request(filepath_or_buffer, headers=storage_options)
with urlopen(req_info) as req:
content_encoding = req.headers.get("Content-Encoding", None)
if content_encoding == "gzip":
# Override compression based on Content-Encoding header
compression = {"method": "gzip"}
reader = BytesIO(req.read())
return IOArgs(
filepath_or_buffer=reader,
encoding=encoding,
compression=compression,
should_close=True,
mode=fsspec_mode,
)
if is_fsspec_url(filepath_or_buffer):
assert isinstance(
filepath_or_buffer, str
) # just to appease mypy for this branch
# two special-case s3-like protocols; these have special meaning in Hadoop,
# but are equivalent to just "s3" from fsspec's point of view
# cc #11071
if filepath_or_buffer.startswith("s3a://"):
filepath_or_buffer = filepath_or_buffer.replace("s3a://", "s3://")
if filepath_or_buffer.startswith("s3n://"):
filepath_or_buffer = filepath_or_buffer.replace("s3n://", "s3://")
fsspec = import_optional_dependency("fsspec")
# If botocore is installed we fallback to reading with anon=True
# to allow reads from public buckets
err_types_to_retry_with_anon: List[Any] = []
try:
import_optional_dependency("botocore")
from botocore.exceptions import ClientError, NoCredentialsError
err_types_to_retry_with_anon = [
ClientError,
NoCredentialsError,
PermissionError,
]
except ImportError:
pass
try:
file_obj = fsspec.open(
filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
).open()
# GH 34626 Reads from Public Buckets without Credentials needs anon=True
except tuple(err_types_to_retry_with_anon):
if storage_options is None:
storage_options = {"anon": True}
else:
# don't mutate user input.
storage_options = dict(storage_options)
storage_options["anon"] = True
file_obj = fsspec.open(
filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
).open()
return IOArgs(
filepath_or_buffer=file_obj,
encoding=encoding,
compression=compression,
should_close=True,
mode=fsspec_mode,
)
elif storage_options:
raise ValueError(
"storage_options passed with file object or non-fsspec file path"
)
if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)):
return IOArgs(
filepath_or_buffer=_expand_user(filepath_or_buffer),
encoding=encoding,
compression=compression,
should_close=False,
mode=mode,
)
if not is_file_like(filepath_or_buffer):
msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}"
raise ValueError(msg)
return IOArgs(
filepath_or_buffer=filepath_or_buffer,
encoding=encoding,
compression=compression,
should_close=False,
mode=mode,
)
def file_path_to_url(path: str) -> str:
"""
converts an absolute native path to a FILE URL.
Parameters
----------
path : a path in native format
Returns
-------
a valid FILE URL
"""
# lazify expensive import (~30ms)
from urllib.request import pathname2url
return urljoin("file:", pathname2url(path))
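# Illustrative sketch (assumption - POSIX-style path shown; Windows paths are
# translated by pathname2url):
#
#   >>> file_path_to_url("/tmp/data.csv")
#   'file:///tmp/data.csv'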
_compression_to_extension = {"gzip": ".gz", "bz2": ".bz2", "zip": ".zip", "xz": ".xz"}
def get_compression_method(
compression: CompressionOptions,
) -> Tuple[Optional[str], CompressionDict]:
"""
Simplifies a compression argument to a compression method string and
a mapping containing additional arguments.
Parameters
----------
compression : str or mapping
If string, specifies the compression method. If mapping, value at key
'method' specifies compression method.
Returns
-------
    tuple of ({compression method} : Optional[str],
              {compression arguments} : Dict[str, Any])
Raises
------
ValueError on mapping missing 'method' key
"""
compression_method: Optional[str]
if isinstance(compression, Mapping):
compression_args = dict(compression)
try:
compression_method = compression_args.pop("method")
except KeyError as err:
raise ValueError("If mapping, compression must have key 'method'") from err
else:
compression_args = {}
compression_method = compression
return compression_method, compression_args
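# Illustrative behavior of get_compression_method (a minimal sketch covering
# both accepted input shapes; the values are hypothetical):
#     >>> get_compression_method("gzip")
#     ('gzip', {})
#     >>> get_compression_method({"method": "zip", "archive_name": "out.csv"})
#     ('zip', {'archive_name': 'out.csv'})
# A mapping without a 'method' key raises ValueError.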
def infer_compression(
filepath_or_buffer: FilePathOrBuffer, compression: Optional[str]
) -> Optional[str]:
"""
Get the compression method for filepath_or_buffer. If compression='infer',
the inferred compression method is returned. Otherwise, the input
compression method is returned unchanged, unless it's invalid, in which
case an error is raised.
Parameters
----------
filepath_or_buffer : str or file handle
File path or object.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
If 'infer' and `filepath_or_buffer` is path-like, then detect
compression from the following extensions: '.gz', '.bz2', '.zip',
or '.xz' (otherwise no compression).
Returns
-------
string or None
Raises
------
ValueError on invalid compression specified.
"""
if compression is None:
return None
# Infer compression
if compression == "infer":
# Convert all path types (e.g. pathlib.Path) to strings
filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True)
if not isinstance(filepath_or_buffer, str):
# Cannot infer compression of a buffer, assume no compression
return None
# Infer compression from the filename/URL extension
for compression, extension in _compression_to_extension.items():
if filepath_or_buffer.lower().endswith(extension):
return compression
return None
# Compression has been specified. Check that it's valid
if compression in _compression_to_extension:
return compression
# https://github.com/python/mypy/issues/5492
# Unsupported operand types for + ("List[Optional[str]]" and "List[str]")
valid = ["infer", None] + sorted(
_compression_to_extension
) # type: ignore[operator]
msg = (
f"Unrecognized compression type: {compression}\n"
f"Valid compression types are {valid}"
)
raise ValueError(msg)
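# Illustrative behavior of infer_compression (a minimal sketch; the file names
# are hypothetical):
#     >>> infer_compression("data.csv.gz", compression="infer")
#     'gzip'
#     >>> infer_compression("data.csv", compression="infer") is None
#     True
#     >>> infer_compression(BytesIO(b""), compression="infer") is None
#     True
# A buffer carries no file extension, so nothing can be inferred from it.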
def get_handle(
path_or_buf: FilePathOrBuffer,
mode: str,
encoding: Optional[str] = None,
compression: CompressionOptions = None,
memory_map: bool = False,
is_text: bool = True,
errors: Optional[str] = None,
storage_options: StorageOptions = None,
) -> IOHandles:
"""
Get file handle for given path/buffer and mode.
Parameters
----------
path_or_buf : str or file handle
File path or object.
mode : str
Mode to open path_or_buf with.
encoding : str or None
Encoding to use.
compression : str or dict, default None
If string, specifies compression mode. If dict, value at key 'method'
specifies compression mode. Compression mode must be one of {'infer',
'gzip', 'bz2', 'zip', 'xz', None}. If compression mode is 'infer'
and `filepath_or_buffer` is path-like, then detect compression from
the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise
no compression). If dict and compression mode is one of
{'zip', 'gzip', 'bz2'}, or inferred as one of the above,
other entries passed as additional compression options.
.. versionchanged:: 1.0.0
May now be a dict with key 'method' as compression mode
and other keys as compression options if compression
mode is 'zip'.
.. versionchanged:: 1.1.0
Passing compression options as keys in dict is now
supported for compression modes 'gzip' and 'bz2' as well as 'zip'.
memory_map : boolean, default False
See parsers._parser_params for more information.
is_text : boolean, default True
Whether the type of the content passed to the file/buffer is string or
bytes. This is not the same as `"b" not in mode`. If a string content is
passed to a binary file/buffer, a wrapper is inserted.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
    storage_options : StorageOptions, default None
Passed to _get_filepath_or_buffer
.. versionchanged:: 1.2.0
Returns the dataclass IOHandles
"""
# Windows does not default to utf-8. Set to utf-8 for a consistent behavior
encoding_passed, encoding = encoding, encoding or "utf-8"
# read_csv does not know whether the buffer is opened in binary/text mode
if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
mode += "b"
# open URLs
ioargs = _get_filepath_or_buffer(
path_or_buf,
encoding=encoding,
compression=compression,
mode=mode,
storage_options=storage_options,
)
handle = ioargs.filepath_or_buffer
handles: List[Buffer]
# memory mapping needs to be the first step
handle, memory_map, handles = _maybe_memory_map(
handle, memory_map, ioargs.encoding, ioargs.mode, errors
)
is_path = isinstance(handle, str)
compression_args = dict(ioargs.compression)
compression = compression_args.pop("method")
if compression:
# compression libraries do not like an explicit text-mode
ioargs.mode = ioargs.mode.replace("t", "")
# GZ Compression
if compression == "gzip":
if is_path:
assert isinstance(handle, str)
handle = gzip.GzipFile(
filename=handle,
mode=ioargs.mode,
**compression_args,
)
else:
handle = gzip.GzipFile(
fileobj=handle, # type: ignore[arg-type]
mode=ioargs.mode,
**compression_args,
)
# BZ Compression
elif compression == "bz2":
handle = bz2.BZ2File(
handle, # type: ignore[arg-type]
mode=ioargs.mode,
**compression_args,
)
# ZIP Compression
elif compression == "zip":
handle = _BytesZipFile(handle, ioargs.mode, **compression_args)
if handle.mode == "r":
handles.append(handle)
zip_names = handle.namelist()
if len(zip_names) == 1:
handle = handle.open(zip_names.pop())
elif len(zip_names) == 0:
raise ValueError(f"Zero files found in ZIP file {path_or_buf}")
else:
raise ValueError(
"Multiple files found in ZIP file. "
f"Only one file per ZIP: {zip_names}"
)
# XZ Compression
elif compression == "xz":
handle = get_lzma_file(lzma)(handle, ioargs.mode)
# Unrecognized Compression
else:
msg = f"Unrecognized compression type: {compression}"
raise ValueError(msg)
assert not isinstance(handle, str)
handles.append(handle)
elif isinstance(handle, str):
# Check whether the filename is to be opened in binary mode.
# Binary mode does not support 'encoding' and 'newline'.
if ioargs.encoding and "b" not in ioargs.mode:
if errors is None and encoding_passed is None:
# ignore errors when no encoding is specified
errors = "replace"
# Encoding
handle = open(
handle,
ioargs.mode,
encoding=ioargs.encoding,
errors=errors,
newline="",
)
else:
# Binary mode
handle = open(handle, ioargs.mode)
handles.append(handle)
# Convert BytesIO or file objects passed with an encoding
is_wrapped = False
if is_text and (compression or _is_binary_mode(handle, ioargs.mode)):
handle = TextIOWrapper(
handle, # type: ignore[arg-type]
encoding=ioargs.encoding,
errors=errors,
newline="",
)
handles.append(handle)
# only marked as wrapped when the caller provided a handle
is_wrapped = not (
isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close
)
handles.reverse() # close the most recently added buffer first
if ioargs.should_close:
assert not isinstance(ioargs.filepath_or_buffer, str)
handles.append(ioargs.filepath_or_buffer)
assert not isinstance(handle, str)
return IOHandles(
handle=handle,
created_handles=handles,
is_wrapped=is_wrapped,
is_mmap=memory_map,
compression=ioargs.compression,
)
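# Illustrative usage of get_handle (a minimal sketch; the file name is
# hypothetical). The returned IOHandles records every buffer created here, so
# closing it only closes what this function opened:
#     handles = get_handle("data.csv.gz", mode="r", compression="infer")
#     try:
#         text = handles.handle.read()  # decompressed, decoded text
#     finally:
#         handles.close()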
# error: Definition of "__exit__" in base class "ZipFile" is incompatible with
# definition in base class "BytesIO" [misc]
# error: Definition of "__enter__" in base class "ZipFile" is incompatible with
# definition in base class "BytesIO" [misc]
# error: Definition of "__enter__" in base class "ZipFile" is incompatible with
# definition in base class "BinaryIO" [misc]
# error: Definition of "__enter__" in base class "ZipFile" is incompatible with
# definition in base class "IO" [misc]
# error: Definition of "read" in base class "ZipFile" is incompatible with
# definition in base class "BytesIO" [misc]
# error: Definition of "read" in base class "ZipFile" is incompatible with
# definition in base class "IO" [misc]
class _BytesZipFile(zipfile.ZipFile, BytesIO): # type: ignore[misc]
"""
    Wrapper for the standard library class ZipFile that allows the returned
    file-like handle to accept byte strings via its `write` method.
    BytesIO provides the attributes of a file-like object, while
    ZipFile.writestr writes byte strings into a member of the archive.
"""
# GH 17778
def __init__(
self,
file: FilePathOrBuffer,
mode: str,
archive_name: Optional[str] = None,
**kwargs,
):
mode = mode.replace("b", "")
self.archive_name = archive_name
self.multiple_write_buffer: Optional[Union[StringIO, BytesIO]] = None
kwargs_zip: Dict[str, Any] = {"compression": zipfile.ZIP_DEFLATED}
kwargs_zip.update(kwargs)
super().__init__(file, mode, **kwargs_zip) # type: ignore[arg-type]
def write(self, data):
# buffer multiple write calls, write on flush
if self.multiple_write_buffer is None:
self.multiple_write_buffer = (
BytesIO() if isinstance(data, bytes) else StringIO()
)
self.multiple_write_buffer.write(data) | def flush(self) -> None:
# write to actual handle and close write buffer
if self.multiple_write_buffer is None or self.multiple_write_buffer.closed:
return
# ZipFile needs a non-empty string
archive_name = self.archive_name or self.filename or "zip"
with self.multiple_write_buffer:
super().writestr(archive_name, self.multiple_write_buffer.getvalue())
def close(self):
self.flush()
super().close()
@property
def closed(self):
return self.fp is None
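# Illustrative usage of _BytesZipFile (a minimal sketch; the names are
# hypothetical). Writes are buffered and only land in the archive as a single
# member once flush()/close() runs:
#     zf = _BytesZipFile("out.zip", mode="w", archive_name="data.csv")
#     zf.write(b"a,b\n1,2\n")
#     zf.close()  # flush() calls writestr("data.csv", ...) here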
class _MMapWrapper(abc.Iterator):
"""
    Wrapper for Python's mmap class so that it can be properly read in
by Python's csv.reader class.
Parameters
----------
f : file object
File object to be mapped onto memory. Must support the 'fileno'
method or have an equivalent attribute
"""
def __init__(self, f: IO):
self.attributes = {}
        for attribute in ("seekable", "readable", "writable"):
if not hasattr(f, attribute):
continue
self.attributes[attribute] = getattr(f, attribute)()
self.mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
def __getattr__(self, name: str):
if name in self.attributes:
return lambda: self.attributes[name]
return getattr(self.mmap, name)
    def __iter__(self) -> "_MMapWrapper":
return self
def __next__(self) -> str:
newbytes = self.mmap.readline()
# readline returns bytes, not str, but Python's CSV reader
# expects str, so convert the output to str before continuing
newline = newbytes.decode("utf-8")
# mmap doesn't raise if reading past the allocated
# data but instead returns an empty string, so raise
# if that is returned
if newline == "":
raise StopIteration
return newline
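# Illustrative usage of _MMapWrapper (a minimal sketch; the file name and the
# process() callback are hypothetical). Iteration yields decoded str lines,
# which is what csv.reader expects:
#     with open("data.csv", "r") as f:
#         for line in _MMapWrapper(f):
#             process(line)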
def _maybe_memory_map(
handle: FileOrBuffer,
memory_map: bool,
encoding: str,
mode: str,
errors: Optional[str],
) -> Tuple[FileOrBuffer, bool, List[Buffer]]:
"""Try to memory map file/buffer."""
handles: List[Buffer] = []
memory_map &= hasattr(handle, "fileno") or isinstance(handle, str)
if not memory_map:
return handle, memory_map, handles
# need to open the file first
if isinstance(handle, str):
if encoding and "b" not in mode:
# Encoding
handle = open(handle, mode, encoding=encoding, errors=errors, newline="")
else:
# Binary mode
handle = open(handle, mode)
handles.append(handle)
try:
wrapped = cast(mmap.mmap, _MMapWrapper(handle)) # type: ignore[arg-type]
handle.close()
handles.remove(handle)
handles.append(wrapped)
handle = wrapped
except Exception:
# we catch any errors that may have occurred
# because that is consistent with the lower-level
# functionality of the C engine (pd.read_csv), so
# leave the file handler as is then
memory_map = False
return handle, memory_map, handles
def file_exists(filepath_or_buffer: FilePathOrBuffer) -> bool:
"""Test whether file exists."""
exists = False
filepath_or_buffer = stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, str):
return exists
try:
exists = os.path.exists(filepath_or_buffer)
# gh-5874: if the filepath is too long will raise here
except (TypeError, ValueError):
pass
return exists
def _is_binary_mode(handle: FilePathOrBuffer, mode: str) -> bool:
"""Whether the handle is opened in binary mode"""
# classes that expect string but have 'b' in mode
text_classes = (codecs.StreamReaderWriter,)
if isinstance(handle, text_classes):
return False
# classes that expect bytes
binary_classes = (BufferedIOBase, RawIOBase)
return isinstance(handle, binary_classes) or "b" in getattr(handle, "mode", mode) | |
webpack.client.base.config.js | // Common client-side webpack configuration used by webpack.hot.config and webpack.rails.config.
const webpack = require('webpack');
const path = require('path');
const devBuild = process.env.NODE_ENV !== 'production';
const nodeEnv = devBuild ? 'development' : 'production';
module.exports = {
// the project dir
context: __dirname,
entry: {
// See use of 'vendor' in the CommonsChunkPlugin inclusion below.
vendor: [
'babel-polyfill',
'jquery',
'i18n-js',
],
// This will contain the app entry points defined by webpack.hot.config and webpack.rails.config
app: [
'imports?I18n=i18n-js!./vendor/translations',
'./vendor/imports',
'./app/lib/startup/clientRegistration'
],
},
resolve: {
extensions: ['', '.js', '.jsx'],
alias: {
lib: path.join(process.cwd(), 'app', 'lib'),
react: path.resolve('./node_modules/react'),
'react-dom': path.resolve('./node_modules/react-dom'),
},
}, | new webpack.DefinePlugin({
'process.env': {
NODE_ENV: JSON.stringify(nodeEnv),
},
}),
// https://webpack.github.io/docs/list-of-plugins.html#2-explicit-vendor-chunk
new webpack.optimize.CommonsChunkPlugin({
// This name 'vendor' ties into the entry definition
name: 'vendor',
// We don't want the default vendor.js name
filename: 'vendor-bundle.js',
// Passing Infinity just creates the commons chunk, but moves no modules into it.
// In other words, we only put what's in the vendor entry definition in vendor-bundle.js
minChunks: Infinity,
}),
],
module: {
loaders: [
{ test: /\.css$/, loader: 'style-loader!css-loader'},
// Not all apps require jQuery. Many Rails apps do, such as those using TurboLinks or bootstrap js
{ test: require.resolve('jquery'), loader: 'expose?jQuery' },
{ test: require.resolve('jquery'), loader: 'expose?$' },
{ test: require.resolve('i18n-js'), loader: 'expose?I18n' },
],
},
externals: [
{ xmlhttprequest: '{XMLHttpRequest:XMLHttpRequest}' }
]
}; | plugins: [ |
config_test.go | package config
import "testing"
func | (t *testing.T) {
tests := []struct {
name string
config SupplierConfig
want string
}{
{
name: "test with empty config",
config: SupplierConfig{},
want: "",
},
{
name: "test with empty path",
config: SupplierConfig{
Key: "tfstate",
Backend: "s3",
Path: "",
},
want: "tfstate+s3://",
},
{
name: "test valid config",
config: SupplierConfig{
Key: "tfstate",
Backend: "s3",
Path: "my-bucket/terraform.tfstate",
},
want: "tfstate+s3://my-bucket/terraform.tfstate",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.config.String(); got != tt.want {
t.Errorf("String() = %v, want %v", got, tt.want)
}
})
}
}
| TestSupplierConfig_String |
features.ts | import { HoverMerged } from '@sourcegraph/codeintellify/lib/types'
import { flatten } from 'lodash'
import { forkJoin, Observable } from 'rxjs'
import { map } from 'rxjs/operators'
import { Definition, Location, TextDocumentDecoration } from '../../../shared/src/api/protocol/plainTypes'
import { ExtensionsControllerProps } from '../extensions/ExtensionsClientCommonContext'
import { AbsoluteRepo, AbsoluteRepoFile, parseRepoURI } from '../repo'
import { toAbsoluteBlobURL, toPrettyBlobURL } from '../util/url'
import {
fetchXdefinition,
fetchXreferences,
LSPReferencesParams,
LSPSelector,
LSPTextDocumentPositionParams,
SymbolLocationInformation,
XReferenceOptions,
} from './lsp'
/**
* Specifies an LSP mode.
*/
export interface ModeSpec {
/** The LSP mode, which identifies the language server to use. */
mode: string
}
export { HoverMerged } // reexport to avoid needing to change all import sites - TODO(sqs): actually go change them all
/**
* Fetches hover information for the given location.
*
* @param ctx the location
* @return hover for the location
*/
export function getHover(
ctx: LSPTextDocumentPositionParams,
{ extensionsController }: ExtensionsControllerProps
): Observable<HoverMerged | null> {
return extensionsController.registries.textDocumentHover
.getHover({
textDocument: { uri: `git://${ctx.repoPath}?${ctx.commitID}#${ctx.filePath}` },
position: {
character: ctx.position.character - 1,
line: ctx.position.line - 1,
},
})
.pipe(map(hover => hover as HoverMerged | null))
}
/**
* Fetches definitions (in the same repository) for the given location.
*
* @param ctx the location
* @return definitions of the symbol at the location
*/
export function | (
ctx: LSPTextDocumentPositionParams,
{ extensionsController }: ExtensionsControllerProps
): Observable<Definition> {
return extensionsController.registries.textDocumentDefinition.getLocation({
textDocument: { uri: `git://${ctx.repoPath}?${ctx.commitID}#${ctx.filePath}` },
position: {
character: ctx.position.character - 1,
line: ctx.position.line - 1,
},
})
}
/**
* Fetches the destination URL for the "Go to definition" action in the hover.
*
* Only the first URL is returned, even if there are results from multiple providers or a provider returns
* multiple results.
*
* @param ctx the location containing the token whose definition to jump to
* @return destination URL
*/
export function getJumpURL(
ctx: LSPTextDocumentPositionParams,
extensions: ExtensionsControllerProps
): Observable<string | null> {
return getDefinition(ctx, extensions).pipe(
map(def => {
const defArray = Array.isArray(def) ? def : [def]
def = defArray[0]
if (!def) {
return null
}
const uri = parseRepoURI(def.uri) as LSPTextDocumentPositionParams
if (def.range) {
uri.position = { line: def.range.start.line + 1, character: def.range.start.character + 1 }
}
if (uri.repoPath === ctx.repoPath && uri.commitID === ctx.commitID) {
// Use pretty rev from the current context for same-repo J2D.
uri.rev = ctx.rev
return toPrettyBlobURL(uri)
}
return toAbsoluteBlobURL(uri)
})
)
}
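/**
 * Illustrative usage of getJumpURL (a hypothetical call site; `ctx`,
 * `extensions` and the navigation side effect are assumptions, not part of
 * this module):
 *
 *     getJumpURL(ctx, extensions).subscribe(url => {
 *         if (url) {
 *             window.location.href = url
 *         }
 *     })
 */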
/**
* Fetches the repository-independent symbol descriptor for the given location.
*
* Only the first result is returned, even if there are results from multiple providers.
*
* @param ctx the location
* @return information about the symbol at the location
*/
export function getXdefinition(ctx: LSPTextDocumentPositionParams): Observable<SymbolLocationInformation | undefined> {
return forkJoin(getModes(ctx).map(({ mode }) => fetchXdefinition({ ...ctx, mode }))).pipe(
map(results => results.find(v => !!v))
)
}
/**
* Wrap the value in an array. Unlike Lodash's castArray, it maps null to [] (not [null]).
*/
function castArray<T>(value: null | T | T[]): T[] {
if (value === null) {
return []
}
if (!Array.isArray(value)) {
return [value]
}
return value
}
/**
* Fetches references (in the same repository) to the symbol at the given location.
*
* @param ctx the location
* @return references to the symbol at the location
*/
export function getReferences(
ctx: LSPTextDocumentPositionParams & LSPReferencesParams,
{ extensionsController }: ExtensionsControllerProps
): Observable<Location[]> {
return extensionsController.registries.textDocumentReferences
.getLocation({
textDocument: { uri: `git://${ctx.repoPath}?${ctx.commitID}#${ctx.filePath}` },
position: {
character: ctx.position.character - 1,
line: ctx.position.line - 1,
},
context: {
includeDeclaration: ctx.includeDeclaration !== false, // undefined means true
},
})
.pipe(map(castArray))
}
/**
* Fetches implementations (in the same repository) of the symbol at the given location.
*
* @param ctx the location
* @return implementations of the symbol at the location
*/
export function getImplementations(
ctx: LSPTextDocumentPositionParams,
{ extensionsController }: ExtensionsControllerProps
): Observable<Location[]> {
return extensionsController.registries.textDocumentImplementation
.getLocation({
textDocument: { uri: `git://${ctx.repoPath}?${ctx.commitID}#${ctx.filePath}` },
position: {
character: ctx.position.character - 1,
line: ctx.position.line - 1,
},
})
.pipe(map(castArray))
}
/**
* Fetches references in the repository to the symbol described by the repository-independent symbol descriptor.
*
* @param ctx the symbol descriptor and repository to search in
* @return references to the symbol
*/
export function getXreferences(ctx: XReferenceOptions & AbsoluteRepo & LSPSelector): Observable<Location[]> {
return forkJoin(getModes(ctx).map(({ mode }) => fetchXreferences({ ...ctx, mode }))).pipe(
map(results => flatten(results))
)
}
/**
* Fetches decorations for the given file.
*
* @param ctx the file
* @return decorations
*/
export function getDecorations(
ctx: AbsoluteRepoFile & LSPSelector,
{ extensionsController }: ExtensionsControllerProps
): Observable<TextDocumentDecoration[] | null> {
return extensionsController.registries.textDocumentDecoration.getDecorations({
uri: `git://${ctx.repoPath}?${ctx.commitID}#${ctx.filePath}`,
})
}
/** Computes the set of LSP modes to use. */
function getModes(ctx: ModeSpec): { mode: string }[] {
return [{ mode: ctx.mode }]
}
| getDefinition |
comb.rs | //! comb sort algorithm.
//!
//! **O(N²)**
mod utils;
/// Sort in ascending order using a comb sort algorithm.
///
/// ```rust
/// use buldak::comb;
///
/// let mut nums = [1, 4, 2, 3, 5, 111, 234, 21, 13];
/// comb::sort(&mut nums); | /// ```
pub fn sort<T>(array: &mut [T])
where
T: std::cmp::Ord,
{
sort_by(array, |l, r| l.cmp(r))
}
/// Sort in descending order using a comb sort algorithm.
///
/// ```rust
/// use buldak::comb;
///
/// let mut nums = [1, 4, 2, 3, 5, 111, 234, 21, 13];
/// comb::sort_reverse(&mut nums);
/// assert_eq!(nums, [234, 111, 21, 13, 5, 4, 3, 2, 1]);
/// ```
pub fn sort_reverse<T>(array: &mut [T])
where
T: std::cmp::Ord,
{
sort_by(array, |l, r| l.cmp(r).reverse())
}
/// It takes a comparator function to determine the order,
/// and sorts it using a comb sort algorithm.
///
/// ```rust
/// use buldak::comb;
///
/// let mut nums = [1, 4, 2, 3, 5, 111, 234, 21, 13];
/// comb::sort_by(&mut nums, |l, r| l.cmp(r));
/// assert_eq!(nums, [1, 2, 3, 4, 5, 13, 21, 111, 234]);
/// ```
pub fn sort_by<T, F>(array: &mut [T], compare: F)
where
T: std::cmp::Ord,
F: Fn(&T, &T) -> std::cmp::Ordering,
{
_comb_sort_impl(array, compare);
}
fn _comb_sort_impl<T, F>(array: &mut [T], compare: F)
where
T: std::cmp::Ord,
F: Fn(&T, &T) -> std::cmp::Ordering,
{
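    // Comb sort shrinks the comparison gap by a constant factor on each pass
    // (1.3 is the commonly recommended shrink factor). Once the gap reaches 1
    // the passes degenerate into bubble-sort sweeps, and the loop ends after
    // a gap-1 pass that performs no swaps (i.e. `sorted` stays true).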
let mut gap = array.len();
let shrink = 1.3_f64;
let mut sorted = false;
    while !sorted {
gap = (gap as f64 / shrink).floor() as usize;
if gap > 1 {
sorted = false;
} else {
gap = 1;
sorted = true;
}
let mut i = 0;
while i + gap < array.len() {
if compare(&array[i], &array[i + gap]) == std::cmp::Ordering::Greater {
utils::swap(array, i, i + gap);
sorted = false;
}
i += 1;
}
}
} | /// assert_eq!(nums, [1, 2, 3, 4, 5, 13, 21, 111, 234]); |
argumentList_test.go | package analysis
import (
"io/ioutil"
"strconv"
"testing"
"github.com/bradleyjkemp/cupaloy"
"github.com/john-nguyen09/phpintel/internal/lsp/protocol"
"github.com/stretchr/testify/assert"
)
func TestNestedArgumentList(t *testing.T) {
data, err := ioutil.ReadFile("../cases/nestedArgs.php")
if err != nil {
panic(err)
}
document := NewDocument("test1", data)
document.Load()
testOffsets := []int{
308,
345,
}
for _, testOffset := range testOffsets {
argumentList, hasParamsResolvable := document.ArgumentListAndFunctionCallAt(document.positionAt(testOffset))
t.Run(strconv.Itoa(testOffset), func(t *testing.T) {
cupaloy.SnapshotT(t, argumentList, hasParamsResolvable)
})
}
}
func TestArgumentLists(t *testing.T) {
doc := NewDocument("test1", []byte(`<?php
$var1 = 0;
testFunction1()
++$var1;
`))
doc.Load()
argRanges := []protocol.Range{}
TraverseDocument(doc, func(s Symbol) {
if argumentList, ok := s.(*ArgumentList); ok {
argRanges = append(argRanges, argumentList.ranges...)
}
}, nil)
assert.Equal(t, []protocol.Range{
{
Start: protocol.Position{Line: 2, Character: 13},
End: protocol.Position{Line: 2, Character: 15},
},
}, argRanges)
}
func TestNotDuplicatedExpression(t *testing.T) {
data, err := ioutil.ReadFile("../cases/argumentsExpression.php")
assert.NoError(t, err)
document := NewDocument("test1", data)
document.Load()
cupaloy.SnapshotT(t, document.hasTypesSymbols())
}
func TestErrorComma(t *testing.T) {
doc := NewDocument("test1", []byte(`<?php
$abc = $DB->get_record('abc',)`))
doc.Load()
args, _ := doc.ArgumentListAndFunctionCallAt(protocol.Position{
Line: 1,
Character: 29, | assert.Equal(t, []protocol.Range{
{Start: protocol.Position{Line: 1, Character: 22}, End: protocol.Position{Line: 1, Character: 28}},
{Start: protocol.Position{Line: 1, Character: 29}, End: protocol.Position{Line: 1, Character: 30}},
}, args.ranges)
}
func TestDocumentSignatures(t *testing.T) {
data, _ := ioutil.ReadFile("../cases/TaskLog.php")
doc := NewDocument("test1", data)
doc.Load()
befores := []protocol.Position{}
TraverseDocument(doc, func(s Symbol) {
if argumentList, ok := s.(*ArgumentList); ok {
for _, r := range argumentList.GetArgumentRanges() {
befores = append(befores, r.Start)
}
}
}, nil)
assert.Equal(t, []protocol.Position{
{Line: 19, Character: 27},
{Line: 24, Character: 32},
{Line: 24, Character: 45},
{Line: 29, Character: 27},
{Line: 32, Character: 39},
{Line: 32, Character: 47},
{Line: 37, Character: 7},
{Line: 37, Character: 15},
}, befores)
hasTypes := doc.HasTypesBeforePos(protocol.Position{Line: 37, Character: 7})
assert.NotNil(t, hasTypes)
} | }) |
test_verify_flash.py | #!/usr/bin/env python
"""
Copyright 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint:disable=missing-docstring
import logging
import unittest
import time
import os
from test.hardware.test_helper import Helper
import serial
import six
import mbed_lstools
from mbed_flasher.flash import Flash
from mbed_flasher.reset import Reset
from mbed_flasher.return_codes import EXIT_CODE_SUCCESS
def verify_output_per_device(serial_port, command, output):
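    """
    Open `serial_port`, send `command`, and return True if `output` appears in
    the bytes read back within the short polling window, False otherwise.
    """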
# print 'Inspecting %s SERIAL device' % serial_port
ser = serial.Serial(
port=serial_port,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS
)
if ser.isOpen():
time.sleep(0.2)
if six.PY2:
ser.write('%s\n\r' % command)
else:
new_command = '%s\n\r' % command
ser.write(new_command.encode('utf-8'))
out = ''
time.sleep(0.5)
while ser.inWaiting() > 0:
if six.PY2:
out += ser.read(1)
else:
out += ser.read(1).decode('utf-8', "replace")
if out.find(output) != -1:
ser.close()
return True
ser.close()
return False
# this is not a const
# pylint: disable=invalid-name
mbed = mbed_lstools.create()
class FlashVerifyTestCase(unittest.TestCase):
"""
Flash verification with Hardware, three step verification for all attached devices:
first flashes the helloworld binary to device and verifies that no response is seen
second flashes found second binary to device and verifies that response is seen
third flashes the helloworld binary to device and verifies that no response is seen
"""
bin_path = os.path.join('test', 'helloworld.bin')
second_bin_path = os.path.join('test', 'example_app_K64F.bin')
def setUp(self):
logging.disable(logging.CRITICAL)
Helper(platform_name='K64F', allowed_files=['DETAILS.TXT', 'MBED.HTM']).clear()
def | (self):
Helper(platform_name='K64F', allowed_files=['DETAILS.TXT', 'MBED.HTM']).clear()
def test_verify_hw_flash(self):
mbeds = mbed_lstools.create()
targets = mbeds.list_mbeds()
flasher = Flash()
target_id = None
serial_port = None
for target in targets:
if target['platform_name'] == 'K64F':
                if 'serial_port' in target and 'target_id' in target:
target_id = target['target_id']
serial_port = target['serial_port']
break
if target_id and serial_port:
ret = flasher.flash(build=self.bin_path,
target_id=target_id,
platform_name='K64F',
device_mapping_table=False,
method='simple',
target_filename=self.bin_path)
self.assertEqual(ret, EXIT_CODE_SUCCESS)
self.assertEqual(verify_output_per_device(serial_port, 'help', 'echo'), False)
ret = flasher.flash(build=self.second_bin_path,
target_id=target_id, platform_name='K64F',
device_mapping_table=False, method='simple',
target_filename=self.second_bin_path)
self.assertEqual(ret, EXIT_CODE_SUCCESS)
if not verify_output_per_device(serial_port, 'help', 'echo'):
self.assertEqual(
verify_output_per_device(serial_port, 'help', 'echo'), True)
ret = flasher.flash(build=self.bin_path,
target_id=target_id,
platform_name='K64F',
device_mapping_table=False,
method='simple',
target_filename=self.bin_path)
self.assertEqual(ret, EXIT_CODE_SUCCESS)
self.assertEqual(verify_output_per_device(serial_port, 'help', 'echo'), False)
def test_verify_hw_flash_no_reset(self):
mbeds = mbed_lstools.create()
targets = mbeds.list_mbeds()
flasher = Flash()
resetter = Reset()
target_id = None
serial_port = None
for target in targets:
if target['platform_name'] == 'K64F':
                if 'serial_port' in target and 'target_id' in target:
target_id = target['target_id']
serial_port = target['serial_port']
break
if target_id and serial_port:
ret = flasher.flash(build=self.second_bin_path,
target_id=target_id,
platform_name='K64F',
device_mapping_table=False,
method='simple')
self.assertEqual(ret, EXIT_CODE_SUCCESS)
if not verify_output_per_device(serial_port, 'help', 'echo'):
self.assertEqual(
verify_output_per_device(serial_port, 'help', 'echo'), True)
ret = flasher.flash(build=self.second_bin_path,
target_id=target_id,
platform_name='K64F',
device_mapping_table=False,
method='simple',
no_reset=True,
target_filename=self.second_bin_path)
self.assertEqual(ret, EXIT_CODE_SUCCESS)
self.assertEqual(verify_output_per_device(serial_port, 'help', 'echo'), False)
ret = resetter.reset(target_id=target_id, method='simple')
self.assertEqual(ret, EXIT_CODE_SUCCESS)
if not verify_output_per_device(serial_port, 'help', 'echo'):
self.assertEqual(
verify_output_per_device(serial_port, 'help', 'echo'), True)
ret = flasher.flash(build=self.bin_path,
target_id=target_id,
platform_name='K64F',
device_mapping_table=False,
method='simple',
target_filename=self.bin_path)
self.assertEqual(ret, EXIT_CODE_SUCCESS)
self.assertEqual(verify_output_per_device(serial_port, 'help', 'echo'), False)
if __name__ == '__main__':
unittest.main()
| tearDown |
jquery.dataTables.js | /*! DataTables 1.10.16
* ©2008-2017 SpryMedia Ltd - datatables.net/license
*/
/**
* @summary DataTables
* @description Paginate, search and order HTML tables
* @version 1.10.16
* @file jquery.dataTables.js
* @author SpryMedia Ltd
* @contact www.datatables.net
* @copyright Copyright 2008-2017 SpryMedia Ltd.
*
* This source file is free software, available under the following license:
* MIT license - http://datatables.net/license
*
* This source file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the license files for details.
*
* For details please refer to: http://www.datatables.net
*/
/*jslint evil: true, undef: true, browser: true */
/*globals $,require,jQuery,define,_selector_run,_selector_opts,_selector_first,_selector_row_indexes,_ext,_Api,_api_register,_api_registerPlural,_re_new_lines,_re_html,_re_formatted_numeric,_re_escape_regex,_empty,_intVal,_numToDecimal,_isNumber,_isHtml,_htmlNumeric,_pluck,_pluck_order,_range,_stripHtml,_unique,_fnBuildAjax,_fnAjaxUpdate,_fnAjaxParameters,_fnAjaxUpdateDraw,_fnAjaxDataSrc,_fnAddColumn,_fnColumnOptions,_fnAdjustColumnSizing,_fnVisibleToColumnIndex,_fnColumnIndexToVisible,_fnVisbleColumns,_fnGetColumns,_fnColumnTypes,_fnApplyColumnDefs,_fnHungarianMap,_fnCamelToHungarian,_fnLanguageCompat,_fnBrowserDetect,_fnAddData,_fnAddTr,_fnNodeToDataIndex,_fnNodeToColumnIndex,_fnGetCellData,_fnSetCellData,_fnSplitObjNotation,_fnGetObjectDataFn,_fnSetObjectDataFn,_fnGetDataMaster,_fnClearTable,_fnDeleteIndex,_fnInvalidate,_fnGetRowElements,_fnCreateTr,_fnBuildHead,_fnDrawHead,_fnDraw,_fnReDraw,_fnAddOptionsHtml,_fnDetectHeader,_fnGetUniqueThs,_fnFeatureHtmlFilter,_fnFilterComplete,_fnFilterCustom,_fnFilterColumn,_fnFilter,_fnFilterCreateSearch,_fnEscapeRegex,_fnFilterData,_fnFeatureHtmlInfo,_fnUpdateInfo,_fnInfoMacros,_fnInitialise,_fnInitComplete,_fnLengthChange,_fnFeatureHtmlLength,_fnFeatureHtmlPaginate,_fnPageChange,_fnFeatureHtmlProcessing,_fnProcessingDisplay,_fnFeatureHtmlTable,_fnScrollDraw,_fnApplyToChildren,_fnCalculateColumnWidths,_fnThrottle,_fnConvertToWidth,_fnGetWidestNode,_fnGetMaxLenString,_fnStringToCss,_fnSortFlatten,_fnSort,_fnSortAria,_fnSortListener,_fnSortAttachListener,_fnSortingClasses,_fnSortData,_fnSaveState,_fnLoadState,_fnSettingsFromNode,_fnLog,_fnMap,_fnBindAction,_fnCallbackReg,_fnCallbackFire,_fnLengthOverflow,_fnRenderer,_fnDataSource,_fnRowAttributes*/
(function( factory ) {
"use strict";
if ( typeof define === 'function' && define.amd ) {
// AMD
define( ['jquery'], function ( $ ) {
return factory( $, window, document );
} );
}
else if ( typeof exports === 'object' ) {
// CommonJS
module.exports = function (root, $) {
if ( ! root ) {
// CommonJS environments without a window global must pass a
// root. This will give an error otherwise
root = window;
}
if ( ! $ ) {
$ = typeof window !== 'undefined' ? // jQuery's factory checks for a global window
require('jquery') :
require('jquery')( root );
}
return factory( $, root, root.document );
};
}
else {
// Browser
factory( jQuery, window, document );
}
}
(function( $, window, document, undefined ) {
"use strict";
/**
* DataTables is a plug-in for the jQuery JavaScript library. It is a highly
* flexible tool, based upon the foundations of progressive enhancement,
* which will add advanced interaction controls to any HTML table. For a
* full list of features please refer to
* [DataTables.net](http://datatables.net).
*
* Note that the `DataTable` object is not a global variable but is aliased
* to `jQuery.fn.DataTable` and `jQuery.fn.dataTable` through which it may
* be accessed.
*
* @class
* @param {object} [init={}] Configuration object for DataTables. Options
* are defined by {@link DataTable.defaults}
* @requires jQuery 1.7+
*
* @example
* // Basic initialisation
*      $(document).ready( function () {
* $('#example').dataTable();
* } );
*
* @example
* // Initialisation with configuration options - in this case, disable
* // pagination and sorting.
*      $(document).ready( function () {
* $('#example').dataTable( {
* "paginate": false,
* "sort": false
* } );
* } );
*/
var DataTable = function ( options )
{
/**
* Perform a jQuery selector action on the table's TR elements (from the tbody) and
* return the resulting jQuery object.
* @param {string|node|jQuery} sSelector jQuery selector or node collection to act on
* @param {object} [oOpts] Optional parameters for modifying the rows to be included
* @param {string} [oOpts.filter=none] Select TR elements that meet the current filter
* criterion ("applied") or all TR elements (i.e. no filter).
* @param {string} [oOpts.order=current] Order of the TR elements in the processed array.
* Can be either 'current', whereby the current sorting of the table is used, or
* 'original' whereby the original order the data was read into the table is used.
* @param {string} [oOpts.page=all] Limit the selection to the currently displayed page
* ("current") or not ("all"). If 'current' is given, then order is assumed to be
* 'current' and filter is 'applied', regardless of what they might be given as.
* @returns {object} jQuery object, filtered by the given selector.
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable = $('#example').dataTable();
*
* // Highlight every second row
* oTable.$('tr:odd').css('backgroundColor', 'blue');
* } );
*
* @example
* $(document).ready(function() {
* var oTable = $('#example').dataTable();
*
* // Filter to rows with 'Webkit' in them, add a background colour and then
* // remove the filter, thus highlighting the 'Webkit' rows only.
* oTable.fnFilter('Webkit');
* oTable.$('tr', {"search": "applied"}).css('backgroundColor', 'blue');
* oTable.fnFilter('');
* } );
*/
this.$ = function ( sSelector, oOpts )
{
return this.api(true).$( sSelector, oOpts );
};
/**
* Almost identical to $ in operation, but in this case returns the data for the matched
* rows - as such, the jQuery selector used should match TR row nodes or TD/TH cell nodes
* rather than any descendants, so the data can be obtained for the row/cell. If matching
* rows are found, the data returned is the original data array/object that was used to
* create the row (or a generated array if from a DOM source).
*
* This method is often useful in combination with $ where both functions are given the
* same parameters and the array indexes will match identically.
* @param {string|node|jQuery} sSelector jQuery selector or node collection to act on
* @param {object} [oOpts] Optional parameters for modifying the rows to be included
* @param {string} [oOpts.filter=none] Select elements that meet the current filter
* criterion ("applied") or all elements (i.e. no filter).
* @param {string} [oOpts.order=current] Order of the data in the processed array.
* Can be either 'current', whereby the current sorting of the table is used, or
* 'original' whereby the original order the data was read into the table is used.
* @param {string} [oOpts.page=all] Limit the selection to the currently displayed page
* ("current") or not ("all"). If 'current' is given, then order is assumed to be
* 'current' and filter is 'applied', regardless of what they might be given as.
* @returns {array} Data for the matched elements. If any elements, as a result of the
* selector, were not TR, TD or TH elements in the DataTable, they will have a null
* entry in the array.
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable = $('#example').dataTable();
*
* // Get the data from the first row in the table
* var data = oTable._('tr:first');
*
* // Do something useful with the data
* alert( "First cell is: "+data[0] );
* } );
*
* @example
* $(document).ready(function() {
* var oTable = $('#example').dataTable();
*
* // Filter to 'Webkit' and get all data for
* oTable.fnFilter('Webkit');
* var data = oTable._('tr', {"search": "applied"});
*
* // Do something with the data
* alert( data.length+" rows matched the search" );
* } );
*/
this._ = function ( sSelector, oOpts )
{
return this.api(true).rows( sSelector, oOpts ).data();
};
/**
* Create a DataTables Api instance, with the currently selected tables for
* the Api's context.
* @param {boolean} [traditional=false] Set the API instance's context to be
* only the table referred to by the `DataTable.ext.iApiIndex` option, as was
* used in the API presented by DataTables 1.9- (i.e. the traditional mode),
* or if all tables captured in the jQuery object should be used.
* @return {DataTables.Api}
*/
this.api = function ( traditional )
{
return traditional ?
new _Api(
_fnSettingsFromNode( this[ _ext.iApiIndex ] )
) :
new _Api( this );
};
/**
* Add a single new row or multiple rows of data to the table. Please note
* that this is suitable for client-side processing only - if you are using
* server-side processing (i.e. "bServerSide": true), then to add data, you
* must add it to the data source, i.e. the server-side, through an Ajax call.
* @param {array|object} data The data to be added to the table. This can be:
* <ul>
* <li>1D array of data - add a single row with the data provided</li>
* <li>2D array of arrays - add multiple rows in a single call</li>
* <li>object - data object when using <i>mData</i></li>
* <li>array of objects - multiple data objects when using <i>mData</i></li>
* </ul>
* @param {bool} [redraw=true] redraw the table or not
* @returns {array} An array of integers, representing the list of indexes in
* <i>aoData</i> ({@link DataTable.models.oSettings}) that have been added to
* the table.
* @dtopt API
* @deprecated Since v1.10
*
* @example
* // Global var for counter
* var giCount = 2;
*
* $(document).ready(function() {
* $('#example').dataTable();
* } );
*
* function fnClickAddRow() {
* $('#example').dataTable().fnAddData( [
* giCount+".1",
* giCount+".2",
* giCount+".3",
* giCount+".4" ]
* );
*
* giCount++;
* }
*/
this.fnAddData = function( data, redraw )
{
var api = this.api( true );
/* Check if we want to add multiple rows or not */
var rows = $.isArray(data) && ( $.isArray(data[0]) || $.isPlainObject(data[0]) ) ?
api.rows.add( data ) :
api.row.add( data );
if ( redraw === undefined || redraw ) {
api.draw();
}
return rows.flatten().toArray();
};
/**
* This function will make DataTables recalculate the column sizes, based on the data
* contained in the table and the sizes applied to the columns (in the DOM, CSS or
* through the sWidth parameter). This can be useful when the width of the table's
* parent element changes (for example a window resize).
* @param {boolean} [bRedraw=true] Redraw the table or not, you will typically want to do this.
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable = $('#example').dataTable( {
* "sScrollY": "200px",
* "bPaginate": false
* } );
*
* $(window).on('resize', function () {
* oTable.fnAdjustColumnSizing();
* } );
* } );
*/
this.fnAdjustColumnSizing = function ( bRedraw )
{
var api = this.api( true ).columns.adjust();
var settings = api.settings()[0];
var scroll = settings.oScroll;
if ( bRedraw === undefined || bRedraw ) {
api.draw( false );
}
else if ( scroll.sX !== "" || scroll.sY !== "" ) {
/* If not redrawing, but scrolling, we want to apply the new column sizes anyway */
_fnScrollDraw( settings );
}
};
/**
* Quickly and simply clear a table
* @param {bool} [bRedraw=true] redraw the table or not
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable = $('#example').dataTable();
*
* // Immediately 'nuke' the current rows (perhaps waiting for an Ajax callback...)
* oTable.fnClearTable();
* } );
*/
this.fnClearTable = function( bRedraw )
{
var api = this.api( true ).clear();
if ( bRedraw === undefined || bRedraw ) {
api.draw();
}
};
/**
* The exact opposite of 'opening' a row, this function will close any rows which
* are currently 'open'.
* @param {node} nTr the table row to 'close'
* @returns {int} 0 on success, or 1 if failed (can't find the row)
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable;
*
* // 'open' an information row when a row is clicked on
* $('#example tbody tr').click( function () {
* if ( oTable.fnIsOpen(this) ) {
* oTable.fnClose( this );
* } else {
* oTable.fnOpen( this, "Temporary row opened", "info_row" );
* }
* } );
*
* oTable = $('#example').dataTable();
* } );
*/
this.fnClose = function( nTr )
{
this.api( true ).row( nTr ).child.hide();
};
/**
* Remove a row for the table
* @param {mixed} target The index of the row from aoData to be deleted, or
* the TR element you want to delete
* @param {function|null} [callback] Callback function
* @param {bool} [redraw=true] Redraw the table or not
* @returns {array} The row that was deleted
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable = $('#example').dataTable();
*
* // Immediately remove the first row
* oTable.fnDeleteRow( 0 );
* } );
*/
this.fnDeleteRow = function( target, callback, redraw )
{
var api = this.api( true );
var rows = api.rows( target );
var settings = rows.settings()[0];
var data = settings.aoData[ rows[0][0] ];
rows.remove();
if ( callback ) {
callback.call( this, settings, data );
}
if ( redraw === undefined || redraw ) {
api.draw();
}
return data;
};
/**
* Restore the table to its original state in the DOM by removing all of DataTables'
* enhancements, alterations to the DOM structure of the table and event listeners.
* @param {boolean} [remove=false] Completely remove the table from the DOM
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* // This example is fairly pointless in reality, but shows how fnDestroy can be used
* var oTable = $('#example').dataTable();
* oTable.fnDestroy();
* } );
*/
this.fnDestroy = function ( remove )
{
this.api( true ).destroy( remove );
};
/**
* Redraw the table
* @param {bool} [complete=true] Re-filter and resort (if enabled) the table before the draw.
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable = $('#example').dataTable();
*
* // Re-draw the table - you wouldn't want to do it here, but it's an example :-)
* oTable.fnDraw();
* } );
*/
this.fnDraw = function( complete )
{
// Note that this isn't an exact match to the old call to _fnDraw - it takes
// into account the new data, but can hold position.
this.api( true ).draw( complete );
};
/**
* Filter the input based on data
* @param {string} sInput String to filter the table on
* @param {int|null} [iColumn] Column to limit filtering to
* @param {bool} [bRegex=false] Treat as regular expression or not
* @param {bool} [bSmart=true] Perform smart filtering or not
* @param {bool} [bShowGlobal=true] Show the input global filter in its input box(es)
* @param {bool} [bCaseInsensitive=true] Do case-insensitive matching (true) or not (false)
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable = $('#example').dataTable();
*
* // Sometime later - filter...
* oTable.fnFilter( 'test string' );
* } );
*/
this.fnFilter = function( sInput, iColumn, bRegex, bSmart, bShowGlobal, bCaseInsensitive )
{
var api = this.api( true );
if ( iColumn === null || iColumn === undefined ) {
api.search( sInput, bRegex, bSmart, bCaseInsensitive );
}
else {
api.column( iColumn ).search( sInput, bRegex, bSmart, bCaseInsensitive );
}
api.draw();
};
/**
* Get the data for the whole table, an individual row or an individual cell based on the
* provided parameters.
* @param {int|node} [src] A TR row node, TD/TH cell node or an integer. If given as
* a TR node then the data source for the whole row will be returned. If given as a
* TD/TH cell node then iCol will be automatically calculated and the data for the
* cell returned. If given as an integer, then this is treated as the aoData internal
* data index for the row (see fnGetPosition) and the data for that row used.
* @param {int} [col] Optional column index that you want the data of.
* @returns {array|object|string} If mRow is undefined, then the data for all rows is
* returned. If mRow is defined, just data for that row, and if iCol is
* defined, only data for the designated cell is returned.
* @dtopt API
* @deprecated Since v1.10
*
* @example
* // Row data
* $(document).ready(function() {
* oTable = $('#example').dataTable();
*
* oTable.$('tr').click( function () {
* var data = oTable.fnGetData( this );
* // ... do something with the array / object of data for the row
* } );
* } );
*
* @example
* // Individual cell data
* $(document).ready(function() {
* oTable = $('#example').dataTable();
*
* oTable.$('td').click( function () {
* var sData = oTable.fnGetData( this );
* alert( 'The cell clicked on had the value of '+sData );
* } );
* } );
*/
this.fnGetData = function( src, col )
{
var api = this.api( true );
if ( src !== undefined ) {
var type = src.nodeName ? src.nodeName.toLowerCase() : '';
return col !== undefined || type == 'td' || type == 'th' ?
api.cell( src, col ).data() :
api.row( src ).data() || null;
}
return api.data().toArray();
};
/**
* Get an array of the TR nodes that are used in the table's body. Note that you will
* typically want to use the '$' API method in preference to this as it is more
* flexible.
* @param {int} [iRow] Optional row index for the TR element you want
* @returns {array|node} If iRow is undefined, returns an array of all TR elements
* in the table's body, or iRow is defined, just the TR element requested.
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable = $('#example').dataTable();
*
* // Get the nodes from the table
* var nNodes = oTable.fnGetNodes( );
* } );
*/
this.fnGetNodes = function( iRow )
{
var api = this.api( true );
return iRow !== undefined ?
api.row( iRow ).node() :
api.rows().nodes().flatten().toArray();
};
/**
* Get the array indexes of a particular cell from its DOM element
* and column index including hidden columns
* @param {node} node this can either be a TR, TD or TH in the table's body
* @returns {int} If nNode is given as a TR, then a single index is returned, or
* if given as a cell, an array of [row index, column index (visible),
* column index (all)] is given.
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* $('#example tbody td').click( function () {
* // Get the position of the current data from the node
* var aPos = oTable.fnGetPosition( this );
*
* // Get the data array for this row
* var aData = oTable.fnGetData( aPos[0] );
*
* // Update the data array and return the value
* aData[ aPos[1] ] = 'clicked';
* this.innerHTML = 'clicked';
* } );
*
* // Init DataTables
* oTable = $('#example').dataTable();
* } );
*/
this.fnGetPosition = function( node )
{
var api = this.api( true );
var nodeName = node.nodeName.toUpperCase();
if ( nodeName == 'TR' ) {
return api.row( node ).index();
}
else if ( nodeName == 'TD' || nodeName == 'TH' ) {
var cell = api.cell( node ).index();
return [
cell.row,
cell.columnVisible,
cell.column
];
}
return null;
};
/**
* Check to see if a row is 'open' or not.
* @param {node} nTr the table row to check
* @returns {boolean} true if the row is currently open, false otherwise
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable;
*
* // 'open' an information row when a row is clicked on
* $('#example tbody tr').click( function () {
* if ( oTable.fnIsOpen(this) ) {
* oTable.fnClose( this );
* } else {
* oTable.fnOpen( this, "Temporary row opened", "info_row" );
* }
* } );
*
* oTable = $('#example').dataTable();
* } );
*/
this.fnIsOpen = function( nTr )
{
return this.api( true ).row( nTr ).child.isShown();
};
/**
* This function will place a new row directly after a row which is currently
* on display on the page, with the HTML contents that is passed into the
* function. This can be used, for example, to ask for confirmation that a
* particular record should be deleted.
* @param {node} nTr The table row to 'open'
* @param {string|node|jQuery} mHtml The HTML to put into the row
* @param {string} sClass Class to give the new TD cell
* @returns {node} The row opened. Note that if the table row passed in as the
* first parameter is not found in the table, this method will silently
* return.
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable;
*
* // 'open' an information row when a row is clicked on
* $('#example tbody tr').click( function () {
* if ( oTable.fnIsOpen(this) ) {
* oTable.fnClose( this );
* } else {
* oTable.fnOpen( this, "Temporary row opened", "info_row" );
* }
* } );
*
* oTable = $('#example').dataTable();
* } );
*/
this.fnOpen = function( nTr, mHtml, sClass )
{
return this.api( true )
.row( nTr )
.child( mHtml, sClass )
.show()
.child()[0];
};
/**
* Change the pagination - provides the internal logic for pagination in a simple API
* function. With this function you can have a DataTables table go to the next,
* previous, first or last pages.
* @param {string|int} mAction Paging action to take: "first", "previous", "next" or "last"
* or a page number to jump to (integer); note that page 0 is the first page.
* @param {bool} [bRedraw=true] Redraw the table or not
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable = $('#example').dataTable();
* oTable.fnPageChange( 'next' );
* } );
*/
this.fnPageChange = function ( mAction, bRedraw )
{
var api = this.api( true ).page( mAction );
if ( bRedraw === undefined || bRedraw ) {
api.draw(false);
}
};
/**
* Show a particular column
* @param {int} iCol The column whose display should be changed
* @param {bool} bShow Show (true) or hide (false) the column
* @param {bool} [bRedraw=true] Redraw the table or not
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable = $('#example').dataTable();
*
* // Hide the second column after initialisation
* oTable.fnSetColumnVis( 1, false );
* } );
*/
this.fnSetColumnVis = function ( iCol, bShow, bRedraw )
{
var api = this.api( true ).column( iCol ).visible( bShow );
if ( bRedraw === undefined || bRedraw ) {
api.columns.adjust().draw();
}
};
/**
* Get the settings for a particular table for external manipulation
* @returns {object} DataTables settings object. See
* {@link DataTable.models.oSettings}
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable = $('#example').dataTable();
* var oSettings = oTable.fnSettings();
*
* // Show an example parameter from the settings
* alert( oSettings._iDisplayStart );
* } );
*/
this.fnSettings = function()
{
return _fnSettingsFromNode( this[_ext.iApiIndex] );
};
/**
* Sort the table by a particular column
* @param {int} iCol the data index to sort on. Note that this will not match the
* 'display index' if you have hidden data entries
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable = $('#example').dataTable();
*
* // Sort immediately with columns 0 and 1
* oTable.fnSort( [ [0,'asc'], [1,'asc'] ] );
* } );
*/
this.fnSort = function( aaSort )
{
this.api( true ).order( aaSort ).draw();
};
/**
* Attach a sort listener to an element for a given column
* @param {node} nNode the element to attach the sort listener to
* @param {int} iColumn the column that a click on this node will sort on
* @param {function} [fnCallback] callback function when sort is run
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable = $('#example').dataTable();
*
* // Sort on column 1, when 'sorter' is clicked on
* oTable.fnSortListener( document.getElementById('sorter'), 1 );
* } );
*/
this.fnSortListener = function( nNode, iColumn, fnCallback )
{
this.api( true ).order.listener( nNode, iColumn, fnCallback );
};
/**
* Update a table cell or row - this method will accept either a single value to
* update the cell with, an array of values with one element for each column or
* an object in the same format as the original data source. The function is
* self-referencing in order to make multi-column updates easier.
* @param {object|array|string} mData Data to update the cell/row with
* @param {node|int} mRow TR element you want to update or the aoData index
* @param {int} [iColumn] The column to update, give as null or undefined to
* update a whole row.
* @param {bool} [bRedraw=true] Redraw the table or not
* @param {bool} [bAction=true] Perform pre-draw actions or not
* @returns {int} 0 on success, 1 on error
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable = $('#example').dataTable();
* oTable.fnUpdate( 'Example update', 0, 0 ); // Single cell
* oTable.fnUpdate( ['a', 'b', 'c', 'd', 'e'], $('tbody tr')[0] ); // Row
* } );
*/
this.fnUpdate = function( mData, mRow, iColumn, bRedraw, bAction )
{
var api = this.api( true );
if ( iColumn === undefined || iColumn === null ) {
api.row( mRow ).data( mData );
}
else {
api.cell( mRow, iColumn ).data( mData );
}
if ( bAction === undefined || bAction ) {
api.columns.adjust();
}
if ( bRedraw === undefined || bRedraw ) {
api.draw();
}
return 0;
};
/**
* Provide a common method for plug-ins to check the version of DataTables being used, in order
* to ensure compatibility.
* @param {string} sVersion Version string to check for, in the format "X.Y.Z". Note that the
* formats "X" and "X.Y" are also acceptable.
* @returns {boolean} true if this version of DataTables is greater or equal to the required
* version, or false if this version of DataTables is not suitable
* @method
* @dtopt API
* @deprecated Since v1.10
*
* @example
* $(document).ready(function() {
* var oTable = $('#example').dataTable();
* alert( oTable.fnVersionCheck( '1.9.0' ) );
* } );
*/
this.fnVersionCheck = _ext.fnVersionCheck;
var _that = this;
var emptyInit = options === undefined;
var len = this.length;
if ( emptyInit ) {
options = {};
}
this.oApi = this.internal = _ext.internal;
// Extend with old style plug-in API methods
for ( var fn in DataTable.ext.internal ) {
if ( fn ) {
this[fn] = _fnExternApiFunc(fn);
}
}
this.each(function() {
// For each initialisation we want to give it a clean initialisation
// object that can be bashed around
var o = {};
var oInit = len > 1 ? // optimisation for single table case
_fnExtend( o, options, true ) :
options;
/*global oInit,_that,emptyInit*/
var i=0, iLen, j, jLen, k, kLen;
var sId = this.getAttribute( 'id' );
var bInitHandedOff = false;
var defaults = DataTable.defaults;
var $this = $(this);
/* Sanity check */
if ( this.nodeName.toLowerCase() != 'table' )
{
_fnLog( null, 0, 'Non-table node initialisation ('+this.nodeName+')', 2 );
return;
}
/* Backwards compatibility for the defaults */
_fnCompatOpts( defaults );
_fnCompatCols( defaults.column );
/* Convert the camel-case defaults to Hungarian */
_fnCamelToHungarian( defaults, defaults, true );
_fnCamelToHungarian( defaults.column, defaults.column, true );
/* Setting up the initialisation object */
_fnCamelToHungarian( defaults, $.extend( oInit, $this.data() ) );
/* Check to see if we are re-initialising a table */
var allSettings = DataTable.settings;
for ( i=0, iLen=allSettings.length ; i<iLen ; i++ )
{
var s = allSettings[i];
/* Base check on table node */
if ( s.nTable == this || s.nTHead.parentNode == this || (s.nTFoot && s.nTFoot.parentNode == this) )
{
var bRetrieve = oInit.bRetrieve !== undefined ? oInit.bRetrieve : defaults.bRetrieve;
var bDestroy = oInit.bDestroy !== undefined ? oInit.bDestroy : defaults.bDestroy;
if ( emptyInit || bRetrieve )
{
return s.oInstance;
}
else if ( bDestroy )
{
s.oInstance.fnDestroy();
break;
}
else
{
_fnLog( s, 0, 'Cannot reinitialise DataTable', 3 );
return;
}
}
/* If the element we are initialising has the same ID as a table which was previously
* initialised, but the table nodes don't match (from before) then we destroy the old
* instance by simply deleting it. This is under the assumption that the table has been
* destroyed by other methods. Anyone using non-id selectors will need to do this manually
*/
if ( s.sTableId == this.id )
{
allSettings.splice( i, 1 );
break;
}
}
/* Ensure the table has an ID - required for accessibility */
if ( sId === null || sId === "" )
{
sId = "DataTables_Table_"+(DataTable.ext._unique++);
this.id = sId;
}
/* Create the settings object for this table and set some of the default parameters */
var oSettings = $.extend( true, {}, DataTable.models.oSettings, {
"sDestroyWidth": $this[0].style.width,
"sInstance": sId,
"sTableId": sId
} );
oSettings.nTable = this;
oSettings.oApi = _that.internal;
oSettings.oInit = oInit;
allSettings.push( oSettings );
// Need to add the instance after the settings object has been added to the
// settings array, so we can self reference the table instance if more than one
oSettings.oInstance = (_that.length===1) ? _that : $this.dataTable();
// Backwards compatibility, before we apply all the defaults
_fnCompatOpts( oInit );
if ( oInit.oLanguage )
{
_fnLanguageCompat( oInit.oLanguage );
}
// If the length menu is given, but the init display length is not, use the length menu
if ( oInit.aLengthMenu && ! oInit.iDisplayLength )
{
oInit.iDisplayLength = $.isArray( oInit.aLengthMenu[0] ) ?
oInit.aLengthMenu[0][0] : oInit.aLengthMenu[0];
}
// Apply the defaults and init options to make a single init object with all
// options defined from defaults and instance options.
oInit = _fnExtend( $.extend( true, {}, defaults ), oInit );
// Map the initialisation options onto the settings object
_fnMap( oSettings.oFeatures, oInit, [
"bPaginate",
"bLengthChange",
"bFilter",
"bSort",
"bSortMulti",
"bInfo",
"bProcessing",
"bAutoWidth",
"bSortClasses",
"bServerSide",
"bDeferRender"
] );
_fnMap( oSettings, oInit, [
"asStripeClasses",
"ajax",
"fnServerData",
"fnFormatNumber",
"sServerMethod",
"aaSorting",
"aaSortingFixed",
"aLengthMenu",
"sPaginationType",
"sAjaxSource",
"sAjaxDataProp",
"iStateDuration",
"sDom",
"bSortCellsTop",
"iTabIndex",
"fnStateLoadCallback",
"fnStateSaveCallback",
"renderer",
"searchDelay",
"rowId",
[ "iCookieDuration", "iStateDuration" ], // backwards compat
[ "oSearch", "oPreviousSearch" ],
[ "aoSearchCols", "aoPreSearchCols" ],
[ "iDisplayLength", "_iDisplayLength" ]
] );
_fnMap( oSettings.oScroll, oInit, [
[ "sScrollX", "sX" ],
[ "sScrollXInner", "sXInner" ],
[ "sScrollY", "sY" ],
[ "bScrollCollapse", "bCollapse" ]
] );
_fnMap( oSettings.oLanguage, oInit, "fnInfoCallback" );
/* Callback functions which are array driven */
_fnCallbackReg( oSettings, 'aoDrawCallback', oInit.fnDrawCallback, 'user' );
_fnCallbackReg( oSettings, 'aoServerParams', oInit.fnServerParams, 'user' );
_fnCallbackReg( oSettings, 'aoStateSaveParams', oInit.fnStateSaveParams, 'user' );
_fnCallbackReg( oSettings, 'aoStateLoadParams', oInit.fnStateLoadParams, 'user' );
_fnCallbackReg( oSettings, 'aoStateLoaded', oInit.fnStateLoaded, 'user' );
_fnCallbackReg( oSettings, 'aoRowCallback', oInit.fnRowCallback, 'user' );
_fnCallbackReg( oSettings, 'aoRowCreatedCallback', oInit.fnCreatedRow, 'user' );
_fnCallbackReg( oSettings, 'aoHeaderCallback', oInit.fnHeaderCallback, 'user' );
_fnCallbackReg( oSettings, 'aoFooterCallback', oInit.fnFooterCallback, 'user' );
_fnCallbackReg( oSettings, 'aoInitComplete', oInit.fnInitComplete, 'user' );
_fnCallbackReg( oSettings, 'aoPreDrawCallback', oInit.fnPreDrawCallback, 'user' );
oSettings.rowIdFn = _fnGetObjectDataFn( oInit.rowId );
/* Browser support detection */
_fnBrowserDetect( oSettings );
var oClasses = oSettings.oClasses;
$.extend( oClasses, DataTable.ext.classes, oInit.oClasses );
$this.addClass( oClasses.sTable );
if ( oSettings.iInitDisplayStart === undefined )
{
/* Display start point, taking into account the state saving */
oSettings.iInitDisplayStart = oInit.iDisplayStart;
oSettings._iDisplayStart = oInit.iDisplayStart;
}
if ( oInit.iDeferLoading !== null )
{
oSettings.bDeferLoading = true;
var tmp = $.isArray( oInit.iDeferLoading );
oSettings._iRecordsDisplay = tmp ? oInit.iDeferLoading[0] : oInit.iDeferLoading;
oSettings._iRecordsTotal = tmp ? oInit.iDeferLoading[1] : oInit.iDeferLoading;
}
/* Language definitions */
var oLanguage = oSettings.oLanguage;
$.extend( true, oLanguage, oInit.oLanguage );
if ( oLanguage.sUrl )
{
/* Get the language definitions from a file - because this Ajax call makes the
* language loading asynchronous to the remainder of this function, we use
* bInitHandedOff to indicate that _fnInitialise will be fired by the returned
* Ajax handler, rather than by the constructor
*/
$.ajax( {
dataType: 'json',
url: oLanguage.sUrl,
success: function ( json ) {
_fnLanguageCompat( json );
_fnCamelToHungarian( defaults.oLanguage, json );
$.extend( true, oLanguage, json );
_fnInitialise( oSettings );
},
error: function () {
// Error occurred loading language file, continue on as best we can
_fnInitialise( oSettings );
}
} );
bInitHandedOff = true;
}
/*
* Stripes
*/
if ( oInit.asStripeClasses === null )
{
oSettings.asStripeClasses = [
oClasses.sStripeOdd,
oClasses.sStripeEven
];
}
/* Remove row stripe classes if they are already on the table row */
var stripeClasses = oSettings.asStripeClasses;
var rowOne = $this.children('tbody').find('tr').eq(0);
if ( $.inArray( true, $.map( stripeClasses, function(el, i) {
return rowOne.hasClass(el);
} ) ) !== -1 ) {
$('tbody tr', this).removeClass( stripeClasses.join(' ') );
oSettings.asDestroyStripes = stripeClasses.slice();
}
/*
* Columns
* See if we should load columns automatically or use defined ones
*/
var anThs = [];
var aoColumnsInit;
var nThead = this.getElementsByTagName('thead');
if ( nThead.length !== 0 )
{
_fnDetectHeader( oSettings.aoHeader, nThead[0] );
anThs = _fnGetUniqueThs( oSettings );
}
/* If not given a column array, generate one with nulls */
if ( oInit.aoColumns === null )
{
aoColumnsInit = [];
for ( i=0, iLen=anThs.length ; i<iLen ; i++ )
{
aoColumnsInit.push( null );
}
}
else
{
aoColumnsInit = oInit.aoColumns;
}
/* Add the columns */
for ( i=0, iLen=aoColumnsInit.length ; i<iLen ; i++ )
{
_fnAddColumn( oSettings, anThs ? anThs[i] : null );
}
/* Apply the column definitions */
_fnApplyColumnDefs( oSettings, oInit.aoColumnDefs, aoColumnsInit, function (iCol, oDef) {
_fnColumnOptions( oSettings, iCol, oDef );
} );
/* HTML5 attribute detection - build an mData object automatically if the
* attributes are found
*/
if ( rowOne.length ) {
var a = function ( cell, name ) {
return cell.getAttribute( 'data-'+name ) !== null ? name : null;
};
$( rowOne[0] ).children('th, td').each( function (i, cell) {
var col = oSettings.aoColumns[i];
if ( col.mData === i ) {
var sort = a( cell, 'sort' ) || a( cell, 'order' );
var filter = a( cell, 'filter' ) || a( cell, 'search' );
if ( sort !== null || filter !== null ) {
col.mData = {
_: i+'.display',
sort: sort !== null ? i+'.@data-'+sort : undefined,
type: sort !== null ? i+'.@data-'+sort : undefined,
filter: filter !== null ? i+'.@data-'+filter : undefined
};
_fnColumnOptions( oSettings, i );
}
}
} );
}
var features = oSettings.oFeatures;
var loadedInit = function () {
/*
* Sorting
* @todo For modularisation (1.11) this needs to go into a sort start up handler
*/
// If aaSorting is not defined, then we use the first indicator in asSorting
// in case that has been altered, so the default sort reflects that option
if ( oInit.aaSorting === undefined ) {
var sorting = oSettings.aaSorting;
for ( i=0, iLen=sorting.length ; i<iLen ; i++ ) {
sorting[i][1] = oSettings.aoColumns[ i ].asSorting[0];
}
}
/* Do a first pass on the sorting classes (allows any size changes to be taken
* into account, and also will apply sorting disabled classes if disabled)
*/
_fnSortingClasses( oSettings );
if ( features.bSort ) {
_fnCallbackReg( oSettings, 'aoDrawCallback', function () {
if ( oSettings.bSorted ) {
var aSort = _fnSortFlatten( oSettings );
var sortedColumns = {};
$.each( aSort, function (i, val) {
sortedColumns[ val.src ] = val.dir;
} );
_fnCallbackFire( oSettings, null, 'order', [oSettings, aSort, sortedColumns] );
_fnSortAria( oSettings );
}
} );
}
_fnCallbackReg( oSettings, 'aoDrawCallback', function () {
if ( oSettings.bSorted || _fnDataSource( oSettings ) === 'ssp' || features.bDeferRender ) {
_fnSortingClasses( oSettings );
}
}, 'sc' );
/*
* Final init
* Cache the header, body and footer as required, creating them if needed
*/
// Work around for Webkit bug 83867 - store the caption-side before removing from doc
var captions = $this.children('caption').each( function () {
this._captionSide = $(this).css('caption-side');
} );
var thead = $this.children('thead');
if ( thead.length === 0 ) {
thead = $('<thead/>').appendTo($this);
}
oSettings.nTHead = thead[0];
var tbody = $this.children('tbody');
if ( tbody.length === 0 ) {
tbody = $('<tbody/>').appendTo($this);
}
oSettings.nTBody = tbody[0];
var tfoot = $this.children('tfoot');
if ( tfoot.length === 0 && captions.length > 0 && (oSettings.oScroll.sX !== "" || oSettings.oScroll.sY !== "") ) {
// If we are a scrolling table, and no footer has been given, then we need to create
// a tfoot element for the caption element to be appended to
tfoot = $('<tfoot/>').appendTo($this);
}
if ( tfoot.length === 0 || tfoot.children().length === 0 ) {
$this.addClass( oClasses.sNoFooter );
}
else if ( tfoot.length > 0 ) {
oSettings.nTFoot = tfoot[0];
_fnDetectHeader( oSettings.aoFooter, oSettings.nTFoot );
}
/* Check if there is data passing into the constructor */
if ( oInit.aaData ) {
for ( i=0 ; i<oInit.aaData.length ; i++ ) {
_fnAddData( oSettings, oInit.aaData[ i ] );
}
}
else if ( oSettings.bDeferLoading || _fnDataSource( oSettings ) == 'dom' ) {
/* Grab the data from the page - only do this when deferred loading or no Ajax
* source since there is no point in reading the DOM data if we are then going
* to replace it with Ajax data
*/
_fnAddTr( oSettings, $(oSettings.nTBody).children('tr') );
}
/* Copy the data index array */
oSettings.aiDisplay = oSettings.aiDisplayMaster.slice();
/* Initialisation complete - table can be drawn */
oSettings.bInitialised = true;
/* Check if we need to initialise the table (it might not have been handed off to the
* language processor)
*/
if ( bInitHandedOff === false ) {
_fnInitialise( oSettings );
}
};
/* Must be done after everything which can be overridden by the state saving! */
if ( oInit.bStateSave )
{
features.bStateSave = true;
_fnCallbackReg( oSettings, 'aoDrawCallback', _fnSaveState, 'state_save' );
_fnLoadState( oSettings, oInit, loadedInit );
}
else {
loadedInit();
}
} );
_that = null;
return this;
};
/*
* It is useful to have variables which are scoped locally so only the
* DataTables functions can access them and they don't leak into global space.
* At the same time these functions are often useful over multiple files in the
* core and API, so we list, or at least document, all variables which are used
* by DataTables as private variables here. This also ensures that there is no
* clashing of variable names and that they can easily be referenced for reuse.
*/
// Defined elsewhere
// _selector_run
// _selector_opts
// _selector_first
// _selector_row_indexes
var _ext; // DataTable.ext
var _Api; // DataTable.Api
var _api_register; // DataTable.Api.register
var _api_registerPlural; // DataTable.Api.registerPlural
var _re_dic = {};
var _re_new_lines = /[\r\n]/g;
var _re_html = /<.*?>/g;
// This is not strict ISO8601 - Date.parse() is quite lax, although
// implementations differ between browsers.
var _re_date = /^\d{2,4}[\.\/\-]\d{1,2}[\.\/\-]\d{1,2}([T ]{1}\d{1,2}[:\.]\d{2}([\.:]\d{2})?)?$/;
// Escape regular expression special characters
var _re_escape_regex = new RegExp( '(\\' + [ '/', '.', '*', '+', '?', '|', '(', ')', '[', ']', '{', '}', '\\', '$', '^', '-' ].join('|\\') + ')', 'g' );
// http://en.wikipedia.org/wiki/Foreign_exchange_market
// - \u20BD - Russian ruble.
// - \u20a9 - South Korean Won
// - \u20BA - Turkish Lira
// - \u20B9 - Indian Rupee
// - R - Brazil (R$) and South Africa
// - fr - Swiss Franc
// - kr - Swedish krona, Norwegian krone and Danish krone
// - \u2009 is thin space and \u202F is narrow no-break space, both used in many
// standards as thousands separators.
var _re_formatted_numeric = /[',$£€¥%\u2009\u202F\u20BD\u20a9\u20BArfk]/gi;
var _empty = function ( d ) {
return !d || d === true || d === '-';
};
var _intVal = function ( s ) {
var integer = parseInt( s, 10 );
return !isNaN(integer) && isFinite(s) ? integer : null;
};
// Convert from a formatted number with characters other than `.` as the
// decimal place, to a Javascript number
var _numToDecimal = function ( num, decimalPoint ) {
// Cache created regular expressions for speed as this function is called often
if ( ! _re_dic[ decimalPoint ] ) {
_re_dic[ decimalPoint ] = new RegExp( _fnEscapeRegex( decimalPoint ), 'g' );
}
return typeof num === 'string' && decimalPoint !== '.' ?
num.replace( /\./g, '' ).replace( _re_dic[ decimalPoint ], '.' ) :
num;
};
var _isNumber = function ( d, decimalPoint, formatted ) {
var strType = typeof d === 'string';
// If empty, return immediately, so for the checks below there must be a
// number if it is a formatted string (this stops strings such as "k" or
// "kr" being detected as formatted currency numbers)
if ( _empty( d ) ) {
return true;
}
if ( decimalPoint && strType ) {
d = _numToDecimal( d, decimalPoint );
}
if ( formatted && strType ) {
d = d.replace( _re_formatted_numeric, '' );
}
return !isNaN( parseFloat(d) ) && isFinite( d );
};
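// Illustrative sketch of the numeric helpers above (example values are
// assumptions, not taken from the test suite):
//   _numToDecimal( '1.234,56', ',' );      // '1234.56' - dots stripped, comma becomes the decimal point
//   _isNumber( '1.234,56 kr', ',', true ); // true - currency characters are removed before the check
//   _isNumber( 'kr', ',', true );          // false - stripping leaves an empty string, which is not numeric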
// A string without any HTML in it can still be considered to be HTML - the
// html type is a superset of plain strings
var _isHtml = function ( d ) {
return _empty( d ) || typeof d === 'string';
};
var _htmlNumeric = function ( d, decimalPoint, formatted ) {
if ( _empty( d ) ) {
return true;
}
var html = _isHtml( d );
return ! html ?
null :
_isNumber( _stripHtml( d ), decimalPoint, formatted ) ?
true :
null;
};
var _pluck = function ( a, prop, prop2 ) {
var out = [];
var i=0, ien=a.length;
// Could have the test in the loop for slightly smaller code, but speed
// is essential here
if ( prop2 !== undefined ) {
for ( ; i<ien ; i++ ) {
if ( a[i] && a[i][ prop ] ) {
out.push( a[i][ prop ][ prop2 ] );
}
}
}
else {
for ( ; i<ien ; i++ ) {
if ( a[i] ) {
out.push( a[i][ prop ] );
}
}
}
return out;
};
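// For example (illustrative, mirroring how the core uses it):
//   _pluck( settings.aoData, '_aData' ); // array of each row's data object
//   _pluck( settings.aoData, 'nTr' );    // array of each row's TR node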
// Basically the same as _pluck, but rather than looping over `a` we use `order`
// as the indexes to pick from `a`
var _pluck_order = function ( a, order, prop, prop2 )
{
var out = [];
var i=0, ien=order.length;
// Could have the test in the loop for slightly smaller code, but speed
// is essential here
if ( prop2 !== undefined ) {
for ( ; i<ien ; i++ ) {
if ( a[ order[i] ][ prop ] ) {
out.push( a[ order[i] ][ prop ][ prop2 ] );
}
}
}
else {
for ( ; i<ien ; i++ ) {
out.push( a[ order[i] ][ prop ] );
}
}
return out;
};
var _range = function ( len, start )
{
var out = [];
var end;
if ( start === undefined ) {
start = 0;
end = len;
}
else {
end = start;
start = len;
}
for ( var i=start ; i<end ; i++ ) {
out.push( i );
}
return out;
};
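// A quick sketch of the two calling conventions:
//   _range( 4 );    // [0, 1, 2, 3] - single argument is the length
//   _range( 2, 5 ); // [2, 3, 4]    - two arguments are start and end (exclusive)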
var _removeEmpty = function ( a )
{
var out = [];
for ( var i=0, ien=a.length ; i<ien ; i++ ) {
if ( a[i] ) { // careful - will remove all falsy values!
out.push( a[i] );
}
}
return out;
};
var _stripHtml = function ( d ) {
return d.replace( _re_html, '' );
};
/**
* Determine if all values in the array are unique. This means we can short
* cut the _unique method at the cost of a single loop. A sorted array is used
* to easily check the values.
*
* @param {array} src Source array
* @return {boolean} true if all unique, false otherwise
* @ignore
*/
var _areAllUnique = function ( src ) {
if ( src.length < 2 ) {
return true;
}
var sorted = src.slice().sort();
var last = sorted[0];
for ( var i=1, ien=sorted.length ; i<ien ; i++ ) {
if ( sorted[i] === last ) {
return false;
}
last = sorted[i];
}
return true;
};
/**
* Find the unique elements in a source array.
*
* @param {array} src Source array
* @return {array} Array of unique items
* @ignore
*/
var _unique = function ( src )
{
if ( _areAllUnique( src ) ) {
return src.slice();
}
// A faster unique method is to use object keys to identify used values,
// but this doesn't work with arrays or objects, which we must also
// consider. See jsperf.com/compare-array-unique-versions/4 for more
// information.
var
out = [],
val,
i, ien=src.length,
j, k=0;
again: for ( i=0 ; i<ien ; i++ ) {
val = src[i];
for ( j=0 ; j<k ; j++ ) {
if ( out[j] === val ) {
continue again;
}
}
out.push( val );
k++;
}
return out;
};
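// e.g. _unique( [ 1, 2, 2, 3 ] ) returns [ 1, 2, 3 ] - first-occurrence order
// is preserved, and the _areAllUnique fast path avoids the O(n^2) scan when
// the input is already unique.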
/**
* DataTables utility methods
*
* This namespace provides helper methods that DataTables uses internally to
* create a DataTable, but which are not exclusively used only for DataTables.
* These methods can be used by extension authors to save the duplication of
* code.
*
* @namespace
*/
DataTable.util = {
/**
* Throttle the calls to a function. Arguments and context are maintained
* for the throttled function.
*
* @param {function} fn Function to be called
* @param {integer} freq Call frequency in mS
* @return {function} Wrapped function
*/
throttle: function ( fn, freq ) {
var
frequency = freq !== undefined ? freq : 200,
last,
timer;
return function () {
var
that = this,
now = +new Date(),
args = arguments;
if ( last && now < last + frequency ) {
clearTimeout( timer );
timer = setTimeout( function () {
last = undefined;
fn.apply( that, args );
}, frequency );
}
else {
last = now;
fn.apply( that, args );
}
};
},
/**
* Escape a string such that it can be used in a regular expression
*
* @param {string} val string to escape
* @returns {string} escaped string
*/
escapeRegex: function ( val ) {
return val.replace( _re_escape_regex, '\\$1' );
}
};
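// A sketch of how an extension might consume these utilities (the handler
// names here are hypothetical):
//   var save = DataTable.util.throttle( saveState, 500 ); // limit calls to roughly one per 500mS
//   $(window).on( 'resize', save );
//   new RegExp( DataTable.util.escapeRegex( '1.5' ) );    // /1\.5/ - matches the literal text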
/**
* Create a mapping object that allows camel case parameters to be looked up
* for their Hungarian counterparts. The mapping is stored in a private
* parameter called `_hungarianMap` which can be accessed on the source object.
* @param {object} o
* @memberof DataTable#oApi
*/
function _fnHungarianMap ( o )
{
var
hungarian = 'a aa ai ao as b fn i m o s ',
match,
newKey,
map = {};
$.each( o, function (key, val) {
match = key.match(/^([^A-Z]+?)([A-Z])/);
if ( match && hungarian.indexOf(match[1]+' ') !== -1 )
{
newKey = key.replace( match[0], match[2].toLowerCase() );
map[ newKey ] = key;
if ( match[1] === 'o' )
{
_fnHungarianMap( o[key] );
}
}
} );
o._hungarianMap = map;
}
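// Illustrative result (values assumed): for o = { iDisplayLength: 10, oLanguage: { sUrl: '' } }
// the generated o._hungarianMap is { displayLength: 'iDisplayLength', language: 'oLanguage' },
// i.e. camelCase name -> Hungarian name, recursing into 'o' prefixed objects.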
/**
* Convert from camel case parameters to Hungarian, based on a Hungarian map
* created by _fnHungarianMap.
* @param {object} src The model object which holds all parameters that can be
* mapped.
* @param {object} user The object to convert from camel case to Hungarian.
* @param {boolean} force When set to `true`, properties which already have a
* Hungarian value in the `user` object will be overwritten. Otherwise they
* won't be.
* @memberof DataTable#oApi
*/
function _fnCamelToHungarian ( src, user, force )
{
if ( ! src._hungarianMap ) {
_fnHungarianMap( src );
}
var hungarianKey;
$.each( user, function (key, val) {
hungarianKey = src._hungarianMap[ key ];
if ( hungarianKey !== undefined && (force || user[hungarianKey] === undefined) )
{
// For objects, we need to buzz down into the object to copy parameters
if ( hungarianKey.charAt(0) === 'o' )
{
// Copy the camelCase options over to the hungarian
if ( ! user[ hungarianKey ] ) {
user[ hungarianKey ] = {};
}
$.extend( true, user[hungarianKey], user[key] );
_fnCamelToHungarian( src[hungarianKey], user[hungarianKey], force );
}
else {
user[hungarianKey] = user[ key ];
}
}
} );
}
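// For example, a user initialisation object such as (values assumed):
//   { lengthMenu: [ 10, 25 ], language: { url: '/lang.json' } }
// is augmented in place so it also carries the Hungarian forms:
//   { aLengthMenu: [ 10, 25 ], oLanguage: { sUrl: '/lang.json' }, ... }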
/**
* Language compatibility - when certain options are given, and others aren't, we
* need to duplicate the values over, in order to provide backwards compatibility
* with older language files.
* @param {object} lang Language object to update for backwards compatibility
* @memberof DataTable#oApi
*/
function _fnLanguageCompat( lang )
{
var defaults = DataTable.defaults.oLanguage;
var zeroRecords = lang.sZeroRecords;
/* Backwards compatibility - if there is no sEmptyTable given, then use the same as
* sZeroRecords - assuming that is given.
*/
if ( ! lang.sEmptyTable && zeroRecords &&
defaults.sEmptyTable === "No data available in table" )
{
_fnMap( lang, lang, 'sZeroRecords', 'sEmptyTable' );
}
/* Likewise with loading records */
if ( ! lang.sLoadingRecords && zeroRecords &&
defaults.sLoadingRecords === "Loading..." )
{
_fnMap( lang, lang, 'sZeroRecords', 'sLoadingRecords' );
}
// Old parameter name of the thousands separator mapped onto the new
if ( lang.sInfoThousands ) {
lang.sThousands = lang.sInfoThousands;
}
var decimal = lang.sDecimal;
if ( decimal ) {
_addNumericSort( decimal );
}
}
/**
* Map one parameter onto another
* @param {object} o Object to map
* @param {string} knew The new parameter name
* @param {string} old The old parameter name
*/
var _fnCompatMap = function ( o, knew, old ) {
if ( o[ knew ] !== undefined ) {
o[ old ] = o[ knew ];
}
};
/**
* Provide backwards compatibility for the main DT options. Note that the new
* options are mapped onto the old parameters, so this is an external interface
* change only.
* @param {object} init Object to map
*/
function _fnCompatOpts ( init )
{
_fnCompatMap( init, 'ordering', 'bSort' );
_fnCompatMap( init, 'orderMulti', 'bSortMulti' );
_fnCompatMap( init, 'orderClasses', 'bSortClasses' );
_fnCompatMap( init, 'orderCellsTop', 'bSortCellsTop' );
_fnCompatMap( init, 'order', 'aaSorting' );
_fnCompatMap( init, 'orderFixed', 'aaSortingFixed' );
_fnCompatMap( init, 'paging', 'bPaginate' );
_fnCompatMap( init, 'pagingType', 'sPaginationType' );
_fnCompatMap( init, 'pageLength', 'iDisplayLength' );
_fnCompatMap( init, 'searching', 'bFilter' );
// Boolean initialisation of x-scrolling
if ( typeof init.sScrollX === 'boolean' ) {
init.sScrollX = init.sScrollX ? '100%' : '';
}
if ( typeof init.scrollX === 'boolean' ) {
init.scrollX = init.scrollX ? '100%' : '';
}
// Column search objects are in an array, so they need to be converted
// element by element
var searchCols = init.aoSearchCols;
if ( searchCols ) {
for ( var i=0, ien=searchCols.length ; i<ien ; i++ ) {
if ( searchCols[i] ) {
_fnCamelToHungarian( DataTable.models.oSearch, searchCols[i] );
}
}
}
}
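// e.g. an init object of { ordering: false, pageLength: 25 } gains bSort: false
// and iDisplayLength: 25, so the rest of the core only ever needs to read the
// legacy Hungarian names.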
/**
* Provide backwards compatibility for column options. Note that the new options
* are mapped onto the old parameters, so this is an external interface change
* only.
* @param {object} init Object to map
*/
function _fnCompatCols ( init )
{
_fnCompatMap( init, 'orderable', 'bSortable' );
_fnCompatMap( init, 'orderData', 'aDataSort' );
_fnCompatMap( init, 'orderSequence', 'asSorting' );
_fnCompatMap( init, 'orderDataType', 'sortDataType' );
// orderData can be given as an integer
var dataSort = init.aDataSort;
if ( typeof dataSort === 'number' && ! $.isArray( dataSort ) ) {
init.aDataSort = [ dataSort ];
}
}
/**
* Browser feature detection for capabilities, quirks
* @param {object} settings dataTables settings object
* @memberof DataTable#oApi
*/
function _fnBrowserDetect( settings )
{
// We don't need to do this every time DataTables is constructed, the values
// calculated are specific to the browser and OS configuration which we
// don't expect to change between initialisations
if ( ! DataTable.__browser ) {
var browser = {};
DataTable.__browser = browser;
// Scrolling feature / quirks detection
var n = $('<div/>')
.css( {
position: 'fixed',
top: 0,
left: $(window).scrollLeft()*-1, // allow for scrolling
height: 1,
width: 1,
overflow: 'hidden'
} )
.append(
$('<div/>')
.css( {
position: 'absolute',
top: 1,
left: 1,
width: 100,
overflow: 'scroll'
} )
.append(
$('<div/>')
.css( {
width: '100%',
height: 10
} )
)
)
.appendTo( 'body' );
var outer = n.children();
var inner = outer.children();
// Numbers below, in order, are:
// inner.offsetWidth, inner.clientWidth, outer.offsetWidth, outer.clientWidth
//
// IE6 XP: 100 100 100 83
// IE7 Vista: 100 100 100 83
// IE 8+ Windows: 83 83 100 83
// Evergreen Windows: 83 83 100 83
// Evergreen Mac with scrollbars: 85 85 100 85
// Evergreen Mac without scrollbars: 100 100 100 100
// Get scrollbar width
browser.barWidth = outer[0].offsetWidth - outer[0].clientWidth;
// IE6/7 will oversize a width 100% element inside a scrolling element, to
// include the width of the scrollbar, while other browsers ensure the inner
// element is contained without forcing scrolling
browser.bScrollOversize = inner[0].offsetWidth === 100 && outer[0].clientWidth !== 100;
// In rtl text layout, some browsers (most, but not all) will place the
// scrollbar on the left, rather than the right.
browser.bScrollbarLeft = Math.round( inner.offset().left ) !== 1;
// IE8- don't provide height and width for getBoundingClientRect
browser.bBounding = n[0].getBoundingClientRect().width ? true : false;
n.remove();
}
$.extend( settings.oBrowser, DataTable.__browser );
settings.oScroll.iBarWidth = DataTable.__browser.barWidth;
}
/**
* Array.prototype reduce[Right] method, used for browsers which don't support
* JS 1.6. Done this way to reduce code size, since we iterate either way
* @param {array|object} that Array-like object to iterate over
* @param {function} fn Reduce callback function
* @param {*} [init] Initial accumulator value
* @param {int} start Start index
* @param {int} end End index (exclusive)
* @param {int} inc Index increment - 1 for reduce, -1 for reduceRight
* @memberof DataTable#oApi
*/
function _fnReduce ( that, fn, init, start, end, inc )
{
var
i = start,
value,
isSet = false;
if ( init !== undefined ) {
value = init;
isSet = true;
}
while ( i !== end ) {
if ( ! that.hasOwnProperty(i) ) {
// Skip holes, but still advance the index, otherwise this would loop forever
i += inc;
continue;
}
value = isSet ?
fn( value, that[i], i, that ) :
that[i];
isSet = true;
i += inc;
}
return value;
}
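// e.g. _fnReduce( [ 1, 2, 3 ], function (a, b) { return a + b; }, 0, 0, 3, 1 )
// returns 6; calling with start = length-1, end = -1, inc = -1 gives the
// reduceRight behaviour.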
/**
* Add a column to the list used for the table with default values
* @param {object} oSettings dataTables settings object
* @param {node} nTh The th element for this column
* @memberof DataTable#oApi
*/
function _fnAddColumn( oSettings, nTh )
{
// Add column to aoColumns array
var oDefaults = DataTable.defaults.column;
var iCol = oSettings.aoColumns.length;
var oCol = $.extend( {}, DataTable.models.oColumn, oDefaults, {
"nTh": nTh ? nTh : document.createElement('th'),
"sTitle": oDefaults.sTitle ? oDefaults.sTitle : nTh ? nTh.innerHTML : '',
"aDataSort": oDefaults.aDataSort ? oDefaults.aDataSort : [iCol],
"mData": oDefaults.mData ? oDefaults.mData : iCol,
idx: iCol
} );
oSettings.aoColumns.push( oCol );
// Add search object for column specific search. Note that the `searchCols[ iCol ]`
// passed into extend can be undefined. This allows the user to give a default
// with only some of the parameters defined, and also not give a default
var searchCols = oSettings.aoPreSearchCols;
searchCols[ iCol ] = $.extend( {}, DataTable.models.oSearch, searchCols[ iCol ] );
// Use the default column options function to initialise classes etc
_fnColumnOptions( oSettings, iCol, $(nTh).data() );
}
/**
* Apply options for a column
* @param {object} oSettings dataTables settings object
* @param {int} iCol column index to consider
* @param {object} oOptions object with sType, bVisible and bSearchable etc
* @memberof DataTable#oApi
*/
function _fnColumnOptions( oSettings, iCol, oOptions )
{
var oCol = oSettings.aoColumns[ iCol ];
var oClasses = oSettings.oClasses;
var th = $(oCol.nTh);
// Try to get width information from the DOM. We can't get it from CSS
// as we'd need to parse the CSS stylesheet. `width` option can override
if ( ! oCol.sWidthOrig ) {
// Width attribute
oCol.sWidthOrig = th.attr('width') || null;
// Style attribute
var t = (th.attr('style') || '').match(/width:\s*(\d+[pxem%]+)/);
if ( t ) {
oCol.sWidthOrig = t[1];
}
}
/* User specified column options */
if ( oOptions !== undefined && oOptions !== null )
{
// Backwards compatibility
_fnCompatCols( oOptions );
// Map camel case parameters to their Hungarian counterparts
_fnCamelToHungarian( DataTable.defaults.column, oOptions );
/* Backwards compatibility for mDataProp */
if ( oOptions.mDataProp !== undefined && !oOptions.mData )
{
oOptions.mData = oOptions.mDataProp;
}
if ( oOptions.sType )
{
oCol._sManualType = oOptions.sType;
}
// `class` is a reserved word in Javascript, so we need to provide
// the ability to use a valid name for the camel case input
if ( oOptions.className && ! oOptions.sClass )
{
oOptions.sClass = oOptions.className;
}
if ( oOptions.sClass ) {
th.addClass( oOptions.sClass );
}
$.extend( oCol, oOptions );
_fnMap( oCol, oOptions, "sWidth", "sWidthOrig" );
/* iDataSort to be applied (backwards compatibility), but aDataSort will take
* priority if defined
*/
if ( oOptions.iDataSort !== undefined )
{
oCol.aDataSort = [ oOptions.iDataSort ];
}
_fnMap( oCol, oOptions, "aDataSort" );
}
/* Cache the data get and set functions for speed */
var mDataSrc = oCol.mData;
var mData = _fnGetObjectDataFn( mDataSrc );
var mRender = oCol.mRender ? _fnGetObjectDataFn( oCol.mRender ) : null;
var attrTest = function( src ) {
return typeof src === 'string' && src.indexOf('@') !== -1;
};
oCol._bAttrSrc = $.isPlainObject( mDataSrc ) && (
attrTest(mDataSrc.sort) || attrTest(mDataSrc.type) || attrTest(mDataSrc.filter)
);
oCol._setter = null;
oCol.fnGetData = function (rowData, type, meta) {
var innerData = mData( rowData, type, undefined, meta );
return mRender && type ?
mRender( innerData, type, rowData, meta ) :
innerData;
};
oCol.fnSetData = function ( rowData, val, meta ) {
return _fnSetObjectDataFn( mDataSrc )( rowData, val, meta );
};
// Indicate if DataTables should read DOM data as an object or array
// Used in _fnGetRowElements
if ( typeof mDataSrc !== 'number' ) {
oSettings._rowReadObject = true;
}
/* Feature sorting overrides column specific when off */
if ( !oSettings.oFeatures.bSort )
{
oCol.bSortable = false;
th.addClass( oClasses.sSortableNone ); // Have to add class here as order event isn't called
}
/* Check that the class assignment is correct for sorting */
var bAsc = $.inArray('asc', oCol.asSorting) !== -1;
var bDesc = $.inArray('desc', oCol.asSorting) !== -1;
if ( !oCol.bSortable || (!bAsc && !bDesc) )
{
oCol.sSortingClass = oClasses.sSortableNone;
oCol.sSortingClassJUI = "";
}
else if ( bAsc && !bDesc )
{
oCol.sSortingClass = oClasses.sSortableAsc;
oCol.sSortingClassJUI = oClasses.sSortJUIAscAllowed;
}
else if ( !bAsc && bDesc )
{
oCol.sSortingClass = oClasses.sSortableDesc;
oCol.sSortingClassJUI = oClasses.sSortJUIDescAllowed;
}
else
{
oCol.sSortingClass = oClasses.sSortable;
oCol.sSortingClassJUI = oClasses.sSortJUI;
}
}
/**
* Adjust the table column widths for new data. Note: you would probably want to
* do a redraw after calling this function!
* @param {object} settings dataTables settings object
* @memberof DataTable#oApi
*/
function _fnAdjustColumnSizing ( settings )
{
/* Not interested in doing column width calculation if auto-width is disabled */
if ( settings.oFeatures.bAutoWidth !== false )
{
var columns = settings.aoColumns;
_fnCalculateColumnWidths( settings );
for ( var i=0 , iLen=columns.length ; i<iLen ; i++ )
{
columns[i].nTh.style.width = columns[i].sWidth;
}
}
var scroll = settings.oScroll;
if ( scroll.sY !== '' || scroll.sX !== '')
{
_fnScrollDraw( settings );
}
_fnCallbackFire( settings, null, 'column-sizing', [settings] );
}
/**
* Convert the index of a visible column to the index in the data array (taking
* account of hidden columns)
* @param {object} oSettings dataTables settings object
* @param {int} iMatch Visible column index to lookup
* @returns {int} i the data index
* @memberof DataTable#oApi
*/
function _fnVisibleToColumnIndex( oSettings, iMatch )
{
var aiVis = _fnGetColumns( oSettings, 'bVisible' );
return typeof aiVis[iMatch] === 'number' ?
aiVis[iMatch] :
null;
}
/**
* Convert the index of a column in the data array to the visible column index
* (taking account of hidden columns)
* @param {int} iMatch Column index to lookup
* @param {object} oSettings dataTables settings object
* @returns {int} i the data index
* @memberof DataTable#oApi
*/
function _fnColumnIndexToVisible( oSettings, iMatch )
{
var aiVis = _fnGetColumns( oSettings, 'bVisible' );
var iPos = $.inArray( iMatch, aiVis );
return iPos !== -1 ? iPos : null;
}
/**
* Get the number of visible columns
* @param {object} oSettings dataTables settings object
* @returns {int} i the number of visible columns
* @memberof DataTable#oApi
*/
function _fnVisbleColumns( oSettings )
{
var vis = 0;
// No reduce in IE8, use a loop for now
$.each( oSettings.aoColumns, function ( i, col ) {
if ( col.bVisible && $(col.nTh).css('display') !== 'none' ) {
vis++;
}
} );
return vis;
}
/**
* Get an array of column indexes that match a given property
* @param {object} oSettings dataTables settings object
* @param {string} sParam Parameter in aoColumns to look for - typically
* bVisible or bSearchable
* @returns {array} Array of indexes with matched properties
* @memberof DataTable#oApi
*/
function _fnGetColumns( oSettings, sParam )
{
var a = [];
$.map( oSettings.aoColumns, function(val, i) {
if ( val[sParam] ) {
a.push( i );
}
} );
return a;
}
/**
* Calculate the 'type' of a column
* @param {object} settings dataTables settings object
* @memberof DataTable#oApi
*/
function _fnColumnTypes ( settings )
{
var columns = settings.aoColumns;
var data = settings.aoData;
var types = DataTable.ext.type.detect;
var i, ien, j, jen, k, ken;
var col, cell, detectedType, cache;
// For each column, spin over the rows and type detectors until a type is found
for ( i=0, ien=columns.length ; i<ien ; i++ ) {
col = columns[i];
cache = [];
if ( ! col.sType && col._sManualType ) {
col.sType = col._sManualType;
}
else if ( ! col.sType ) {
for ( j=0, jen=types.length ; j<jen ; j++ ) {
for ( k=0, ken=data.length ; k<ken ; k++ ) {
// Use a cache array so we only need to get the type data
// from the formatter once (when using multiple detectors)
if ( cache[k] === undefined ) {
cache[k] = _fnGetCellData( settings, k, i, 'type' );
}
detectedType = types[j]( cache[k], settings );
// If null, then this type can't apply to this column, so
// rather than testing all cells, break out. There is an
// exception for the last type which is `html`. We need to
// scan all rows since it is possible to mix string and HTML
// types
if ( ! detectedType && j !== types.length-1 ) {
break;
}
// Only a single match is needed for html type since it is
// bottom of the pile and very similar to string
if ( detectedType === 'html' ) {
break;
}
}
// Type is valid for all data points in the column - use this
// type
if ( detectedType ) {
col.sType = detectedType;
break;
}
}
// Fall back - if no type was detected, always use string
if ( ! col.sType ) {
col.sType = 'string';
}
}
}
}
/**
* Take the column definitions and static columns arrays and calculate how
* they relate to column indexes. The callback function will then apply the
* definition found for a column to a suitable configuration object.
* @param {object} oSettings dataTables settings object
* @param {array} aoColDefs The aoColumnDefs array that is to be applied
* @param {array} aoCols The aoColumns array that defines columns individually
* @param {function} fn Callback function - takes two parameters, the calculated
* column index and the definition for that column.
* @memberof DataTable#oApi
*/
function _fnApplyColumnDefs( oSettings, aoColDefs, aoCols, fn )
{
var i, iLen, j, jLen, k, kLen, def;
var columns = oSettings.aoColumns;
// Column definitions with aTargets
if ( aoColDefs )
{
/* Loop over the definitions array - loop in reverse so first instance has priority */
for ( i=aoColDefs.length-1 ; i>=0 ; i-- )
{
def = aoColDefs[i];
/* Each definition can target multiple columns, as it is an array */
var aTargets = def.targets !== undefined ?
def.targets :
def.aTargets;
if ( ! $.isArray( aTargets ) )
{
aTargets = [ aTargets ];
}
for ( j=0, jLen=aTargets.length ; j<jLen ; j++ )
{
if ( typeof aTargets[j] === 'number' && aTargets[j] >= 0 )
{
/* Add columns that we don't yet know about */
while( columns.length <= aTargets[j] )
{
_fnAddColumn( oSettings );
}
/* Integer, basic index */
fn( aTargets[j], def );
}
else if ( typeof aTargets[j] === 'number' && aTargets[j] < 0 )
{
/* Negative integer, right to left column counting */
fn( columns.length+aTargets[j], def );
}
else if ( typeof aTargets[j] === 'string' )
{
/* Class name matching on TH element */
for ( k=0, kLen=columns.length ; k<kLen ; k++ )
{
if ( aTargets[j] == "_all" ||
$(columns[k].nTh).hasClass( aTargets[j] ) )
{
fn( k, def );
}
}
}
}
}
}
// Statically defined columns array
if ( aoCols )
{
for ( i=0, iLen=aoCols.length ; i<iLen ; i++ )
{
fn( i, aoCols[i] );
}
}
}
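// An illustrative aoColumnDefs array (option bodies elided) and how each
// `targets` value resolves in the loop above:
//   { targets: 0, ... }      - first column, by data index
//   { targets: -1, ... }     - last column, counted from the right
//   { targets: 'tag', ... }  - every column whose TH has the class "tag"
//   { targets: '_all', ... } - every column; the reverse loop means entries
//                              earlier in the array take priority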
/**
* Add a data array to the table, creating DOM node etc. This is the parallel to
* _fnGatherData, but for adding rows from a Javascript source, rather than a
* DOM source.
* @param {object} oSettings dataTables settings object
* @param {array} aDataIn data array to be added
* @param {node} [nTr] TR element to add to the table - optional. If not given,
* DataTables will create a row automatically
* @param {array} [anTds] Array of TD|TH elements for the row - must be given
* if nTr is.
* @returns {int} >=0 if successful (index of new aoData entry), -1 if failed
* @memberof DataTable#oApi
*/
function _fnAddData ( oSettings, aDataIn, nTr, anTds )
{
/* Create the object for storing information about this new row */
var iRow = oSettings.aoData.length;
var oData = $.extend( true, {}, DataTable.models.oRow, {
src: nTr ? 'dom' : 'data',
idx: iRow
} );
oData._aData = aDataIn;
oSettings.aoData.push( oData );
/* Create the cells */
var nTd, sThisType;
var columns = oSettings.aoColumns;
// Invalidate the column types as the new data needs to be revalidated
for ( var i=0, iLen=columns.length ; i<iLen ; i++ )
{
columns[i].sType = null;
}
/* Add to the display array */
oSettings.aiDisplayMaster.push( iRow );
var id = oSettings.rowIdFn( aDataIn );
if ( id !== undefined ) {
oSettings.aIds[ id ] = oData;
}
/* Create the DOM information, or register it if already present */
if ( nTr || ! oSettings.oFeatures.bDeferRender )
{
_fnCreateTr( oSettings, iRow, nTr, anTds );
}
return iRow;
}
/**
* Add one or more TR elements to the table. Generally we'd expect to
* use this for reading data from a DOM sourced table, but it could be
* used for a single TR element. Note that if a TR is given, it is used (i.e.
* it is not cloned).
* @param {object} settings dataTables settings object
* @param {array|node|jQuery} trs The TR element(s) to add to the table
* @returns {array} Array of indexes for the added rows
* @memberof DataTable#oApi
*/
function _fnAddTr( settings, trs )
{
var row;
// Allow an individual node to be passed in
if ( ! (trs instanceof $) ) {
trs = $(trs);
}
return trs.map( function (i, el) {
row = _fnGetRowElements( settings, el );
return _fnAddData( settings, row.data, el, row.cells );
} );
}
/**
* Take a TR element and convert it to an index in aoData
* @param {object} oSettings dataTables settings object
* @param {node} n the TR element to find
* @returns {int} index if the node is found, null if not
* @memberof DataTable#oApi
*/
function _fnNodeToDataIndex( oSettings, n )
{
return (n._DT_RowIndex!==undefined) ? n._DT_RowIndex : null;
}
/**
* Take a TD element and convert it into a column data index (not the visible index)
* @param {object} oSettings dataTables settings object
* @param {int} iRow The row number the TD/TH can be found in
* @param {node} n The TD/TH element to find
* @returns {int} index if the node is found, -1 if not
* @memberof DataTable#oApi
*/
function _fnNodeToColumnIndex( oSettings, iRow, n )
{
return $.inArray( n, oSettings.aoData[ iRow ].anCells );
}
/**
* Get the data for a given cell from the internal cache, taking into account data mapping
* @param {object} settings dataTables settings object
* @param {int} rowIdx aoData row id
* @param {int} colIdx Column index
* @param {string} type data get type ('display', 'type', 'filter', 'sort')
* @returns {*} Cell data
* @memberof DataTable#oApi
*/
function _fnGetCellData( settings, rowIdx, colIdx, type )
{
var draw = settings.iDraw;
var col = settings.aoColumns[colIdx];
var rowData = settings.aoData[rowIdx]._aData;
var defaultContent = col.sDefaultContent;
var cellData = col.fnGetData( rowData, type, {
settings: settings,
row: rowIdx,
col: colIdx
} );
if ( cellData === undefined ) {
if ( settings.iDrawError != draw && defaultContent === null ) {
_fnLog( settings, 0, "Requested unknown parameter "+
(typeof col.mData=='function' ? '{function}' : "'"+col.mData+"'")+
" for row "+rowIdx+", column "+colIdx, 4 );
settings.iDrawError = draw;
}
return defaultContent;
}
// When the data source is null and a specific data type is requested (i.e.
// not the original data), we can use default column data
if ( (cellData === rowData || cellData === null) && defaultContent !== null && type !== undefined ) {
cellData = defaultContent;
}
else if ( typeof cellData === 'function' ) {
// If the data source is a function, then we run it and use the return,
// executing in the scope of the data object (for instances)
return cellData.call( rowData );
}
if ( cellData === null && type == 'display' ) {
return '';
}
return cellData;
}
/**
* Set the value for a specific cell, into the internal data cache
* @param {object} settings dataTables settings object
* @param {int} rowIdx aoData row id
* @param {int} colIdx Column index
* @param {*} val Value to set
* @memberof DataTable#oApi
*/
function _fnSetCellData( settings, rowIdx, colIdx, val )
{
var col = settings.aoColumns[colIdx];
var rowData = settings.aoData[rowIdx]._aData;
col.fnSetData( rowData, val, {
settings: settings,
row: rowIdx,
col: colIdx
} );
}
// Private variable that is used to match action syntax in the data property object
var __reArray = /\[.*?\]$/;
var __reFn = /\(\)$/;
/**
* Split string on periods, taking into account escaped periods
* @param {string} str String to split
* @return {array} Split string
*/
function _fnSplitObjNotation( str )
{
return $.map( str.match(/(\\.|[^\.])+/g) || [''], function ( s ) {
return s.replace(/\\\./g, '.');
} );
}
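// e.g. _fnSplitObjNotation( 'name.first' ) returns [ 'name', 'first' ], while
// _fnSplitObjNotation( 'a\\.b.c' ) returns [ 'a.b', 'c' ] - the escaped
// period survives as a literal dot.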
/**
* Return a function that can be used to get data from a source object, taking
* into account the ability to use nested objects as a source
* @param {string|int|function} mSource The data source for the object
* @returns {function} Data get function
* @memberof DataTable#oApi
*/
function _fnGetObjectDataFn( mSource )
{
if ( $.isPlainObject( mSource ) )
{
/* Build an object of get functions, and wrap them in a single call */
var o = {};
$.each( mSource, function (key, val) {
if ( val ) {
o[key] = _fnGetObjectDataFn( val );
}
} );
return function (data, type, row, meta) {
var t = o[type] || o._;
return t !== undefined ?
t(data, type, row, meta) :
data;
};
}
else if ( mSource === null )
{
/* Give an empty string for rendering / sorting etc */
return function (data) { // type, row and meta also passed, but not used
return data;
};
}
else if ( typeof mSource === 'function' )
{
return function (data, type, row, meta) {
return mSource( data, type, row, meta );
};
}
else if ( typeof mSource === 'string' && (mSource.indexOf('.') !== -1 ||
mSource.indexOf('[') !== -1 || mSource.indexOf('(') !== -1) )
{
/* If there is a . in the source string then the data source is in a
* nested object so we loop over the data for each level to get the next
* level down. On each loop we test for undefined, and if found immediately
* return. This allows entire objects to be missing and sDefaultContent to
* be used if defined, rather than throwing an error
*/
var fetchData = function (data, type, src) {
var arrayNotation, funcNotation, out, innerSrc;
if ( src !== "" )
{
var a = _fnSplitObjNotation( src );
for ( var i=0, iLen=a.length ; i<iLen ; i++ )
{
// Check if we are dealing with special notation
arrayNotation = a[i].match(__reArray);
funcNotation = a[i].match(__reFn);
if ( arrayNotation )
{
// Array notation
a[i] = a[i].replace(__reArray, '');
// Condition allows simply [] to be passed in
if ( a[i] !== "" ) {
data = data[ a[i] ];
}
out = [];
// Get the remainder of the nested object to get, so we can recurse
a.splice( 0, i+1 );
innerSrc = a.join('.');
// Traverse each entry in the array getting the properties requested
if ( $.isArray( data ) ) {
for ( var j=0, jLen=data.length ; j<jLen ; j++ ) {
out.push( fetchData( data[j], type, innerSrc ) );
}
}
// If a string is given in between the array notation indicators, that
// is used to join the strings together, otherwise an array is returned
var join = arrayNotation[0].substring(1, arrayNotation[0].length-1);
data = (join==="") ? out : out.join(join);
// The inner call to fetchData has already traversed through the remainder
// of the source requested, so we exit from the loop
break;
}
else if ( funcNotation )
{
// Function call
a[i] = a[i].replace(__reFn, '');
data = data[ a[i] ]();
continue;
}
if ( data === null || data[ a[i] ] === undefined )
{
return undefined;
}
data = data[ a[i] ];
}
}
return data;
};
return function (data, type) { // row and meta also passed, but not used
return fetchData( data, type, mSource );
};
}
else
{
/* Array or flat object mapping */
return function (data, type) { // row and meta also passed, but not used
return data[mSource];
};
}
}
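// A short sketch of the notations the returned getter supports (data values
// here are assumed for illustration):
//   _fnGetObjectDataFn( 'name.first' )( { name: { first: 'Ada' } } ); // 'Ada'
//   _fnGetObjectDataFn( 'tags[, ]' )( { tags: [ 'a', 'b' ] } );       // 'a, b'
//   _fnGetObjectDataFn( 'count()' )( { count: function () { return 3; } } ); // 3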
/**
* Return a function that can be used to set data from a source object, taking
* into account the ability to use nested objects as a source
* @param {string|int|function} mSource The data source for the object
* @returns {function} Data set function
* @memberof DataTable#oApi
*/
function _fnSetObjectDataFn( mSource )
{
if ( $.isPlainObject( mSource ) )
{
/* Unlike get, only the underscore (global) option is used for
* setting data since we don't know the type here. This is why an object
* option is not documented for `mData` (which is read/write), but it is
* for `mRender` which is read only.
*/
return _fnSetObjectDataFn( mSource._ );
}
else if ( mSource === null )
{
/* Nothing to do when the data source is null */
return function () {};
}
else if ( typeof mSource === 'function' )
{
return function (data, val, meta) {
mSource( data, 'set', val, meta );
};
}
else if ( typeof mSource === 'string' && (mSource.indexOf('.') !== -1 ||
mSource.indexOf('[') !== -1 || mSource.indexOf('(') !== -1) )
{
/* Like the get, we need to get data from a nested object */
var setData = function (data, val, src) {
var a = _fnSplitObjNotation( src ), b;
var aLast = a[a.length-1];
var arrayNotation, funcNotation, o, innerSrc;
for ( var i=0, iLen=a.length-1 ; i<iLen ; i++ )
{
// Check if we are dealing with an array notation request
arrayNotation = a[i].match(__reArray);
funcNotation = a[i].match(__reFn);
if ( arrayNotation )
{
a[i] = a[i].replace(__reArray, '');
data[ a[i] ] = [];
// Get the remainder of the nested object to set so we can recurse
b = a.slice();
b.splice( 0, i+1 );
innerSrc = b.join('.');
// Traverse each entry in the array setting the properties requested
if ( $.isArray( val ) )
{
for ( var j=0, jLen=val.length ; j<jLen ; j++ )
{
o = {};
setData( o, val[j], innerSrc );
data[ a[i] ].push( o );
}
}
else
{
// We've been asked to save data to an array, but it
// isn't array data to be saved. Best that can be done
// is to just save the value.
data[ a[i] ] = val;
}
// The inner call to setData has already traversed through the remainder
// of the source and has set the data, thus we can exit here
return;
}
else if ( funcNotation )
{
// Function call
a[i] = a[i].replace(__reFn, '');
data = data[ a[i] ]( val );
}
// If the nested object doesn't currently exist - since we are
// trying to set the value - create it
if ( data[ a[i] ] === null || data[ a[i] ] === undefined )
{
data[ a[i] ] = {};
}
data = data[ a[i] ];
}
// Last item in the input - i.e. the actual set
if ( aLast.match( __reFn ) )
{
// Function call
data = data[ aLast.replace(__reFn, '') ]( val );
}
else
{
// If array notation is used, we just want to strip it and use the property name
// and assign the value. If it isn't used, then we get the result we want anyway
data[ aLast.replace(__reArray, '') ] = val;
}
};
return function (data, val) { // meta is also passed in, but not used
return setData( data, val, mSource );
};
}
else
{
/* Array or flat object mapping */
return function (data, val) { // meta is also passed in, but not used
data[mSource] = val;
};
}
}
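// The setter is the mirror image, creating intermediate objects as required
// (an illustrative sketch):
//   var set = _fnSetObjectDataFn( 'name.first' );
//   var d = {};
//   set( d, 'Ada' ); // d is now { name: { first: 'Ada' } }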
/**
* Return an array with the full table data
* @param {object} settings dataTables settings object
* @returns {array} Master data array
* @memberof DataTable#oApi
*/
function _fnGetDataMaster ( settings )
{
return _pluck( settings.aoData, '_aData' );
}
/**
* Nuke the table
* @param {object} oSettings dataTables settings object
* @memberof DataTable#oApi
*/
function _fnClearTable( settings )
{
settings.aoData.length = 0;
settings.aiDisplayMaster.length = 0;
settings.aiDisplay.length = 0;
settings.aIds = {};
}
/**
* Take an array of integers (index array) and remove a target integer (value - not
* the key!)
* @param {array} a Index array to target
* @param {int} iTarget value to find
* @memberof DataTable#oApi
*/
function _fnDeleteIndex( a, iTarget, splice )
{
var iTargetIndex = -1;
for ( var i=0, iLen=a.length ; i<iLen ; i++ )
{
if ( a[i] == iTarget )
{
iTargetIndex = i;
}
else if ( a[i] > iTarget )
{
a[i]--;
}
}
if ( iTargetIndex != -1 && splice === undefined )
{
a.splice( iTargetIndex, 1 );
}
}
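// e.g. with a = [ 0, 1, 2, 3 ], _fnDeleteIndex( a, 1 ) leaves [ 0, 1, 2 ] -
// the value 1 is spliced out and all larger values are decremented by one.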
/**
* Mark cached data as invalid such that a re-read of the data will occur when
* the cached data is next requested. Also update from the data source object.
*
* @param {object} settings DataTables settings object
* @param {int} rowIdx Row index to invalidate
* @param {string} [src] Source to invalidate from: undefined, 'auto', 'dom'
* or 'data'
* @param {int} [colIdx] Column index to invalidate. If undefined the whole
* row will be invalidated
* @memberof DataTable#oApi
*
* @todo For the modularisation of v1.11 this will need to become a callback, so
* the sort and filter methods can subscribe to it. That will required
* initialisation options for sorting, which is why it is not already baked in
*/
function _fnInvalidate( settings, rowIdx, src, colIdx )
{
var row = settings.aoData[ rowIdx ];
var i, ien;
var cellWrite = function ( cell, col ) {
// This is very frustrating, but in IE if you just write directly
// to innerHTML, any elements that are overwritten are GC'ed,
// even if there is a reference to them elsewhere
while ( cell.childNodes.length ) {
cell.removeChild( cell.firstChild );
}
cell.innerHTML = _fnGetCellData( settings, rowIdx, col, 'display' );
};
// Are we reading the data from the DOM or the data object?
if ( src === 'dom' || ((! src || src === 'auto') && row.src === 'dom') ) {
// Read the data from the DOM
row._aData = _fnGetRowElements(
settings, row, colIdx, colIdx === undefined ? undefined : row._aData
)
.data;
}
else {
// Reading from data object, update the DOM
var cells = row.anCells;
if ( cells ) {
if ( colIdx !== undefined ) {
cellWrite( cells[colIdx], colIdx );
}
else {
for ( i=0, ien=cells.length ; i<ien ; i++ ) {
cellWrite( cells[i], i );
}
}
}
}
// For both row and cell invalidation, the cached data for sorting and
// filtering is nulled out
row._aSortData = null;
row._aFilterData = null;
// Invalidate the type for a specific column (if given) or all columns since
// the data might have changed
var cols = settings.aoColumns;
if ( colIdx !== undefined ) {
cols[ colIdx ].sType = null;
}
else {
for ( i=0, ien=cols.length ; i<ien ; i++ ) {
cols[i].sType = null;
}
// Update DataTables special `DT_*` attributes for the row
_fnRowAttributes( settings, row );
}
}
/**
* Build a data source object from an HTML row, reading the contents of the
* cells that are in the row.
*
* @param {object} settings DataTables settings object
* @param {node|object} row TR element from which to read data, or an existing
* row object from which to re-read the data from the cells
* @param {int} [colIdx] Optional column index
* @param {array|object} [d] Data source object. If `colIdx` is given then this
* parameter should also be given and will be used to write the data into.
* Only the column in question will be written
* @returns {object} Object with two parameters: `data` the data read, in
* document order, and `cells` an array of nodes (they can be useful to the
* caller, so rather than needing a second traversal to get them, just return
* them from here).
* @memberof DataTable#oApi
*/
function _fnGetRowElements( settings, row, colIdx, d )
{
var
tds = [],
td = row.firstChild,
name, col, o, i=0, contents,
columns = settings.aoColumns,
objectRead = settings._rowReadObject;
// Allow the data object to be passed in, or construct one here
d = d !== undefined ?
d :
objectRead ?
{} :
[];
var attr = function ( str, td ) {
if ( typeof str === 'string' ) {
var idx = str.indexOf('@');
if ( idx !== -1 ) {
var attr = str.substring( idx+1 );
var setter = _fnSetObjectDataFn( str );
setter( d, td.getAttribute( attr ) );
}
}
};
// Read data from a cell and store into the data object
var cellProcess = function ( cell ) {
if ( colIdx === undefined || colIdx === i ) {
col = columns[i];
contents = $.trim(cell.innerHTML);
if ( col && col._bAttrSrc ) {
var setter = _fnSetObjectDataFn( col.mData._ );
setter( d, contents );
attr( col.mData.sort, cell );
attr( col.mData.type, cell );
attr( col.mData.filter, cell );
}
else {
// Depending on the `data` option for the columns the data can
// be read to either an object or an array.
if ( objectRead ) {
if ( ! col._setter ) {
// Cache the setter function
col._setter = _fnSetObjectDataFn( col.mData );
}
col._setter( d, contents );
}
else {
d[i] = contents;
}
}
}
i++;
};
if ( td ) {
// `tr` element was passed in
while ( td ) {
name = td.nodeName.toUpperCase();
if ( name == "TD" || name == "TH" ) {
cellProcess( td );
tds.push( td );
}
td = td.nextSibling;
}
}
else {
// Existing row object passed in
tds = row.anCells;
for ( var j=0, jen=tds.length ; j<jen ; j++ ) {
cellProcess( tds[j] );
}
}
// Read the ID from the DOM if present
var rowNode = row.firstChild ? row : row.nTr;
if ( rowNode ) {
var id = rowNode.getAttribute( 'id' );
if ( id ) {
_fnSetObjectDataFn( settings.rowId )( d, id );
}
}
return {
data: d,
cells: tds
};
}
/**
* Create a new TR element (and its TD children) for a row
* @param {object} oSettings dataTables settings object
* @param {int} iRow Row to consider
* @param {node} [nTrIn] TR element to add to the table - optional. If not given,
* DataTables will create a row automatically
* @param {array} [anTds] Array of TD|TH elements for the row - must be given
* if nTr is.
* @memberof DataTable#oApi
*/
function _fnCreateTr ( oSettings, iRow, nTrIn, anTds )
{
var
row = oSettings.aoData[iRow],
rowData = row._aData,
cells = [],
nTr, nTd, oCol,
i, iLen;
if ( row.nTr === null )
{
nTr = nTrIn || document.createElement('tr');
row.nTr = nTr;
row.anCells = cells;
/* Use a private property on the node to allow reverse mapping from the node
* to the aoData array for fast look up
*/
nTr._DT_RowIndex = iRow;
/* Special parameters can be given by the data source to be used on the row */
_fnRowAttributes( oSettings, row );
/* Process each column */
for ( i=0, iLen=oSettings.aoColumns.length ; i<iLen ; i++ )
{
oCol = oSettings.aoColumns[i];
nTd = nTrIn ? anTds[i] : document.createElement( oCol.sCellType );
nTd._DT_CellIndex = {
row: iRow,
column: i
};
cells.push( nTd );
// Need to create the HTML if new, or if a rendering function is defined
if ( (!nTrIn || oCol.mRender || oCol.mData !== i) &&
(!$.isPlainObject(oCol.mData) || oCol.mData._ !== i+'.display')
) {
nTd.innerHTML = _fnGetCellData( oSettings, iRow, i, 'display' );
}
/* Add user defined class */
if ( oCol.sClass )
{
nTd.className += ' '+oCol.sClass;
}
// Visibility - add or remove as required
if ( oCol.bVisible && ! nTrIn )
{
nTr.appendChild( nTd );
}
else if ( ! oCol.bVisible && nTrIn )
{
nTd.parentNode.removeChild( nTd );
}
if ( oCol.fnCreatedCell )
{
oCol.fnCreatedCell.call( oSettings.oInstance,
nTd, _fnGetCellData( oSettings, iRow, i ), rowData, iRow, i
);
}
}
_fnCallbackFire( oSettings, 'aoRowCreatedCallback', null, [nTr, rowData, iRow] );
}
// Remove once webkit bug 131819 and Chromium bug 365619 have been resolved
// and deployed
row.nTr.setAttribute( 'role', 'row' );
}
/**
* Add attributes to a row based on the special `DT_*` parameters in a data
* source object.
* @param {object} settings DataTables settings object
* @param {object} row DataTables row object for the row to be modified
* @memberof DataTable#oApi
*/
function _fnRowAttributes( settings, row )
{
var tr = row.nTr;
var data = row._aData;
if ( tr ) {
var id = settings.rowIdFn( data );
if ( id ) {
tr.id = id;
}
if ( data.DT_RowClass ) {
// Remove any classes added by DT_RowClass before
var a = data.DT_RowClass.split(' ');
row.__rowc = row.__rowc ?
_unique( row.__rowc.concat( a ) ) :
a;
$(tr)
.removeClass( row.__rowc.join(' ') )
.addClass( data.DT_RowClass );
}
if ( data.DT_RowAttr ) {
$(tr).attr( data.DT_RowAttr );
}
if ( data.DT_RowData ) {
$(tr).data( data.DT_RowData );
}
}
}
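/*
 * Illustrative note: a row data object using the special `DT_*` parameters
 * consumed above might look like the sketch below (the values are
 * hypothetical; the row id is read separately via `settings.rowIdFn`, by
 * default from `DT_RowId`):
 *
 *   {
 *     "name":        "Tiger Nixon",
 *     "DT_RowClass": "important",                  // class(es) added to the tr
 *     "DT_RowAttr":  { "data-status": "active" },  // attributes set on the tr
 *     "DT_RowData":  { "key": 17 }                 // jQuery data set on the tr
 *   }
 */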
/**
* Create the HTML header for the table
* @param {object} oSettings dataTables settings object
* @memberof DataTable#oApi
*/
function _fnBuildHead( oSettings )
{
var i, ien, cell, row, column;
var thead = oSettings.nTHead;
var tfoot = oSettings.nTFoot;
var createHeader = $('th, td', thead).length === 0;
var classes = oSettings.oClasses;
var columns = oSettings.aoColumns;
if ( createHeader ) {
row = $('<tr/>').appendTo( thead );
}
for ( i=0, ien=columns.length ; i<ien ; i++ ) {
column = columns[i];
cell = $( column.nTh ).addClass( column.sClass );
if ( createHeader ) {
cell.appendTo( row );
}
// 1.11 move into sorting
if ( oSettings.oFeatures.bSort ) {
cell.addClass( column.sSortingClass );
if ( column.bSortable !== false ) {
cell
.attr( 'tabindex', oSettings.iTabIndex )
.attr( 'aria-controls', oSettings.sTableId );
_fnSortAttachListener( oSettings, column.nTh, i );
}
}
if ( column.sTitle != cell[0].innerHTML ) {
cell.html( column.sTitle );
}
_fnRenderer( oSettings, 'header' )(
oSettings, cell, column, classes
);
}
if ( createHeader ) {
_fnDetectHeader( oSettings.aoHeader, thead );
}
/* ARIA role for the rows */
$(thead).find('>tr').attr('role', 'row');
/* Deal with the footer - add classes if required */
$(thead).find('>tr>th, >tr>td').addClass( classes.sHeaderTH );
$(tfoot).find('>tr>th, >tr>td').addClass( classes.sFooterTH );
// Cache the footer cells. Note that we only take the cells from the first
// row in the footer. If there is more than one row the user wants to
// interact with, they need to use the table().foot() method. Note also this
// allows cells to be used for multiple columns using colspan
if ( tfoot !== null ) {
var cells = oSettings.aoFooter[0];
for ( i=0, ien=cells.length ; i<ien ; i++ ) {
column = columns[i];
column.nTf = cells[i].cell;
if ( column.sClass ) {
$(column.nTf).addClass( column.sClass );
}
}
}
}
/**
* Draw the header (or footer) element based on the column visibility states. The
* methodology here is to use the layout array from _fnDetectHeader, modified for
* the instantaneous column visibility, to construct the new layout. The grid is
* traversed one cell at a time in a rows x columns grid fashion, although each
* cell insert can cover multiple elements in the grid - which is tracked using
* the aApplied array. Cell inserts in the grid will only occur where there isn't
* already a cell in that position.
* @param {object} oSettings dataTables settings object
* @param {array} aoSource Layout array from _fnDetectHeader
* @param {boolean} [bIncludeHidden=false] If true then include the hidden columns in the calculations
* @memberof DataTable#oApi
*/
function _fnDrawHead( oSettings, aoSource, bIncludeHidden )
{
var i, iLen, j, jLen, k, kLen, n, nLocalTr;
var aoLocal = [];
var aApplied = [];
var iColumns = oSettings.aoColumns.length;
var iRowspan, iColspan;
if ( ! aoSource )
{
return;
}
if ( bIncludeHidden === undefined )
{
bIncludeHidden = false;
}
/* Make a copy of the master layout array, but without the visible columns in it */
for ( i=0, iLen=aoSource.length ; i<iLen ; i++ )
{
aoLocal[i] = aoSource[i].slice();
aoLocal[i].nTr = aoSource[i].nTr;
/* Remove any columns which are currently hidden */
for ( j=iColumns-1 ; j>=0 ; j-- )
{
if ( !oSettings.aoColumns[j].bVisible && !bIncludeHidden )
{
aoLocal[i].splice( j, 1 );
}
}
/* Prep the applied array - it needs an element for each row */
aApplied.push( [] );
}
for ( i=0, iLen=aoLocal.length ; i<iLen ; i++ )
{
nLocalTr = aoLocal[i].nTr;
/* All cells are going to be replaced, so empty out the row */
if ( nLocalTr )
{
while( (n = nLocalTr.firstChild) )
{
nLocalTr.removeChild( n );
}
}
for ( j=0, jLen=aoLocal[i].length ; j<jLen ; j++ )
{
iRowspan = 1;
iColspan = 1;
/* Check to see if there is already a cell (row/colspan) covering our target
* insert point. If there is, then there is nothing to do.
*/
if ( aApplied[i][j] === undefined )
{
nLocalTr.appendChild( aoLocal[i][j].cell );
aApplied[i][j] = 1;
/* Expand the cell to cover as many rows as needed */
while ( aoLocal[i+iRowspan] !== undefined &&
aoLocal[i][j].cell == aoLocal[i+iRowspan][j].cell )
{
aApplied[i+iRowspan][j] = 1;
iRowspan++;
}
/* Expand the cell to cover as many columns as needed */
while ( aoLocal[i][j+iColspan] !== undefined &&
aoLocal[i][j].cell == aoLocal[i][j+iColspan].cell )
{
/* Must update the applied array over the rows for the columns */
for ( k=0 ; k<iRowspan ; k++ )
{
aApplied[i+k][j+iColspan] = 1;
}
iColspan++;
}
/* Do the actual expansion in the DOM */
$(aoLocal[i][j].cell)
.attr('rowspan', iRowspan)
.attr('colspan', iColspan);
}
}
}
}
/**
* Insert the required TR nodes into the table for display
* @param {object} oSettings dataTables settings object
* @memberof DataTable#oApi
*/
function _fnDraw( oSettings )
{
/* Provide a pre-callback function which can be used to cancel the draw if false is returned */
var aPreDraw = _fnCallbackFire( oSettings, 'aoPreDrawCallback', 'preDraw', [oSettings] );
if ( $.inArray( false, aPreDraw ) !== -1 )
{
_fnProcessingDisplay( oSettings, false );
return;
}
var i, iLen, n;
var anRows = [];
var iRowCount = 0;
var asStripeClasses = oSettings.asStripeClasses;
var iStripes = asStripeClasses.length;
var iOpenRows = oSettings.aoOpenRows.length;
var oLang = oSettings.oLanguage;
var iInitDisplayStart = oSettings.iInitDisplayStart;
var bServerSide = _fnDataSource( oSettings ) == 'ssp';
var aiDisplay = oSettings.aiDisplay;
oSettings.bDrawing = true;
/* Check and see if we have an initial draw position from state saving */
if ( iInitDisplayStart !== undefined && iInitDisplayStart !== -1 )
{
oSettings._iDisplayStart = bServerSide ?
iInitDisplayStart :
iInitDisplayStart >= oSettings.fnRecordsDisplay() ?
0 :
iInitDisplayStart;
oSettings.iInitDisplayStart = -1;
}
var iDisplayStart = oSettings._iDisplayStart;
var iDisplayEnd = oSettings.fnDisplayEnd();
/* Server-side processing draw intercept */
if ( oSettings.bDeferLoading )
{
oSettings.bDeferLoading = false;
oSettings.iDraw++;
_fnProcessingDisplay( oSettings, false );
}
else if ( !bServerSide )
{
oSettings.iDraw++;
}
else if ( !oSettings.bDestroying && !_fnAjaxUpdate( oSettings ) )
{
return;
}
if ( aiDisplay.length !== 0 )
{
var iStart = bServerSide ? 0 : iDisplayStart;
var iEnd = bServerSide ? oSettings.aoData.length : iDisplayEnd;
for ( var j=iStart ; j<iEnd ; j++ )
{
var iDataIndex = aiDisplay[j];
var aoData = oSettings.aoData[ iDataIndex ];
if ( aoData.nTr === null )
{
_fnCreateTr( oSettings, iDataIndex );
}
var nRow = aoData.nTr;
/* Remove the old striping classes and then add the new one */
if ( iStripes !== 0 )
{
var sStripe = asStripeClasses[ iRowCount % iStripes ];
if ( aoData._sRowStripe != sStripe )
{
$(nRow).removeClass( aoData._sRowStripe ).addClass( sStripe );
aoData._sRowStripe = sStripe;
}
}
// Row callback functions - might want to manipulate the row
// iRowCount and j are not currently documented. Are they at all
// useful?
_fnCallbackFire( oSettings, 'aoRowCallback', null,
[nRow, aoData._aData, iRowCount, j] );
anRows.push( nRow );
iRowCount++;
}
}
else
{
/* Table is empty - create a row with an empty message in it */
var sZero = oLang.sZeroRecords;
if ( oSettings.iDraw == 1 && _fnDataSource( oSettings ) == 'ajax' )
{
sZero = oLang.sLoadingRecords;
}
else if ( oLang.sEmptyTable && oSettings.fnRecordsTotal() === 0 )
{
sZero = oLang.sEmptyTable;
}
anRows[ 0 ] = $( '<tr/>', { 'class': iStripes ? asStripeClasses[0] : '' } )
.append( $('<td />', {
'valign': 'top',
'colSpan': _fnVisbleColumns( oSettings ),
'class': oSettings.oClasses.sRowEmpty
} ).html( sZero ) )[0];
}
/* Header and footer callbacks */
_fnCallbackFire( oSettings, 'aoHeaderCallback', 'header', [ $(oSettings.nTHead).children('tr')[0],
_fnGetDataMaster( oSettings ), iDisplayStart, iDisplayEnd, aiDisplay ] );
_fnCallbackFire( oSettings, 'aoFooterCallback', 'footer', [ $(oSettings.nTFoot).children('tr')[0],
_fnGetDataMaster( oSettings ), iDisplayStart, iDisplayEnd, aiDisplay ] );
var body = $(oSettings.nTBody);
body.children().detach();
body.append( $(anRows) );
/* Call all required callback functions for the end of a draw */
_fnCallbackFire( oSettings, 'aoDrawCallback', 'draw', [oSettings] );
/* Draw is complete, sorting and filtering must be as well */
oSettings.bSorted = false;
oSettings.bFiltered = false;
oSettings.bDrawing = false;
}
/**
* Redraw the table - taking account of the various features which are enabled
* @param {object} oSettings dataTables settings object
* @param {boolean} [holdPosition] Keep the current paging position. By default
* the paging is reset to the first page
* @memberof DataTable#oApi
*/
function _fnReDraw( settings, holdPosition )
{
var
features = settings.oFeatures,
sort = features.bSort,
filter = features.bFilter;
if ( sort ) {
_fnSort( settings );
}
if ( filter ) {
_fnFilterComplete( settings, settings.oPreviousSearch );
}
else {
// No filtering, so we want to just use the display master
settings.aiDisplay = settings.aiDisplayMaster.slice();
}
if ( holdPosition !== true ) {
settings._iDisplayStart = 0;
}
// Let any modules know about the draw hold position state (used by
// scrolling internally)
settings._drawHold = holdPosition;
_fnDraw( settings );
settings._drawHold = false;
}
/**
* Add the options to the page HTML for the table
* @param {object} oSettings dataTables settings object
* @memberof DataTable#oApi
*/
function _fnAddOptionsHtml ( oSettings )
{
var classes = oSettings.oClasses;
var table = $(oSettings.nTable);
var holding = $('<div/>').insertBefore( table ); // Holding element for speed
var features = oSettings.oFeatures;
// All DataTables are wrapped in a div
var insert = $('<div/>', {
id: oSettings.sTableId+'_wrapper',
'class': classes.sWrapper + (oSettings.nTFoot ? '' : ' '+classes.sNoFooter)
} );
oSettings.nHolding = holding[0];
oSettings.nTableWrapper = insert[0];
oSettings.nTableReinsertBefore = oSettings.nTable.nextSibling;
/* Loop over the user set positioning and place the elements as needed */
var aDom = oSettings.sDom.split('');
var featureNode, cOption, nNewNode, cNext, sAttr, j;
for ( var i=0 ; i<aDom.length ; i++ )
{
featureNode = null;
cOption = aDom[i];
if ( cOption == '<' )
{
/* New container div */
nNewNode = $('<div/>')[0];
/* Check to see if we should append an id and/or a class name to the container */
cNext = aDom[i+1];
if ( cNext == "'" || cNext == '"' )
{
sAttr = "";
j = 2;
while ( aDom[i+j] != cNext )
{
sAttr += aDom[i+j];
j++;
}
/* Replace jQuery UI constants @todo deprecated */
if ( sAttr == "H" )
{
sAttr = classes.sJUIHeader;
}
else if ( sAttr == "F" )
{
sAttr = classes.sJUIFooter;
}
/* The attribute can be in the format of "#id.class", "#id" or "class". This logic
* breaks the string into parts and applies them as needed
*/
if ( sAttr.indexOf('.') != -1 )
{
var aSplit = sAttr.split('.');
nNewNode.id = aSplit[0].substr(1, aSplit[0].length-1);
nNewNode.className = aSplit[1];
}
else if ( sAttr.charAt(0) == "#" )
{
nNewNode.id = sAttr.substr(1, sAttr.length-1);
}
else
{
nNewNode.className = sAttr;
}
i += j; /* Move along the position array */
}
insert.append( nNewNode );
insert = $(nNewNode);
}
else if ( cOption == '>' )
{
/* End container div */
insert = insert.parent();
}
// @todo Move options into their own plugins?
else if ( cOption == 'l' && features.bPaginate && features.bLengthChange )
{
/* Length */
featureNode = _fnFeatureHtmlLength( oSettings );
}
else if ( cOption == 'f' && features.bFilter )
{
/* Filter */
featureNode = _fnFeatureHtmlFilter( oSettings );
}
else if ( cOption == 'r' && features.bProcessing )
{
/* pRocessing */
featureNode = _fnFeatureHtmlProcessing( oSettings );
}
else if ( cOption == 't' )
{
/* Table */
featureNode = _fnFeatureHtmlTable( oSettings );
}
else if ( cOption == 'i' && features.bInfo )
{
/* Info */
featureNode = _fnFeatureHtmlInfo( oSettings );
}
else if ( cOption == 'p' && features.bPaginate )
{
/* Pagination */
featureNode = _fnFeatureHtmlPaginate( oSettings );
}
else if ( DataTable.ext.feature.length !== 0 )
{
/* Plug-in features */
var aoFeatures = DataTable.ext.feature;
for ( var k=0, kLen=aoFeatures.length ; k<kLen ; k++ )
{
if ( cOption == aoFeatures[k].cFeature )
{
featureNode = aoFeatures[k].fnInit( oSettings );
break;
}
}
}
/* Add to the 2D features array */
if ( featureNode )
{
var aanFeatures = oSettings.aanFeatures;
if ( ! aanFeatures[cOption] )
{
aanFeatures[cOption] = [];
}
aanFeatures[cOption].push( featureNode );
insert.append( featureNode );
}
}
/* DOM structure built - replace the holding div with what we want */
holding.replaceWith( insert );
oSettings.nHolding = null;
}
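/*
 * Illustrative note on the `sDom` grammar parsed above: `<` opens a container
 * div (optionally followed by "#id.class", "#id" or "class" in quotes), `>`
 * closes it, and single letters insert features. A sketch (the class names
 * are hypothetical):
 *
 *   $('#example').DataTable( {
 *     // top div with length + filter, then processing and the table,
 *     // then a bottom div with info + pagination
 *     dom: '<"top"lf>rt<"bottom"ip>'
 *   } );
 */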
/**
* Use the DOM source to create an array of header cells. The idea here is to
* create a layout grid (array) of rows x columns, which contains a reference
* to the cell at that point in the grid (regardless of col/rowspan), such that
* any column / row could be removed and the new grid constructed
* @param {array} aLayout Array to store the calculated layout in
* @param {node} nThead The header/footer element for the table
* @memberof DataTable#oApi
*/
function _fnDetectHeader ( aLayout, nThead )
{
var nTrs = $(nThead).children('tr');
var nTr, nCell;
var i, k, l, iLen, jLen, iColShifted, iColumn, iColspan, iRowspan;
var bUnique;
var fnShiftCol = function ( a, i, j ) {
var k = a[i];
while ( k[j] ) {
j++;
}
return j;
};
aLayout.splice( 0, aLayout.length );
/* We know how many rows there are in the layout - so prep it */
for ( i=0, iLen=nTrs.length ; i<iLen ; i++ )
{
aLayout.push( [] );
}
/* Calculate a layout array */
for ( i=0, iLen=nTrs.length ; i<iLen ; i++ )
{
nTr = nTrs[i];
iColumn = 0;
/* For every cell in the row... */
nCell = nTr.firstChild;
while ( nCell ) {
if ( nCell.nodeName.toUpperCase() == "TD" ||
nCell.nodeName.toUpperCase() == "TH" )
{
/* Get the col and rowspan attributes from the DOM and sanitise them */
iColspan = nCell.getAttribute('colspan') * 1;
iRowspan = nCell.getAttribute('rowspan') * 1;
iColspan = (!iColspan || iColspan===0 || iColspan===1) ? 1 : iColspan;
iRowspan = (!iRowspan || iRowspan===0 || iRowspan===1) ? 1 : iRowspan;
/* There might be colspan cells already in this row, so shift our target
* accordingly
*/
iColShifted = fnShiftCol( aLayout, i, iColumn );
/* Cache calculation for unique columns */
bUnique = iColspan === 1 ? true : false;
/* If there is col / rowspan, copy the information into the layout grid */
for ( l=0 ; l<iColspan ; l++ )
{
for ( k=0 ; k<iRowspan ; k++ )
{
aLayout[i+k][iColShifted+l] = {
"cell": nCell,
"unique": bUnique
};
aLayout[i+k].nTr = nTr;
}
}
}
nCell = nCell.nextSibling;
}
}
}
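/*
 * Illustrative note: for a two row header where the first cell spans both
 * rows and the second spans two columns, i.e.
 *
 *   <tr><th rowspan="2">A</th><th colspan="2">B</th></tr>
 *   <tr><th>C</th><th>D</th></tr>
 *
 * the grid built above is (cells shown by name, `unique` in brackets):
 *
 *   aLayout[0] = [ A [true], B [false], B [false] ]
 *   aLayout[1] = [ A [true], C [true],  D [true]  ]
 */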
/**
* Get an array of unique th elements, one for each column
* @param {object} oSettings dataTables settings object
* @param {node} nHeader automatically detect the layout from this node - optional
* @param {array} aLayout thead/tfoot layout from _fnDetectHeader - optional
* @returns {array} List of unique th elements, one per column
* @memberof DataTable#oApi
*/
function _fnGetUniqueThs ( oSettings, nHeader, aLayout )
{
var aReturn = [];
if ( !aLayout )
{
aLayout = oSettings.aoHeader;
if ( nHeader )
{
aLayout = [];
_fnDetectHeader( aLayout, nHeader );
}
}
for ( var i=0, iLen=aLayout.length ; i<iLen ; i++ )
{
for ( var j=0, jLen=aLayout[i].length ; j<jLen ; j++ )
{
if ( aLayout[i][j].unique &&
(!aReturn[j] || !oSettings.bSortCellsTop) )
{
aReturn[j] = aLayout[i][j].cell;
}
}
}
return aReturn;
}
/**
* Create an Ajax call based on the table's settings, taking into account that
* parameters can have multiple forms, and backwards compatibility.
*
* @param {object} oSettings dataTables settings object
* @param {array} data Data to send to the server, required by
* DataTables - may be augmented by developer callbacks
* @param {function} fn Callback function to run when data is obtained
*/
function _fnBuildAjax( oSettings, data, fn )
{
// Compatibility with 1.9-, allow fnServerData and event to manipulate
_fnCallbackFire( oSettings, 'aoServerParams', 'serverParams', [data] );
// Convert to object based for 1.10+ if using the old array scheme which can
// come from server-side processing or serverParams
if ( data && $.isArray(data) ) {
var tmp = {};
var rbracket = /(.*?)\[\]$/;
$.each( data, function (key, val) {
var match = val.name.match(rbracket);
if ( match ) {
// Support for arrays
var name = match[0];
if ( ! tmp[ name ] ) {
tmp[ name ] = [];
}
tmp[ name ].push( val.value );
}
else {
tmp[val.name] = val.value;
}
} );
data = tmp;
}
var ajaxData;
var ajax = oSettings.ajax;
var instance = oSettings.oInstance;
var callback = function ( json ) {
_fnCallbackFire( oSettings, null, 'xhr', [oSettings, json, oSettings.jqXHR] );
fn( json );
};
if ( $.isPlainObject( ajax ) && ajax.data )
{
ajaxData = ajax.data;
var newData = $.isFunction( ajaxData ) ?
ajaxData( data, oSettings ) : // fn can manipulate data or return
ajaxData; // an object or array to merge
// If the function returned something, use that alone
data = $.isFunction( ajaxData ) && newData ?
newData :
$.extend( true, data, newData );
// Remove the data property as we've resolved it already and don't want
// jQuery to do it again (it is restored at the end of the function)
delete ajax.data;
}
var baseAjax = {
"data": data,
"success": function (json) {
var error = json.error || json.sError;
if ( error ) {
_fnLog( oSettings, 0, error );
}
oSettings.json = json;
callback( json );
},
"dataType": "json",
"cache": false,
"type": oSettings.sServerMethod,
"error": function (xhr, error, thrown) {
var ret = _fnCallbackFire( oSettings, null, 'xhr', [oSettings, null, oSettings.jqXHR] );
if ( $.inArray( true, ret ) === -1 ) {
if ( error == "parsererror" ) {
_fnLog( oSettings, 0, 'Invalid JSON response', 1 );
}
else if ( xhr.readyState === 4 ) {
_fnLog( oSettings, 0, 'Ajax error', 7 );
}
}
_fnProcessingDisplay( oSettings, false );
}
};
// Store the data submitted for the API
oSettings.oAjaxData = data;
// Allow plug-ins and external processes to modify the data
_fnCallbackFire( oSettings, null, 'preXhr', [oSettings, data] );
if ( oSettings.fnServerData )
{
// DataTables 1.9- compatibility
oSettings.fnServerData.call( instance,
oSettings.sAjaxSource,
$.map( data, function (val, key) { // Need to convert back to 1.9 trad format
return { name: key, value: val };
} ),
callback,
oSettings
);
}
else if ( oSettings.sAjaxSource || typeof ajax === 'string' )
{
// DataTables 1.9- compatibility
oSettings.jqXHR = $.ajax( $.extend( baseAjax, {
url: ajax || oSettings.sAjaxSource
} ) );
}
else if ( $.isFunction( ajax ) )
{
// Is a function - let the caller define what needs to be done
oSettings.jqXHR = ajax.call( instance, data, callback, oSettings );
}
else
{
// Object to extend the base settings
oSettings.jqXHR = $.ajax( $.extend( baseAjax, ajax ) );
// Restore for next time around
ajax.data = ajaxData;
}
}
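/*
 * Illustrative note: the `ajax` option resolved above accepts several forms.
 * Sketches (the URL and property names are hypothetical):
 *
 *   ajax: '/api/data'                         // plain URL string
 *
 *   ajax: {                                   // jQuery ajax options object
 *     url: '/api/data',
 *     data: function ( d ) { d.extra = 1; }   // augment the request data
 *   }
 *
 *   ajax: function ( data, callback, settings ) {  // fully custom transport -
 *     callback( { data: [] } );                    // must invoke the callback
 *   }
 */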
/**
* Update the table using an Ajax call
* @param {object} settings dataTables settings object
* @returns {boolean} Block the table drawing or not
* @memberof DataTable#oApi
*/
function _fnAjaxUpdate( settings )
{
if ( settings.bAjaxDataGet ) {
settings.iDraw++;
_fnProcessingDisplay( settings, true );
_fnBuildAjax(
settings,
_fnAjaxParameters( settings ),
function(json) {
_fnAjaxUpdateDraw( settings, json );
}
);
return false;
}
return true;
}
/**
* Build up the parameters in an object needed for a server-side processing
* request. Note that this is basically done twice, in different ways - a modern
* method which is used by default in DataTables 1.10, using objects and
* arrays, and the 1.9- method, which uses name / value pairs. The 1.9 method is
* used if the sAjaxSource option is used in the initialisation, or if the
* legacyAjax option is set.
* @param {object} settings dataTables settings object
* @returns {object|array} Request parameters to send to the server
* @memberof DataTable#oApi
*/
function _fnAjaxParameters( settings )
{
var
columns = settings.aoColumns,
columnCount = columns.length,
features = settings.oFeatures,
preSearch = settings.oPreviousSearch,
preColSearch = settings.aoPreSearchCols,
i, data = [], dataProp, column, columnSearch,
sort = _fnSortFlatten( settings ),
displayStart = settings._iDisplayStart,
displayLength = features.bPaginate !== false ?
settings._iDisplayLength :
-1;
var param = function ( name, value ) {
data.push( { 'name': name, 'value': value } );
};
// DataTables 1.9- compatible method
param( 'sEcho', settings.iDraw );
param( 'iColumns', columnCount );
param( 'sColumns', _pluck( columns, 'sName' ).join(',') );
param( 'iDisplayStart', displayStart );
param( 'iDisplayLength', displayLength );
// DataTables 1.10+ method
var d = {
draw: settings.iDraw,
columns: [],
order: [],
start: displayStart,
length: displayLength,
search: {
value: preSearch.sSearch,
regex: preSearch.bRegex
}
};
for ( i=0 ; i<columnCount ; i++ ) {
column = columns[i];
columnSearch = preColSearch[i];
dataProp = typeof column.mData=="function" ? 'function' : column.mData ;
d.columns.push( {
data: dataProp,
name: column.sName,
searchable: column.bSearchable,
orderable: column.bSortable,
search: {
value: columnSearch.sSearch,
regex: columnSearch.bRegex
}
} );
param( "mDataProp_"+i, dataProp );
if ( features.bFilter ) {
param( 'sSearch_'+i, columnSearch.sSearch );
param( 'bRegex_'+i, columnSearch.bRegex );
param( 'bSearchable_'+i, column.bSearchable );
}
if ( features.bSort ) {
param( 'bSortable_'+i, column.bSortable );
}
}
if ( features.bFilter ) {
param( 'sSearch', preSearch.sSearch );
param( 'bRegex', preSearch.bRegex );
}
if ( features.bSort ) {
$.each( sort, function ( i, val ) {
d.order.push( { column: val.col, dir: val.dir } );
param( 'iSortCol_'+i, val.col );
param( 'sSortDir_'+i, val.dir );
} );
param( 'iSortingCols', sort.length );
}
// If the legacy.ajax parameter is null, then we automatically decide which
// form to use, based on sAjaxSource
var legacy = DataTable.ext.legacy.ajax;
if ( legacy === null ) {
return settings.sAjaxSource ? data : d;
}
// Otherwise, if legacy has been specified then we use that to decide on the
// form
return legacy ? data : d;
}
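/*
 * For reference, the modern (1.10+) request object `d` built above has the
 * following shape when serialised (the values here are illustrative):
 *
 *   {
 *     "draw": 3,
 *     "start": 20,
 *     "length": 10,
 *     "search": { "value": "tiger", "regex": false },
 *     "order": [ { "column": 0, "dir": "asc" } ],
 *     "columns": [ {
 *       "data": "name", "name": "", "searchable": true, "orderable": true,
 *       "search": { "value": "", "regex": false }
 *     } ]
 *   }
 */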
/**
* Take the data from the server (replacing the old data) and redraw the table
* @param {object} settings dataTables settings object
* @param {object} json JSON data returned from the server.
* @param {string} json.sEcho Tracking flag for DataTables to match requests
* @param {int} json.iTotalRecords Number of records in the data set, not accounting for filtering
* @param {int} json.iTotalDisplayRecords Number of records in the data set, accounting for filtering
* @param {array} json.aaData The data to display on this page
* @param {string} [json.sColumns] Column ordering (sName, comma separated)
* @memberof DataTable#oApi
*/
function _fnAjaxUpdateDraw ( settings, json )
{
// v1.10 uses camelCase variables, while 1.9 uses Hungarian notation.
// Support both
var compat = function ( old, modern ) {
return json[old] !== undefined ? json[old] : json[modern];
};
var data = _fnAjaxDataSrc( settings, json );
var draw = compat( 'sEcho', 'draw' );
var recordsTotal = compat( 'iTotalRecords', 'recordsTotal' );
var recordsFiltered = compat( 'iTotalDisplayRecords', 'recordsFiltered' );
if ( draw ) {
// Protect against out of sequence returns
if ( draw*1 < settings.iDraw ) {
return;
}
settings.iDraw = draw * 1;
}
_fnClearTable( settings );
settings._iRecordsTotal = parseInt(recordsTotal, 10);
settings._iRecordsDisplay = parseInt(recordsFiltered, 10);
for ( var i=0, ien=data.length ; i<ien ; i++ ) {
_fnAddData( settings, data[i] );
}
settings.aiDisplay = settings.aiDisplayMaster.slice();
settings.bAjaxDataGet = false;
_fnDraw( settings );
if ( ! settings._bInitComplete ) {
_fnInitComplete( settings, json );
}
settings.bAjaxDataGet = true;
_fnProcessingDisplay( settings, false );
}
/**
* Get the data from the JSON data source to use for drawing a table. Using
* `_fnGetObjectDataFn` allows the data to be sourced from a property of the
* source object, or from a processing function.
* @param {object} oSettings dataTables settings object
* @param {object} json Data source object / array from the server
* @return {array} Array of data to use
*/
function _fnAjaxDataSrc ( oSettings, json )
{
var dataSrc = $.isPlainObject( oSettings.ajax ) && oSettings.ajax.dataSrc !== undefined ?
oSettings.ajax.dataSrc :
oSettings.sAjaxDataProp; // Compatibility with 1.9-.
// Compatibility with 1.9-. In order to read from aaData, check if the
// default has been changed; if not, check for aaData first
if ( dataSrc === 'data' ) {
return json.aaData || json[dataSrc];
}
return dataSrc !== "" ?
_fnGetObjectDataFn( dataSrc )( json ) :
json;
}
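/*
 * Illustrative sketches of the `ajax.dataSrc` forms resolved above (the URL
 * and property names are hypothetical):
 *
 *   ajax: { url: '/api/data', dataSrc: 'rows' }  // rows are in json.rows
 *   ajax: { url: '/api/data', dataSrc: '' }      // json itself is the array
 *   ajax: {
 *     url: '/api/data',
 *     dataSrc: function ( json ) {               // processing function
 *       return json.rows;
 *     }
 *   }
 */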
/**
* Generate the node required for filtering text
* @returns {node} Filter control element
* @param {object} oSettings dataTables settings object
* @memberof DataTable#oApi
*/
function _fnFeatureHtmlFilter ( settings )
{
var classes = settings.oClasses;
var tableId = settings.sTableId;
var language = settings.oLanguage;
var previousSearch = settings.oPreviousSearch;
var features = settings.aanFeatures;
var input = '<input type="search" class="'+classes.sFilterInput+'"/>';
var str = language.sSearch;
str = str.match(/_INPUT_/) ?
str.replace('_INPUT_', input) :
str+input;
var filter = $('<div/>', {
'id': ! features.f ? tableId+'_filter' : null,
'class': classes.sFilter
} )
.append( $('<p/>' ).append( str ) );
var searchFn = function() {
/* Update all other filter input elements for the new display */
var n = features.f;
var val = !this.value ? "" : this.value; // mental IE8 fix :-(
/* Now do the filter */
if ( val != previousSearch.sSearch ) {
_fnFilterComplete( settings, {
"sSearch": val,
"bRegex": previousSearch.bRegex,
"bSmart": previousSearch.bSmart ,
"bCaseInsensitive": previousSearch.bCaseInsensitive
} );
// Need to redraw, without resorting
settings._iDisplayStart = 0;
_fnDraw( settings );
}
};
var searchDelay = settings.searchDelay !== null ?
settings.searchDelay :
_fnDataSource( settings ) === 'ssp' ?
400 :
0;
var jqFilter = $('input', filter)
.val( previousSearch.sSearch )
.attr( 'placeholder', language.sSearchPlaceholder )
.on(
'keyup.DT search.DT input.DT paste.DT cut.DT',
searchDelay ?
_fnThrottle( searchFn, searchDelay ) :
searchFn
)
.on( 'keypress.DT', function(e) {
/* Prevent form submission */
if ( e.keyCode == 13 ) {
return false;
}
} )
.attr('aria-controls', tableId);
// Update the input elements whenever the table is filtered
$(settings.nTable).on( 'search.dt.DT', function ( ev, s ) {
if ( settings === s ) {
// IE9 throws an 'unknown error' if document.activeElement is used
// inside an iframe or frame...
try {
if ( jqFilter[0] !== document.activeElement ) {
jqFilter.val( previousSearch.sSearch );
}
}
catch ( e ) {}
}
} );
return filter[0];
}
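/*
 * Illustrative note: the `_INPUT_` macro handled above controls where the
 * search input is placed in the label text; without it the input is simply
 * appended after the string. A sketch:
 *
 *   $('#example').DataTable( {
 *     language: { search: 'Filter records: _INPUT_' }
 *   } );
 */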
/**
* Filter the table using both the global filter and column based filtering
* @param {object} oSettings dataTables settings object
* @param {object} oSearch search information
* @param {int} [iForce] force a re-search of the master array (1) or not (undefined or 0)
* @memberof DataTable#oApi
*/
function _fnFilterComplete ( oSettings, oInput, iForce )
{
var oPrevSearch = oSettings.oPreviousSearch;
var aoPrevSearch = oSettings.aoPreSearchCols;
var fnSaveFilter = function ( oFilter ) {
/* Save the filtering values */
oPrevSearch.sSearch = oFilter.sSearch;
oPrevSearch.bRegex = oFilter.bRegex;
oPrevSearch.bSmart = oFilter.bSmart;
oPrevSearch.bCaseInsensitive = oFilter.bCaseInsensitive;
};
var fnRegex = function ( o ) {
// Backwards compatibility with the bEscapeRegex option
return o.bEscapeRegex !== undefined ? !o.bEscapeRegex : o.bRegex;
};
// Resolve any column types that are unknown due to addition or invalidation
// @todo As per sort - can this be moved into an event handler?
_fnColumnTypes( oSettings );
/* In server-side processing all filtering is done by the server, so no point hanging around here */
if ( _fnDataSource( oSettings ) != 'ssp' )
{
/* Global filter */
_fnFilter( oSettings, oInput.sSearch, iForce, fnRegex(oInput), oInput.bSmart, oInput.bCaseInsensitive );
fnSaveFilter( oInput );
/* Now do the individual column filter */
for ( var i=0 ; i<aoPrevSearch.length ; i++ )
{
_fnFilterColumn( oSettings, aoPrevSearch[i].sSearch, i, fnRegex(aoPrevSearch[i]),
aoPrevSearch[i].bSmart, aoPrevSearch[i].bCaseInsensitive );
}
/* Custom filtering */
_fnFilterCustom( oSettings );
}
else
{
fnSaveFilter( oInput );
}
/* Tell the draw function we have been filtering */
oSettings.bFiltered = true;
_fnCallbackFire( oSettings, null, 'search', [oSettings] );
}
/**
* Apply custom filtering functions
* @param {object} oSettings dataTables settings object
* @memberof DataTable#oApi
*/
function _fnFilterCustom( settings )
{
var filters = DataTable.ext.search;
var displayRows = settings.aiDisplay;
var row, rowIdx;
for ( var i=0, ien=filters.length ; i<ien ; i++ ) {
var rows = [];
// Loop over each row and see if it should be included
for ( var j=0, jen=displayRows.length ; j<jen ; j++ ) {
rowIdx = displayRows[ j ];
row = settings.aoData[ rowIdx ];
if ( filters[i]( settings, row._aFilterData, rowIdx, row._aData, j ) ) {
rows.push( rowIdx );
}
}
// So the array reference doesn't break set the results into the
// existing array
displayRows.length = 0;
$.merge( displayRows, rows );
}
}
/**
* Filter the table on a per-column basis
* @param {object} oSettings dataTables settings object
* @param {string} sInput string to filter on
* @param {int} iColumn column to filter
* @param {bool} bRegex treat search string as a regular expression or not
* @param {bool} bSmart use smart filtering or not
* @param {bool} bCaseInsensitive Do case insensitive matching or not
* @memberof DataTable#oApi
*/
function _fnFilterColumn ( settings, searchStr, colIdx, regex, smart, caseInsensitive )
{
if ( searchStr === '' ) {
return;
}
var data;
var out = [];
var display = settings.aiDisplay;
var rpSearch = _fnFilterCreateSearch( searchStr, regex, smart, caseInsensitive );
for ( var i=0 ; i<display.length ; i++ ) {
data = settings.aoData[ display[i] ]._aFilterData[ colIdx ];
if ( rpSearch.test( data ) ) {
out.push( display[i] );
}
}
settings.aiDisplay = out;
}
/**
* Filter the data table based on user input and draw the table
* @param {object} settings dataTables settings object
* @param {string} input string to filter on
* @param {int} force optional - force a re-search of the master array (1) or not (undefined or 0)
* @param {bool} regex treat as a regular expression or not
* @param {bool} smart perform smart filtering or not
* @param {bool} caseInsensitive Do case insensitive matching or not
* @memberof DataTable#oApi
*/
function _fnFilter( settings, input, force, regex, smart, caseInsensitive )
{
var rpSearch = _fnFilterCreateSearch( input, regex, smart, caseInsensitive );
var prevSearch = settings.oPreviousSearch.sSearch;
var displayMaster = settings.aiDisplayMaster;
var display, invalidated, i;
var filtered = [];
// Need to take account of custom filtering functions - always filter
if ( DataTable.ext.search.length !== 0 ) {
force = true;
}
// Check if any of the rows were invalidated
invalidated = _fnFilterData( settings );
// If the input is blank - we just want the full data set
if ( input.length <= 0 ) {
settings.aiDisplay = displayMaster.slice();
}
else {
// New search - start from the master array
if ( invalidated ||
force ||
prevSearch.length > input.length ||
input.indexOf(prevSearch) !== 0 ||
settings.bSorted // On resort, the display master needs to be
// re-filtered since indexes will have changed
) {
settings.aiDisplay = displayMaster.slice();
}
// Search the display array
display = settings.aiDisplay;
for ( i=0 ; i<display.length ; i++ ) {
if ( rpSearch.test( settings.aoData[ display[i] ]._sFilterRow ) ) {
filtered.push( display[i] );
}
}
settings.aiDisplay = filtered;
}
}
/**
* Build a regular expression object suitable for searching a table
* @param {string} sSearch string to search for
* @param {bool} bRegex treat as a regular expression or not
* @param {bool} bSmart perform smart filtering or not
* @param {bool} bCaseInsensitive Do case insensitive matching or not
* @returns {RegExp} constructed object
* @memberof DataTable#oApi
*/
function _fnFilterCreateSearch( search, regex, smart, caseInsensitive )
{
search = regex ?
search :
_fnEscapeRegex( search );
if ( smart ) {
/* For smart filtering we want to allow the search to work regardless of
* word order. We also want double quoted text to be preserved, so that word
* order matters inside the quotes - a la Google. So this is what we want to
* generate for the input `one "two three" four`:
*
* ^(?=.*?one)(?=.*?two three)(?=.*?four).*$
*/
var a = $.map( search.match( /"[^"]+"|[^ ]+/g ) || [''], function ( word ) {
if ( word.charAt(0) === '"' ) {
var m = word.match( /^"(.*)"$/ );
word = m ? m[1] : word;
}
return word.replace('"', '');
} );
search = '^(?=.*?'+a.join( ')(?=.*?' )+').*$';
}
return new RegExp( search, caseInsensitive ? 'i' : '' );
}
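/*
 * Worked example of the smart filter construction above: the input
 * `one "two three" four` tokenises (quotes stripped) to
 *
 *   [ 'one', 'two three', 'four' ]
 *
 * and becomes `^(?=.*?one)(?=.*?two three)(?=.*?four).*$` - every term must
 * appear somewhere in the row, in any order, while the quoted phrase must
 * appear exactly as written.
 */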
/**
* Escape a string such that it can be used in a regular expression
* @param {string} sVal string to escape
* @returns {string} escaped string
* @memberof DataTable#oApi
*/
var _fnEscapeRegex = DataTable.util.escapeRegex;
var __filter_div = $('<div>')[0];
var __filter_div_textContent = __filter_div.textContent !== undefined;
// Update the filtering data for each row if needed (by invalidation or first run)
function _fnFilterData ( settings )
{
var columns = settings.aoColumns;
var column;
var i, j, ien, jen, filterData, cellData, row;
var formatters = DataTable.ext.type.search;
var wasInvalidated = false;
for ( i=0, ien=settings.aoData.length ; i<ien ; i++ ) {
row = settings.aoData[i];
if ( ! row._aFilterData ) {
filterData = [];
for ( j=0, jen=columns.length ; j<jen ; j++ ) {
column = columns[j];
if ( column.bSearchable ) {
cellData = _fnGetCellData( settings, i, j, 'filter' );
if ( formatters[ column.sType ] ) {
cellData = formatters[ column.sType ]( cellData );
}
// Search in DataTables 1.10 is string based. In 1.11 this
// should be altered to also allow strict type checking.
if ( cellData === null ) {
cellData = '';
}
if ( typeof cellData !== 'string' && cellData.toString ) {
cellData = cellData.toString();
}
}
else {
cellData = '';
}
// If it looks like there is an HTML entity in the string,
// attempt to decode it so filtering works as expected. Note that
// we could use a single line of jQuery to do this, but the DOM
// method used here is much faster: http://jsperf.com/html-decode
if ( cellData.indexOf && cellData.indexOf('&') !== -1 ) {
__filter_div.innerHTML = cellData;
cellData = __filter_div_textContent ?
__filter_div.textContent :
__filter_div.innerText;
}
if ( cellData.replace ) {
cellData = cellData.replace(/[\r\n]/g, '');
}
filterData.push( cellData );
}
row._aFilterData = filterData;
row._sFilterRow = filterData.join(' ');
wasInvalidated = true;
}
}
return wasInvalidated;
}
/**
* Convert from the internal Hungarian notation to camelCase for external
* interaction
* @param {object} obj Object to convert
* @returns {object} Converted object
* @memberof DataTable#oApi
*/
function _fnSearchToCamel ( obj )
{
return {
search: obj.sSearch,
smart: obj.bSmart,
regex: obj.bRegex,
caseInsensitive: obj.bCaseInsensitive
};
}
/**
* Convert from camelCase notation to the internal Hungarian. We could use the
* Hungarian convert function here, but this is cleaner
* @param {object} obj Object to convert
* @returns {object} Converted object
* @memberof DataTable#oApi
*/
function _fnSearchToHung ( obj )
{
return {
sSearch: obj.search,
bSmart: obj.smart,
bRegex: obj.regex,
bCaseInsensitive: obj.caseInsensitive
};
}
/**
* Generate the node required for the info display
* @param {object} oSettings dataTables settings object
* @returns {node} Information element
* @memberof DataTable#oApi
*/
function _fnFeatureHtmlInfo ( settings )
{
var
tid = settings.sTableId,
nodes = settings.aanFeatures.i,
n = $('<div/>', {
'class': settings.oClasses.sInfo,
'id': ! nodes ? tid+'_info' : null
} );
if ( ! nodes ) {
// Update display on each draw
settings.aoDrawCallback.push( {
"fn": _fnUpdateInfo,
"sName": "information"
} );
n
.attr( 'role', 'status' )
.attr( 'aria-live', 'polite' );
// Table is described by our info div
$(settings.nTable).attr( 'aria-describedby', tid+'_info' );
}
return n[0];
}
/**
* Update the information elements in the display
* @param {object} settings dataTables settings object
* @memberof DataTable#oApi
*/
function _fnUpdateInfo ( settings )
{
/* Show information about the table */
var nodes = settings.aanFeatures.i;
if ( nodes.length === 0 ) {
return;
}
var
lang = settings.oLanguage,
start = settings._iDisplayStart+1,
end = settings.fnDisplayEnd(),
max = settings.fnRecordsTotal(),
total = settings.fnRecordsDisplay(),
out = total ?
lang.sInfo :
lang.sInfoEmpty;
if ( total !== max ) {
/* Record set after filtering */
out += ' ' + lang.sInfoFiltered;
}
// Convert the macros
out += lang.sInfoPostFix;
out = _fnInfoMacros( settings, out );
var callback = lang.fnInfoCallback;
if ( callback !== null ) {
out = callback.call( settings.oInstance,
settings, start, end, max, total, out
);
}
$(nodes).html( out );
}
function _fnInfoMacros ( settings, str )
{
// When infinite scrolling, we are always starting at 1. _iDisplayStart is used only
// internally
var
formatter = settings.fnFormatNumber,
start = settings._iDisplayStart+1,
len = settings._iDisplayLength,
vis = settings.fnRecordsDisplay(),
all = len === -1;
return str.
replace(/_START_/g, formatter.call( settings, start ) ).
replace(/_END_/g, formatter.call( settings, settings.fnDisplayEnd() ) ).
replace(/_MAX_/g, formatter.call( settings, settings.fnRecordsTotal() ) ).
replace(/_TOTAL_/g, formatter.call( settings, vis ) ).
replace(/_PAGE_/g, formatter.call( settings, all ? 1 : Math.ceil( start / len ) ) ).
replace(/_PAGES_/g, formatter.call( settings, all ? 1 : Math.ceil( vis / len ) ) );
}
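/*
 * For reference, the macros replaced above in the language strings:
 *
 *   _START_  first record shown on the page (1 based)
 *   _END_    last record shown on the page
 *   _MAX_    total records, before filtering
 *   _TOTAL_  total records, after filtering
 *   _PAGE_   current page number
 *   _PAGES_  total number of pages
 *
 * e.g. an sInfo of 'Showing _START_ to _END_ of _TOTAL_ entries' might render
 * as 'Showing 1 to 10 of 57 entries'.
 */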
/**
* Draw the table for the first time, adding all required features
* @param {object} settings dataTables settings object
* @memberof DataTable#oApi
*/
function _fnInitialise ( settings )
{
var i, iLen, iAjaxStart=settings.iInitDisplayStart;
var columns = settings.aoColumns, column;
var features = settings.oFeatures;
var deferLoading = settings.bDeferLoading; // value modified by the draw
/* Ensure that the table data is fully initialised */
if ( ! settings.bInitialised ) {
setTimeout( function(){ _fnInitialise( settings ); }, 200 );
return;
}
/* Show the display HTML options */
_fnAddOptionsHtml( settings );
/* Build and draw the header / footer for the table */
_fnBuildHead( settings );
_fnDrawHead( settings, settings.aoHeader );
_fnDrawHead( settings, settings.aoFooter );
/* Okay to show that something is going on now */
_fnProcessingDisplay( settings, true );
/* Calculate sizes for columns */
if ( features.bAutoWidth ) {
_fnCalculateColumnWidths( settings );
}
for ( i=0, iLen=columns.length ; i<iLen ; i++ ) {
column = columns[i];
if ( column.sWidth ) {
column.nTh.style.width = _fnStringToCss( column.sWidth );
}
}
_fnCallbackFire( settings, null, 'preInit', [settings] );
// If there is default sorting required - let's do it. The sort function
// will do the drawing for us. Otherwise we draw the table regardless of the
// Ajax source - this allows the table to look initialised for Ajax sourcing
// data (show 'loading' message possibly)
_fnReDraw( settings );
// Server-side processing init complete is done by _fnAjaxUpdateDraw
var dataSrc = _fnDataSource( settings );
if ( dataSrc != 'ssp' || deferLoading ) {
// if there is an ajax source load the data
if ( dataSrc == 'ajax' ) {
_fnBuildAjax( settings, [], function(json) {
var aData = _fnAjaxDataSrc( settings, json );
// Got the data - add it to the table
for ( i=0 ; i<aData.length ; i++ ) {
_fnAddData( settings, aData[i] );
}
// Reset the init display for cookie saving. We've already done
// a filter, and therefore cleared it before. So we need to make
// it appear 'fresh'
settings.iInitDisplayStart = iAjaxStart;
_fnReDraw( settings );
_fnProcessingDisplay( settings, false );
_fnInitComplete( settings, json );
}, settings );
}
else {
_fnProcessingDisplay( settings, false );
_fnInitComplete( settings );
}
}
}
/**
* Draw the table for the first time, adding all required features
* @param {object} oSettings dataTables settings object
* @param {object} [json] JSON from the server that completed the table, if using Ajax source
* with client-side processing (optional)
* @memberof DataTable#oApi
*/
function _fnInitComplete ( settings, json )
{
settings._bInitComplete = true;
// When data was added after the initialisation (data or Ajax) we need to
// calculate the column sizing
if ( json || settings.oInit.aaData ) {
_fnAdjustColumnSizing( settings );
}
_fnCallbackFire( settings, null, 'plugin-init', [settings, json] );
_fnCallbackFire( settings, 'aoInitComplete', 'init', [settings, json] );
}
function _fnLengthChange ( settings, val )
{
var len = parseInt( val, 10 );
settings._iDisplayLength = len;
_fnLengthOverflow( settings );
// Fire length change event
_fnCallbackFire( settings, null, 'length', [settings, len] );
}
/**
* Generate the node required for user display length changing
* @param {object} settings dataTables settings object
* @returns {node} Display length feature node
* @memberof DataTable#oApi
*/
function _fnFeatureHtmlLength ( settings )
{
var
classes = settings.oClasses,
tableId = settings.sTableId,
menu = settings.aLengthMenu,
d2 = $.isArray( menu[0] ),
lengths = d2 ? menu[0] : menu,
language = d2 ? menu[1] : menu;
var select = $('<select/>', {
'name': tableId+'_length',
'aria-controls': tableId,
'class': classes.sLengthSelect
} );
for ( var i=0, ien=lengths.length ; i<ien ; i++ ) {
select[0][ i ] = new Option(
typeof language[i] === 'number' ?
settings.fnFormatNumber( language[i] ) :
language[i],
lengths[i]
);
}
var div = $('<div><p/></div>').addClass( classes.sLength );
if ( ! settings.aanFeatures.l ) {
div[0].id = tableId+'_length';
}
div.children().append(
settings.oLanguage.sLengthMenu.replace( '_MENU_', select[0].outerHTML )
);
// Can't use `select` variable as user might provide their own and the
// reference is broken by the use of outerHTML
$('select', div)
.val( settings._iDisplayLength )
.on( 'change.DT', function(e) {
_fnLengthChange( settings, $(this).val() );
_fnDraw( settings );
} );
// Update node value whenever anything changes the table's length
$(settings.nTable).on( 'length.dt.DT', function (e, s, len) {
if ( settings === s ) {
$('select', div).val( len );
}
} );
return div[0];
}
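/*
 * Illustrative sketches of the `aLengthMenu` forms handled above, where -1
 * means "show all" and numeric labels are run through fnFormatNumber:
 *
 *   lengthMenu: [ 10, 25, 50, 100 ]                    // 1D - values are labels
 *   lengthMenu: [ [ 10, 25, -1 ], [ 10, 25, 'All' ] ]  // 2D - values, then labels
 */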
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Note that most of the paging logic is done in
* DataTable.ext.pager
*/
/**
* Generate the node required for default pagination
* @param {object} oSettings dataTables settings object
* @returns {node} Pagination feature node
* @memberof DataTable#oApi
*/
function _fnFeatureHtmlPaginate ( settings )
{
var
type = settings.sPaginationType,
plugin = DataTable.ext.pager[ type ],
modern = typeof plugin === 'function',
redraw = function( settings ) {
_fnDraw( settings );
},
node = $('<div/>').addClass( settings.oClasses.sPaging + type )[0],
features = settings.aanFeatures;
if ( ! modern ) {
plugin.fnInit( settings, node, redraw );
}
/* Add a draw callback for the pagination on first instance, to update the paging display */
if ( ! features.p )
{
node.id = settings.sTableId+'_paginate';
settings.aoDrawCallback.push( {
"fn": function( settings ) {
if ( modern ) {
var
start = settings._iDisplayStart,
len = settings._iDisplayLength,
visRecords = settings.fnRecordsDisplay(),
all = len === -1,
page = all ? 0 : Math.ceil( start / len ),
pages = all ? 1 : Math.ceil( visRecords / len ),
buttons = plugin(page, pages),
i, ien;
for ( i=0, ien=features.p.length ; i<ien ; i++ ) {
_fnRenderer( settings, 'pageButton' )(
settings, features.p[i], i, buttons, page, pages
);
}
}
else {
plugin.fnUpdate( settings, redraw );
}
},
"sName": "pagination"
} );
}
return node;
}
/**
* Alter the display settings to change the page
* @param {object} settings DataTables settings object
* @param {string|int} action Paging action to take: "first", "previous",
* "next" or "last" or page number to jump to (integer)
* @param {bool} [redraw] Automatically draw the update or not
* @returns {bool} true if the page has changed, false if not
* @memberof DataTable#oApi
*/
function _fnPageChange ( settings, action, redraw )
{
var
start = settings._iDisplayStart,
len = settings._iDisplayLength,
records = settings.fnRecordsDisplay();
if ( records === 0 || len === -1 )
{
start = 0;
}
else if ( typeof action === "number" )
{
start = action * len;
if ( start > records )
{
start = 0;
}
}
else if ( action == "first" )
{
start = 0;
}
else if ( action == "previous" )
{
start = len >= 0 ?
start - len :
0;
if ( start < 0 )
{
start = 0;
}
}
else if ( action == "next" )
{
if ( start + len < records )
{
start += len;
}
}
else if ( action == "last" )
{
start = Math.floor( (records-1) / len) * len;
}
else
{
_fnLog( settings, 0, "Unknown paging action: "+action, 5 );
}
var changed = settings._iDisplayStart !== start;
settings._iDisplayStart = start;
if ( changed ) {
_fnCallbackFire( settings, null, 'page', [settings] );
if ( redraw ) {
_fnDraw( settings );
}
}
return changed;
}
/**
* Generate the node required for the processing node
* @param {object} settings dataTables settings object
* @returns {node} Processing element
* @memberof DataTable#oApi
*/
function _fnFeatureHtmlProcessing ( settings )
{
return $('<div/>', {
'id': ! settings.aanFeatures.r ? settings.sTableId+'_processing' : null,
'class': settings.oClasses.sProcessing
} )
.html( settings.oLanguage.sProcessing )
.insertBefore( settings.nTable )[0];
}
/**
* Display or hide the processing indicator
* @param {object} settings dataTables settings object
* @param {bool} show Show the processing indicator (true) or not (false)
* @memberof DataTable#oApi
*/
function _fnProcessingDisplay ( settings, show )
{
if ( settings.oFeatures.bProcessing ) {
$(settings.aanFeatures.r).css( 'display', show ? 'block' : 'none' );
}
_fnCallbackFire( settings, null, 'processing', [settings, show] );
}
/**
* Add any control elements for the table - specifically scrolling
* @param {object} settings dataTables settings object
* @returns {node} Node to add to the DOM
* @memberof DataTable#oApi
*/
function _fnFeatureHtmlTable ( settings )
{
var table = $(settings.nTable);
// Add the ARIA grid role to the table
table.attr( 'role', 'grid' );
// Scrolling from here on in
var scroll = settings.oScroll;
if ( scroll.sX === '' && scroll.sY === '' ) {
return settings.nTable;
}
var scrollX = scroll.sX;
var scrollY = scroll.sY;
var classes = settings.oClasses;
var caption = table.children('caption');
var captionSide = caption.length ? caption[0]._captionSide : null;
var headerClone = $( table[0].cloneNode(false) );
var footerClone = $( table[0].cloneNode(false) );
var footer = table.children('tfoot');
var _div = '<div/>';
var size = function ( s ) {
return !s ? null : _fnStringToCss( s );
};
if ( ! footer.length ) {
footer = null;
}
/*
* The HTML structure that we want to generate in this function is:
* div - scroller
* div - scroll head
* div - scroll head inner
* table - scroll head table
* thead - thead
* div - scroll body
* table - table (master table)
* thead - thead clone for sizing
* tbody - tbody
* div - scroll foot
* div - scroll foot inner
* table - scroll foot table
* tfoot - tfoot
*/
var scroller = $( _div, { 'class': classes.sScrollWrapper } )
.append(
$(_div, { 'class': classes.sScrollHead } )
.css( {
overflow: 'hidden',
position: 'relative',
border: 0,
width: scrollX ? size(scrollX) : '100%'
} )
.append(
$(_div, { 'class': classes.sScrollHeadInner } )
.css( {
'box-sizing': 'content-box',
width: scroll.sXInner || '100%'
} )
.append(
headerClone
.removeAttr('id')
.css( 'margin-left', 0 )
.append( captionSide === 'top' ? caption : null )
.append(
table.children('thead')
)
)
)
)
.append(
$(_div, { 'class': classes.sScrollBody } )
.css( {
position: 'relative',
overflow: 'auto',
width: size( scrollX )
} )
.append( table )
);
if ( footer ) {
scroller.append(
$(_div, { 'class': classes.sScrollFoot } )
.css( {
overflow: 'hidden',
border: 0,
width: scrollX ? size(scrollX) : '100%'
} )
.append(
$(_div, { 'class': classes.sScrollFootInner } )
.append(
footerClone
.removeAttr('id')
.css( 'margin-left', 0 )
.append( captionSide === 'bottom' ? caption : null )
.append(
table.children('tfoot')
)
)
)
);
}
var children = scroller.children();
var scrollHead = children[0];
var scrollBody = children[1];
var scrollFoot = footer ? children[2] : null;
// When the body is scrolled, then we also want to scroll the headers
if ( scrollX ) {
$(scrollBody).on( 'scroll.DT', function (e) {
var scrollLeft = this.scrollLeft;
scrollHead.scrollLeft = scrollLeft;
if ( footer ) {
scrollFoot.scrollLeft = scrollLeft;
}
} );
}
$(scrollBody).css(
scrollY && scroll.bCollapse ? 'max-height' : 'height',
scrollY
);
settings.nScrollHead = scrollHead;
settings.nScrollBody = scrollBody;
settings.nScrollFoot = scrollFoot;
// On redraw - align columns
settings.aoDrawCallback.push( {
"fn": _fnScrollDraw,
"sName": "scrolling"
} );
return scroller[0];
}
/**
* Update the header, footer and body tables for resizing - i.e. column
* alignment.
*
* Welcome to the most horrible function in DataTables. The process that this
* function follows is basically:
* 1. Re-create the table inside the scrolling div
* 2. Take live measurements from the DOM
* 3. Apply the measurements to align the columns
* 4. Clean up
*
* @param {object} settings dataTables settings object
* @memberof DataTable#oApi
*/
function _fnScrollDraw ( settings )
{
// Given that this is such a monster function, a lot of variables are used
// to try and keep the minimised size as small as possible
var
scroll = settings.oScroll,
scrollX = scroll.sX,
scrollXInner = scroll.sXInner,
scrollY = scroll.sY,
barWidth = scroll.iBarWidth,
divHeader = $(settings.nScrollHead),
divHeaderStyle = divHeader[0].style,
divHeaderInner = divHeader.children('div'),
divHeaderInnerStyle = divHeaderInner[0].style,
divHeaderTable = divHeaderInner.children('table'),
divBodyEl = settings.nScrollBody,
divBody = $(divBodyEl),
divBodyStyle = divBodyEl.style,
divFooter = $(settings.nScrollFoot),
divFooterInner = divFooter.children('div'),
divFooterTable = divFooterInner.children('table'),
header = $(settings.nTHead),
table = $(settings.nTable),
tableEl = table[0],
tableStyle = tableEl.style,
footer = settings.nTFoot ? $(settings.nTFoot) : null,
browser = settings.oBrowser,
ie67 = browser.bScrollOversize,
dtHeaderCells = _pluck( settings.aoColumns, 'nTh' ),
headerTrgEls, footerTrgEls,
headerSrcEls, footerSrcEls,
headerCopy, footerCopy,
headerWidths=[], footerWidths=[],
headerContent=[], footerContent=[],
idx, correction, sanityWidth,
zeroOut = function(nSizer) {
var style = nSizer.style;
style.paddingTop = "0";
style.paddingBottom = "0";
style.borderTopWidth = "0";
style.borderBottomWidth = "0";
style.height = 0;
};
// If the scrollbar visibility has changed from the last draw, we need to
// adjust the column sizes as the table width will have changed to account
// for the scrollbar
var scrollBarVis = divBodyEl.scrollHeight > divBodyEl.clientHeight;
if ( settings.scrollBarVis !== scrollBarVis && settings.scrollBarVis !== undefined ) {
settings.scrollBarVis = scrollBarVis;
_fnAdjustColumnSizing( settings );
return; // adjust column sizing will call this function again
}
else {
settings.scrollBarVis = scrollBarVis;
}
/*
* 1. Re-create the table inside the scrolling div
*/
// Remove the old minimised thead and tfoot elements in the inner table
table.children('thead, tfoot').remove();
if ( footer ) {
footerCopy = footer.clone().prependTo( table );
footerTrgEls = footer.find('tr'); // the original tfoot is in its own table and must be sized
footerSrcEls = footerCopy.find('tr');
}
// Clone the current header and footer elements and then place it into the inner table
headerCopy = header.clone().prependTo( table );
headerTrgEls = header.find('tr'); // original header is in its own table
headerSrcEls = headerCopy.find('tr');
headerCopy.find('th, td').removeAttr('tabindex');
/*
* 2. Take live measurements from the DOM - do not alter the DOM itself!
*/
// Remove old sizing and apply the calculated column widths
// Get the unique column headers in the newly created (cloned) header. We want to apply the
// calculated sizes to this header
if ( ! scrollX )
{
divBodyStyle.width = '100%';
divHeader[0].style.width = '100%';
}
$.each( _fnGetUniqueThs( settings, headerCopy ), function ( i, el ) {
idx = _fnVisibleToColumnIndex( settings, i );
el.style.width = settings.aoColumns[idx].sWidth;
} );
if ( footer ) {
_fnApplyToChildren( function(n) {
n.style.width = "";
}, footerSrcEls );
}
// Size the table as a whole
sanityWidth = table.outerWidth();
if ( scrollX === "" ) {
// No x scrolling
tableStyle.width = "100%";
// IE7 will make the width of the table, when 100%, include the scrollbar
// - which it shouldn't. When there is a scrollbar we need to take this
// into account.
if ( ie67 && (table.find('tbody').height() > divBodyEl.offsetHeight ||
divBody.css('overflow-y') == "scroll")
) {
tableStyle.width = _fnStringToCss( table.outerWidth() - barWidth);
}
// Recalculate the sanity width
sanityWidth = table.outerWidth();
}
else if ( scrollXInner !== "" ) {
// legacy x scroll inner has been given - use it
tableStyle.width = _fnStringToCss(scrollXInner);
// Recalculate the sanity width
sanityWidth = table.outerWidth();
}
// Hidden header should have zero height, so remove padding and borders. Then
// set the width based on the real headers
// Apply all styles in one pass
_fnApplyToChildren( zeroOut, headerSrcEls );
// Read all widths in next pass
_fnApplyToChildren( function(nSizer) {
headerContent.push( nSizer.innerHTML );
headerWidths.push( _fnStringToCss( $(nSizer).css('width') ) );
}, headerSrcEls );
// Apply all widths in final pass
_fnApplyToChildren( function(nToSize, i) {
// Only apply widths to the DataTables detected header cells - this
// prevents complex headers from having contradictory sizes applied
if ( $.inArray( nToSize, dtHeaderCells ) !== -1 ) {
nToSize.style.width = headerWidths[i];
}
}, headerTrgEls );
$(headerSrcEls).height(0);
/* Same again with the footer if we have one */
if ( footer )
{
_fnApplyToChildren( zeroOut, footerSrcEls );
_fnApplyToChildren( function(nSizer) {
footerContent.push( nSizer.innerHTML );
footerWidths.push( _fnStringToCss( $(nSizer).css('width') ) );
}, footerSrcEls );
_fnApplyToChildren( function(nToSize, i) {
nToSize.style.width = footerWidths[i];
}, footerTrgEls );
$(footerSrcEls).height(0);
}
/*
* 3. Apply the measurements
*/
// "Hide" the header and footer that we used for the sizing. We need to keep
// the content of the cell so that the width applied to the header and body
// both match, but we want to hide it completely. We want to also fix their
// width to what they currently are
_fnApplyToChildren( function(nSizer, i) {
nSizer.innerHTML = '<div class="dataTables_sizing" style="height:0;overflow:hidden;">'+headerContent[i]+'</div>';
nSizer.style.width = headerWidths[i];
}, headerSrcEls );
if ( footer )
{
_fnApplyToChildren( function(nSizer, i) {
nSizer.innerHTML = '<div class="dataTables_sizing" style="height:0;overflow:hidden;">'+footerContent[i]+'</div>';
nSizer.style.width = footerWidths[i];
}, footerSrcEls );
}
// Sanity check that the table is of a sensible width. If not then we are going to get
// misalignment - try to prevent this by not allowing the table to shrink below its min width
if ( table.outerWidth() < sanityWidth )
{
// The min width depends upon if we have a vertical scrollbar visible or not
correction = ((divBodyEl.scrollHeight > divBodyEl.offsetHeight ||
divBody.css('overflow-y') == "scroll")) ?
sanityWidth+barWidth :
sanityWidth;
// IE6/7 are a law unto themselves...
if ( ie67 && (divBodyEl.scrollHeight >
divBodyEl.offsetHeight || divBody.css('overflow-y') == "scroll")
) {
tableStyle.width = _fnStringToCss( correction-barWidth );
}
// And give the user a warning that we've stopped the table getting too small
if ( scrollX === "" || scrollXInner !== "" ) {
_fnLog( settings, 1, 'Possible column misalignment', 6 );
}
}
else
{
correction = '100%';
}
// Apply to the container elements
divBodyStyle.width = _fnStringToCss( correction );
divHeaderStyle.width = _fnStringToCss( correction );
if ( footer ) {
settings.nScrollFoot.style.width = _fnStringToCss( correction );
}
/*
* 4. Clean up
*/
if ( ! scrollY ) {
/* IE7< puts a vertical scrollbar in place (when it shouldn't be) due to subtracting
* the scrollbar height from the visible display, rather than adding it on. We need to
* set the height in order to sort this. Don't want to do it in any other browsers.
*/
if ( ie67 ) {
divBodyStyle.height = _fnStringToCss( tableEl.offsetHeight+barWidth );
}
}
/* Finally set the width's of the header and footer tables */
var iOuterWidth = table.outerWidth();
divHeaderTable[0].style.width = _fnStringToCss( iOuterWidth );
divHeaderInnerStyle.width = _fnStringToCss( iOuterWidth );
// Figure out if there is a scrollbar present - if so then we need the header and footer to
// provide a bit more space to allow "overflow" scrolling (i.e. past the scrollbar)
var bScrolling = table.height() > divBodyEl.clientHeight || divBody.css('overflow-y') == "scroll";
var padding = 'padding' + (browser.bScrollbarLeft ? 'Left' : 'Right' );
divHeaderInnerStyle[ padding ] = bScrolling ? barWidth+"px" : "0px";
if ( footer ) {
divFooterTable[0].style.width = _fnStringToCss( iOuterWidth );
divFooterInner[0].style.width = _fnStringToCss( iOuterWidth );
divFooterInner[0].style[padding] = bScrolling ? barWidth+"px" : "0px";
}
// Correct DOM ordering for colgroup - comes before the thead
table.children('colgroup').insertBefore( table.children('thead') );
/* Adjust the position of the header in case we lose the y-scrollbar */
divBody.scroll();
// If sorting or filtering has occurred, jump the scrolling back to the top
// only if we aren't holding the position
if ( (settings.bSorted || settings.bFiltered) && ! settings._drawHold ) {
divBodyEl.scrollTop = 0;
}
}
/**
* Apply a given function to the display child nodes of an element array (typically
* TD children of TR rows)
* @param {function} fn Method to apply to the objects
* @param {array} an1 List of elements to look through for display children
* @param {array} an2 Another list (identical structure to the first) - optional
* @memberof DataTable#oApi
*/
function _fnApplyToChildren( fn, an1, an2 )
{
var index=0, i=0, iLen=an1.length;
var nNode1, nNode2;
while ( i < iLen ) {
nNode1 = an1[i].firstChild;
nNode2 = an2 ? an2[i].firstChild : null;
while ( nNode1 ) {
if ( nNode1.nodeType === 1 ) {
if ( an2 ) {
fn( nNode1, nNode2, index );
}
else {
fn( nNode1, index );
}
index++;
}
nNode1 = nNode1.nextSibling;
nNode2 = an2 ? nNode2.nextSibling : null;
}
i++;
}
}
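// Usage sketch (illustrative): zero the width of every cell in a set of rows,
// or work on two parallel row lists, where the callback receives matching
// cells from each:
//
//   _fnApplyToChildren( function (n) { n.style.width = ''; }, footerSrcEls );
//   _fnApplyToChildren( function (target, sizer) {
//       target.style.width = sizer.style.width;
//   }, headerTrgEls, headerSrcEls );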
var __re_html_remove = /<.*?>/g;
/**
* Calculate the width of columns for the table
* @param {object} oSettings dataTables settings object
* @memberof DataTable#oApi
*/
function _fnCalculateColumnWidths ( oSettings )
{
var
table = oSettings.nTable,
columns = oSettings.aoColumns,
scroll = oSettings.oScroll,
scrollY = scroll.sY,
scrollX = scroll.sX,
scrollXInner = scroll.sXInner,
columnCount = columns.length,
visibleColumns = _fnGetColumns( oSettings, 'bVisible' ),
headerCells = $('th', oSettings.nTHead),
tableWidthAttr = table.getAttribute('width'), // from DOM element
tableContainer = table.parentNode,
userInputs = false,
i, column, columnIdx, width, outerWidth,
browser = oSettings.oBrowser,
ie67 = browser.bScrollOversize;
var styleWidth = table.style.width;
if ( styleWidth && styleWidth.indexOf('%') !== -1 ) {
tableWidthAttr = styleWidth;
}
/* Convert any user input sizes into pixel sizes */
for ( i=0 ; i<visibleColumns.length ; i++ ) {
column = columns[ visibleColumns[i] ];
if ( column.sWidth !== null ) {
column.sWidth = _fnConvertToWidth( column.sWidthOrig, tableContainer );
userInputs = true;
}
}
/* If the number of columns in the DOM equals the number that we have to
* process in DataTables, then we can use the offsets that are created by
* the web browser. No custom sizes can be set in order for this to happen,
* nor scrolling used
*/
if ( ie67 || ! userInputs && ! scrollX && ! scrollY &&
columnCount == _fnVisbleColumns( oSettings ) &&
columnCount == headerCells.length
) {
for ( i=0 ; i<columnCount ; i++ ) {
var colIdx = _fnVisibleToColumnIndex( oSettings, i );
if ( colIdx !== null ) {
columns[ colIdx ].sWidth = _fnStringToCss( headerCells.eq(i).width() );
}
}
}
else
{
// Otherwise construct a single row, worst case, table with the widest
// node in the data, assign any user defined widths, then insert it into
// the DOM and allow the browser to do all the hard work of calculating
// table widths
var tmpTable = $(table).clone() // don't use cloneNode - IE8 will remove events on the main table
.css( 'visibility', 'hidden' )
.removeAttr( 'id' );
// Clean up the table body
tmpTable.find('tbody tr').remove();
var tr = $('<tr/>').appendTo( tmpTable.find('tbody') );
// Clone the table header and footer - we can't use the header / footer
// from the cloned table, since if scrolling is active, the table's
// real header and footer are contained in different table tags
tmpTable.find('thead, tfoot').remove();
tmpTable
.append( $(oSettings.nTHead).clone() )
.append( $(oSettings.nTFoot).clone() );
// Remove any assigned widths from the footer (from scrolling)
tmpTable.find('tfoot th, tfoot td').css('width', '');
// Apply custom sizing to the cloned header
headerCells = _fnGetUniqueThs( oSettings, tmpTable.find('thead')[0] );
for ( i=0 ; i<visibleColumns.length ; i++ ) {
column = columns[ visibleColumns[i] ];
headerCells[i].style.width = column.sWidthOrig !== null && column.sWidthOrig !== '' ?
_fnStringToCss( column.sWidthOrig ) :
'';
// For scrollX we need to force the column width otherwise the
// browser will collapse it. If this width is smaller than the
// width the column requires, then it will have no effect
if ( column.sWidthOrig && scrollX ) {
$( headerCells[i] ).append( $('<div/>').css( {
width: column.sWidthOrig,
margin: 0,
padding: 0,
border: 0,
height: 1
} ) );
}
}
// Find the widest cell for each column and put it into the table
if ( oSettings.aoData.length ) {
for ( i=0 ; i<visibleColumns.length ; i++ ) {
columnIdx = visibleColumns[i];
column = columns[ columnIdx ];
$( _fnGetWidestNode( oSettings, columnIdx ) )
.clone( false )
.append( column.sContentPadding )
.appendTo( tr );
}
}
// Tidy the temporary table - remove name attributes so they aren't
// duplicated in the DOM (radio elements for example)
$('[name]', tmpTable).removeAttr('name');
// Table has been built, attach to the document so we can work with it.
// A holding element is used, positioned at the top of the container
// with minimal height, so it has no effect on if the container scrolls
// or not. Otherwise it might trigger scrolling when it actually isn't
// needed
var holder = $('<div/>').css( scrollX || scrollY ?
{
position: 'absolute',
top: 0,
left: 0,
height: 1,
right: 0,
overflow: 'hidden'
} :
{}
)
.append( tmpTable )
.appendTo( tableContainer );
// When scrolling (X or Y) we want to set the width of the table as
// appropriate. However, when not scrolling leave the table width as it
// is. This results in slightly different, but I think correct behaviour
if ( scrollX && scrollXInner ) {
tmpTable.width( scrollXInner );
}
else if ( scrollX ) {
tmpTable.css( 'width', 'auto' );
tmpTable.removeAttr('width');
// If there is no width attribute or style, then allow the table to
// collapse
if ( tmpTable.width() < tableContainer.clientWidth && tableWidthAttr ) {
tmpTable.width( tableContainer.clientWidth );
}
}
else if ( scrollY ) {
tmpTable.width( tableContainer.clientWidth );
}
else if ( tableWidthAttr ) {
tmpTable.width( tableWidthAttr );
}
// Get the width of each column in the constructed table - we need to
// know the inner width (so it can be assigned to the other table's
// cells) and the outer width so we can calculate the full width of the
// table. This is safe since DataTables requires a unique cell for each
// column, but if ever a header can span multiple columns, this will
// need to be modified.
var total = 0;
for ( i=0 ; i<visibleColumns.length ; i++ ) {
var cell = $(headerCells[i]);
var border = cell.outerWidth() - cell.width();
// Use getBounding... where possible (not IE8-) because it can give
// sub-pixel accuracy, which we then want to round up!
var bounding = browser.bBounding ?
Math.ceil( headerCells[i].getBoundingClientRect().width ) :
cell.outerWidth();
// Total is tracked to remove any sub-pixel errors as the outerWidth
// of the table might not equal the total given here (IE!).
total += bounding;
// Width for each column to use
columns[ visibleColumns[i] ].sWidth = _fnStringToCss( bounding - border );
}
table.style.width = _fnStringToCss( total );
// Finished with the table - ditch it
holder.remove();
}
// If there is a width attr, we want to attach an event listener which
// allows the table sizing to automatically adjust when the window is
// resized. Use the width attr rather than CSS, since we can't know if the
// CSS is a relative value or absolute - DOM read is always px.
if ( tableWidthAttr ) {
table.style.width = _fnStringToCss( tableWidthAttr );
}
if ( (tableWidthAttr || scrollX) && ! oSettings._reszEvt ) {
var bindResize = function () {
$(window).on('resize.DT-'+oSettings.sInstance, _fnThrottle( function () {
_fnAdjustColumnSizing( oSettings );
} ) );
};
// IE6/7 will crash if we bind a resize event handler on page load.
// To be removed in 1.11 which drops IE6/7 support
if ( ie67 ) {
setTimeout( bindResize, 1000 );
}
else {
bindResize();
}
oSettings._reszEvt = true;
}
}
/**
* Throttle the calls to a function. Arguments and context are maintained for
* the throttled function
* @param {function} fn Function to be called
* @param {int} [freq=200] call frequency in ms
* @returns {function} wrapped function
* @memberof DataTable#oApi
*/
var _fnThrottle = DataTable.util.throttle;
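// Usage sketch (illustrative): wrap a handler so rapid events trigger at most
// one call per frequency window - as used for the window resize binding above:
//
//   var throttled = _fnThrottle( function () {
//       _fnAdjustColumnSizing( oSettings );
//   }, 200 );
//   $(window).on( 'resize', throttled );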
/**
* Convert a CSS unit width to pixels (e.g. 2em)
* @param {string} width width to be converted
* @param {node} parent parent to get the width for (required for relative widths) - optional
* @returns {int} width in pixels
* @memberof DataTable#oApi
*/
function _fnConvertToWidth ( width, parent )
{
if ( ! width ) {
return 0;
}
var n = $('<div/>')
.css( 'width', _fnStringToCss( width ) )
.appendTo( parent || document.body );
var val = n[0].offsetWidth;
n.remove();
return val;
}
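// Illustrative behaviour: a temporary div is used so relative units resolve
// against the given parent:
//
//   _fnConvertToWidth( '2em', container );  // -> pixel width of 2em in container
//   _fnConvertToWidth( '50%', container );  // -> half the container's width, in px
//   _fnConvertToWidth( null, container );   // -> 0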
/**
* Get the widest node
* @param {object} settings dataTables settings object
* @param {int} colIdx column of interest
* @returns {node} widest table node
* @memberof DataTable#oApi
*/
function _fnGetWidestNode( settings, colIdx )
{
var idx = _fnGetMaxLenString( settings, colIdx );
if ( idx < 0 ) {
return null;
}
var data = settings.aoData[ idx ];
return ! data.nTr ? // Might not have been created when deferred rendering
$('<td/>').html( _fnGetCellData( settings, idx, colIdx, 'display' ) )[0] :
data.anCells[ colIdx ];
}
/**
* Get the row containing the longest string for a data column
* @param {object} settings dataTables settings object
* @param {int} colIdx column of interest
* @returns {int} index of the row with the longest string in the column (-1 if the table has no data)
* @memberof DataTable#oApi
*/
function _fnGetMaxLenString( settings, colIdx )
{
var s, max=-1, maxIdx = -1;
for ( var i=0, ien=settings.aoData.length ; i<ien ; i++ ) {
s = _fnGetCellData( settings, i, colIdx, 'display' )+'';
s = s.replace( __re_html_remove, '' );
s = s.replace( /&nbsp;/g, ' ' );
if ( s.length > max ) {
max = s.length;
maxIdx = i;
}
}
return maxIdx;
}
/**
* Append a CSS unit (only if required) to a string
* @param {string} s value to css-ify
* @returns {string} value with css unit
* @memberof DataTable#oApi
*/
function _fnStringToCss( s )
{
if ( s === null ) {
return '0px';
}
if ( typeof s == 'number' ) {
return s < 0 ?
'0px' :
s+'px';
}
// Check it has a unit character already
return s.match(/\d$/) ?
s+'px' :
s;
}
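// Illustrative results:
//
//   _fnStringToCss( 100 )    // -> "100px"
//   _fnStringToCss( -5 )     // -> "0px"  (negative numbers are clamped)
//   _fnStringToCss( '150' )  // -> "150px" (trailing digit, so a unit is added)
//   _fnStringToCss( '50%' )  // -> "50%"  (already has a unit)
//   _fnStringToCss( null )   // -> "0px"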
function _fnSortFlatten ( settings )
{
var
i, iLen, k, kLen,
aSort = [],
aiOrig = [],
aoColumns = settings.aoColumns,
aDataSort, iCol, sType, srcCol,
fixed = settings.aaSortingFixed,
fixedObj = $.isPlainObject( fixed ),
nestedSort = [],
add = function ( a ) {
if ( a.length && ! $.isArray( a[0] ) ) {
// 1D array
nestedSort.push( a );
}
else {
// 2D array
$.merge( nestedSort, a );
}
};
// Build the sort array, with pre-fix and post-fix options if they have been
// specified
if ( $.isArray( fixed ) ) {
add( fixed );
}
if ( fixedObj && fixed.pre ) {
add( fixed.pre );
}
add( settings.aaSorting );
if (fixedObj && fixed.post ) {
add( fixed.post );
}
for ( i=0 ; i<nestedSort.length ; i++ )
{
srcCol = nestedSort[i][0];
aDataSort = aoColumns[ srcCol ].aDataSort;
for ( k=0, kLen=aDataSort.length ; k<kLen ; k++ )
{
iCol = aDataSort[k];
sType = aoColumns[ iCol ].sType || 'string';
if ( nestedSort[i]._idx === undefined ) {
nestedSort[i]._idx = $.inArray( nestedSort[i][1], aoColumns[iCol].asSorting );
}
aSort.push( {
src: srcCol,
col: iCol,
dir: nestedSort[i][1],
index: nestedSort[i]._idx,
type: sType,
formatter: DataTable.ext.type.order[ sType+"-pre" ]
} );
}
}
return aSort;
}
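// Output sketch (illustrative): for `settings.aaSorting = [ [ 0, 'asc' ] ]` on
// a string-typed column, the flattened sort array looks like:
//
//   [ {
//       src: 0,       // column the sort request came from
//       col: 0,       // column to actually sort on (from aDataSort)
//       dir: 'asc',
//       index: 0,     // position in the column's asSorting options
//       type: 'string',
//       formatter: DataTable.ext.type.order['string-pre']
//   } ]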
/**
* Change the order of the table
* @param {object} oSettings dataTables settings object
* @memberof DataTable#oApi
* @todo This really needs split up!
*/
function _fnSort ( oSettings )
{
var
i, ien, iLen, j, jLen, k, kLen,
sDataType, nTh,
aiOrig = [],
oExtSort = DataTable.ext.type.order,
aoData = oSettings.aoData,
aoColumns = oSettings.aoColumns,
aDataSort, data, iCol, sType, oSort,
formatters = 0,
sortCol,
displayMaster = oSettings.aiDisplayMaster,
aSort;
// Resolve any column types that are unknown due to addition or invalidation
// @todo Can this be moved into a 'data-ready' handler which is called when
// data is going to be used in the table?
_fnColumnTypes( oSettings );
aSort = _fnSortFlatten( oSettings );
for ( i=0, ien=aSort.length ; i<ien ; i++ ) {
sortCol = aSort[i];
// Track if we can use the fast sort algorithm
if ( sortCol.formatter ) {
formatters++;
}
// Load the data needed for the sort, for each cell
_fnSortData( oSettings, sortCol.col );
}
/* No sorting required if server-side or no sorting array */
if ( _fnDataSource( oSettings ) != 'ssp' && aSort.length !== 0 )
{
// Create a value - key array of the current row positions such that we can use their
// current position during the sort, if values match, in order to perform stable sorting
for ( i=0, iLen=displayMaster.length ; i<iLen ; i++ ) {
aiOrig[ displayMaster[i] ] = i;
}
/* Do the sort - here we want multi-column sorting based on a given data source (column)
* and sorting function (from oSort) in a certain direction. It's reasonably complex to
* follow on its own, but this is what we want (example: two column sorting):
* fnLocalSorting = function(a,b){
* var iTest;
* iTest = oSort['string-asc']('data11', 'data12');
* if (iTest !== 0)
* return iTest;
* iTest = oSort['numeric-desc']('data21', 'data22');
* if (iTest !== 0)
* return iTest;
* return oSort['numeric-asc']( aiOrig[a], aiOrig[b] );
* }
* Basically we have a test for each sorting column, if the data in that column is equal,
* test the next column. If all columns match, then we use a numeric sort on the row
* positions in the original data array to provide a stable sort.
*
* Note - I know it seems excessive to have two sorting methods, but the first is around
* 15% faster, so the second is only maintained for backwards compatibility with sorting
* methods which do not have a pre-sort formatting function.
*/
if ( formatters === aSort.length ) {
// All sort types have formatting functions
displayMaster.sort( function ( a, b ) {
var
x, y, k, test, sort,
len=aSort.length,
dataA = aoData[a]._aSortData,
dataB = aoData[b]._aSortData;
for ( k=0 ; k<len ; k++ ) {
sort = aSort[k];
x = dataA[ sort.col ];
y = dataB[ sort.col ];
test = x<y ? -1 : x>y ? 1 : 0;
if ( test !== 0 ) {
return sort.dir === 'asc' ? test : -test;
}
}
x = aiOrig[a];
y = aiOrig[b];
return x<y ? -1 : x>y ? 1 : 0;
} );
}
else {
// Deprecated - remove in 1.11 (providing a plug-in option)
// Not all sort types have formatting methods, so we have to call their sorting
// methods.
displayMaster.sort( function ( a, b ) {
var
x, y, k, l, test, sort, fn,
len=aSort.length,
dataA = aoData[a]._aSortData,
dataB = aoData[b]._aSortData;
for ( k=0 ; k<len ; k++ ) {
sort = aSort[k];
x = dataA[ sort.col ];
y = dataB[ sort.col ];
fn = oExtSort[ sort.type+"-"+sort.dir ] || oExtSort[ "string-"+sort.dir ];
test = fn( x, y );
if ( test !== 0 ) {
return test;
}
}
x = aiOrig[a];
y = aiOrig[b];
return x<y ? -1 : x>y ? 1 : 0;
} );
}
}
/* Tell the draw function that we have sorted the data */
oSettings.bSorted = true;
}
function _fnSortAria ( settings )
{
var label;
var nextSort;
var columns = settings.aoColumns;
var aSort = _fnSortFlatten( settings );
var oAria = settings.oLanguage.oAria;
// ARIA attributes - need to loop all columns, to update all (removing old
// attributes as needed)
for ( var i=0, iLen=columns.length ; i<iLen ; i++ )
{
var col = columns[i];
var asSorting = col.asSorting;
var sTitle = col.sTitle.replace( /<.*?>/g, "" );
var th = col.nTh;
// IE7 is throwing an error when setting these properties with jQuery's
// attr() and removeAttr() methods...
th.removeAttribute('aria-sort');
/* In ARIA only the first sorting column can be marked as sorting - no multi-sort option */
if ( col.bSortable ) {
if ( aSort.length > 0 && aSort[0].col == i ) {
th.setAttribute('aria-sort', aSort[0].dir=="asc" ? "ascending" : "descending" );
nextSort = asSorting[ aSort[0].index+1 ] || asSorting[0];
}
else {
nextSort = asSorting[0];
}
label = sTitle + ( nextSort === "asc" ?
oAria.sSortAscending :
oAria.sSortDescending
);
}
else {
label = sTitle;
}
th.setAttribute('aria-label', label);
}
}
/**
* Function to run on user sort request
* @param {object} settings dataTables settings object
* @param {node} attachTo node to attach the handler to
* @param {int} colIdx column sorting index
* @param {boolean} [append=false] Append the requested sort to the existing
* sort if true (i.e. multi-column sort)
* @param {function} [callback] callback function
* @memberof DataTable#oApi
*/
function _fnSortListener ( settings, colIdx, append, callback )
{
var col = settings.aoColumns[ colIdx ];
var sorting = settings.aaSorting;
var asSorting = col.asSorting;
var nextSortIdx;
var next = function ( a, overflow ) {
var idx = a._idx;
if ( idx === undefined ) {
idx = $.inArray( a[1], asSorting );
}
return idx+1 < asSorting.length ?
idx+1 :
overflow ?
null :
0;
};
// Convert to 2D array if needed
if ( typeof sorting[0] === 'number' ) {
sorting = settings.aaSorting = [ sorting ];
}
// If appending the sort then we are multi-column sorting
if ( append && settings.oFeatures.bSortMulti ) {
// Are we already doing some kind of sort on this column?
var sortIdx = $.inArray( colIdx, _pluck(sorting, '0') );
if ( sortIdx !== -1 ) {
// Yes, modify the sort
nextSortIdx = next( sorting[sortIdx], true );
if ( nextSortIdx === null && sorting.length === 1 ) {
nextSortIdx = 0; // can't remove sorting completely
}
if ( nextSortIdx === null ) {
sorting.splice( sortIdx, 1 );
}
else {
sorting[sortIdx][1] = asSorting[ nextSortIdx ];
sorting[sortIdx]._idx = nextSortIdx;
}
}
else {
// No sort on this column yet
sorting.push( [ colIdx, asSorting[0], 0 ] );
sorting[sorting.length-1]._idx = 0;
}
}
else if ( sorting.length && sorting[0][0] == colIdx ) {
// Single column - already sorting on this column, modify the sort
nextSortIdx = next( sorting[0] );
sorting.length = 1;
sorting[0][1] = asSorting[ nextSortIdx ];
sorting[0]._idx = nextSortIdx;
}
else {
// Single column - sort only on this column
sorting.length = 0;
sorting.push( [ colIdx, asSorting[0] ] );
sorting[0]._idx = 0;
}
// Run the sort by calling a full redraw
_fnReDraw( settings );
// callback used for async user interaction
if ( typeof callback == 'function' ) {
callback( settings );
}
}
/**
* Attach a sort handler (click) to a node
* @param {object} settings dataTables settings object
* @param {node} attachTo node to attach the handler to
* @param {int} colIdx column sorting index
* @param {function} [callback] callback function
* @memberof DataTable#oApi
*/
function _fnSortAttachListener ( settings, attachTo, colIdx, callback )
{
var col = settings.aoColumns[ colIdx ];
_fnBindAction( attachTo, {}, function (e) {
/* If the column is not sortable - don't do anything */
if ( col.bSortable === false ) {
return;
}
// If processing is enabled use a timeout to allow the processing
// display to be shown - otherwise do it synchronously
if ( settings.oFeatures.bProcessing ) {
_fnProcessingDisplay( settings, true );
setTimeout( function() {
_fnSortListener( settings, colIdx, e.shiftKey, callback );
// In server-side processing, the draw callback will remove the
// processing display
if ( _fnDataSource( settings ) !== 'ssp' ) {
_fnProcessingDisplay( settings, false );
}
}, 0 );
}
else {
_fnSortListener( settings, colIdx, e.shiftKey, callback );
}
} );
}
/**
* Set the sorting classes on the table's body. Note: it is safe to call this function
* when bSort and bSortClasses are false
* @param {object} oSettings dataTables settings object
* @memberof DataTable#oApi
*/
function _fnSortingClasses( settings )
{
var oldSort = settings.aLastSort;
var sortClass = settings.oClasses.sSortColumn;
var sort = _fnSortFlatten( settings );
var features = settings.oFeatures;
var i, ien, colIdx;
if ( features.bSort && features.bSortClasses ) {
// Remove old sorting classes
for ( i=0, ien=oldSort.length ; i<ien ; i++ ) {
colIdx = oldSort[i].src;
// Remove column sorting
$( _pluck( settings.aoData, 'anCells', colIdx ) )
.removeClass( sortClass + (i<2 ? i+1 : 3) );
}
// Add new column sorting
for ( i=0, ien=sort.length ; i<ien ; i++ ) {
colIdx = sort[i].src;
$( _pluck( settings.aoData, 'anCells', colIdx ) )
.addClass( sortClass + (i<2 ? i+1 : 3) );
}
}
settings.aLastSort = sort;
}
// Get the data to sort a column, be it from cache, fresh (populating the
// cache), or from a sort formatter
function _fnSortData( settings, idx )
{
// Custom sorting function - provided by the sort data type
var column = settings.aoColumns[ idx ];
var customSort = DataTable.ext.order[ column.sSortDataType ];
var customData;
if ( customSort ) {
customData = customSort.call( settings.oInstance, settings, idx,
_fnColumnIndexToVisible( settings, idx )
);
}
// Use / populate cache
var row, cellData;
var formatter = DataTable.ext.type.order[ column.sType+"-pre" ];
for ( var i=0, ien=settings.aoData.length ; i<ien ; i++ ) {
row = settings.aoData[i];
if ( ! row._aSortData ) {
row._aSortData = [];
}
if ( ! row._aSortData[idx] || customSort ) {
cellData = customSort ?
customData[i] : // If there was a custom sort function, use data from there
_fnGetCellData( settings, i, idx, 'sort' );
row._aSortData[ idx ] = formatter ?
formatter( cellData ) :
cellData;
}
}
}
/**
* Save the state of a table
* @param {object} oSettings dataTables settings object
* @memberof DataTable#oApi
*/
function _fnSaveState ( settings )
{
if ( !settings.oFeatures.bStateSave || settings.bDestroying )
{
return;
}
/* Store the interesting variables */
var state = {
time: +new Date(),
start: settings._iDisplayStart,
length: settings._iDisplayLength,
order: $.extend( true, [], settings.aaSorting ),
search: _fnSearchToCamel( settings.oPreviousSearch ),
columns: $.map( settings.aoColumns, function ( col, i ) {
return {
visible: col.bVisible,
search: _fnSearchToCamel( settings.aoPreSearchCols[i] )
};
} )
};
_fnCallbackFire( settings, "aoStateSaveParams", 'stateSaveParams', [settings, state] );
settings.oSavedState = state;
settings.fnStateSaveCallback.call( settings.oInstance, settings, state );
}
/**
* Attempt to load a saved table state
* @param {object} oSettings dataTables settings object
* @param {object} oInit DataTables init object so we can override settings
* @param {function} callback Callback to execute when the state has been loaded
* @memberof DataTable#oApi
*/
function _fnLoadState ( settings, oInit, callback )
{
var i, ien;
var columns = settings.aoColumns;
var loaded = function ( s ) {
if ( ! s || ! s.time ) {
callback();
return;
}
// Allow custom and plug-in manipulation functions to alter the saved data set and
// cancel loading by returning false
var abStateLoad = _fnCallbackFire( settings, 'aoStateLoadParams', 'stateLoadParams', [settings, s] );
if ( $.inArray( false, abStateLoad ) !== -1 ) {
callback();
return;
}
// Reject old data
var duration = settings.iStateDuration;
if ( duration > 0 && s.time < +new Date() - (duration*1000) ) {
callback();
return;
}
// Number of columns has changed - all bets are off, no restore of settings
if ( s.columns && columns.length !== s.columns.length ) {
callback();
return;
}
// Store the saved state so it might be accessed at any time
settings.oLoadedState = $.extend( true, {}, s );
// Restore key features - todo - for 1.11 this needs to be done by
// subscribed events
if ( s.start !== undefined ) {
settings._iDisplayStart = s.start;
settings.iInitDisplayStart = s.start;
}
if ( s.length !== undefined ) {
settings._iDisplayLength = s.length;
}
// Order
if ( s.order !== undefined ) {
settings.aaSorting = [];
$.each( s.order, function ( i, col ) {
settings.aaSorting.push( col[0] >= columns.length ?
[ 0, col[1] ] :
col
);
} );
}
// Search
if ( s.search !== undefined ) {
$.extend( settings.oPreviousSearch, _fnSearchToHung( s.search ) );
}
// Columns
//
if ( s.columns ) {
for ( i=0, ien=s.columns.length ; i<ien ; i++ ) {
var col = s.columns[i];
// Visibility
if ( col.visible !== undefined ) {
columns[i].bVisible = col.visible;
}
// Search
if ( col.search !== undefined ) {
$.extend( settings.aoPreSearchCols[i], _fnSearchToHung( col.search ) );
}
}
}
_fnCallbackFire( settings, 'aoStateLoaded', 'stateLoaded', [settings, s] );
callback();
}
if ( ! settings.oFeatures.bStateSave ) {
callback();
return;
}
var state = settings.fnStateLoadCallback.call( settings.oInstance, settings, loaded );
if ( state !== undefined ) {
loaded( state );
}
// otherwise, wait for the loaded callback to be executed
}
/**
* Return the settings object for a particular table
* @param {node} table table we are using as a dataTable
* @returns {object} Settings object - or null if not found
* @memberof DataTable#oApi
*/
function _fnSettingsFromNode ( table )
{
var settings = DataTable.settings;
var idx = $.inArray( table, _pluck( settings, 'nTable' ) );
return idx !== -1 ?
settings[ idx ] :
null;
}
/**
* Log an error message
* @param {object} settings dataTables settings object
* @param {int} level log level - if falsy the configured error mode is used (alert, throw or callback), otherwise the message is only logged to the console
* @param {string} msg error message
* @param {int} tn Technical note id to get more information about the error.
* @memberof DataTable#oApi
*/
function _fnLog( settings, level, msg, tn )
{
msg = 'DataTables warning: '+
(settings ? 'table id='+settings.sTableId+' - ' : '')+msg;
if ( tn ) {
msg += '. For more information about this error, please see '+
'http://datatables.net/tn/'+tn;
}
if ( ! level ) {
// Backwards compatibility pre 1.10
var ext = DataTable.ext;
var type = ext.sErrMode || ext.errMode;
if ( settings ) {
_fnCallbackFire( settings, null, 'error', [ settings, tn, msg ] );
}
if ( type == 'alert' ) {
alert( msg );
}
else if ( type == 'throw' ) {
throw new Error(msg);
}
else if ( typeof type == 'function' ) {
type( settings, tn, msg );
}
}
else if ( window.console && console.log ) {
console.log( msg );
}
}
/**
* See if a property is defined on one object, if so assign it to the other object
* @param {object} ret target object
* @param {object} src source object
* @param {string} name property
* @param {string} [mappedName] name to map to - optional, name used if not given
* @memberof DataTable#oApi
*/
function _fnMap( ret, src, name, mappedName )
{
if ( $.isArray( name ) ) {
$.each( name, function (i, val) {
if ( $.isArray( val ) ) {
_fnMap( ret, src, val[0], val[1] );
}
else {
_fnMap( ret, src, val );
}
} );
return;
}
if ( mappedName === undefined ) {
mappedName = name;
}
if ( src[name] !== undefined ) {
ret[mappedName] = src[name];
}
}
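// Usage sketch (illustrative property names): copy init options onto the
// settings object, optionally renaming as they are mapped across:
//
//   _fnMap( oSettings.oFeatures, oInit, 'bPaginate' );
//   _fnMap( oSettings, oInit, [ [ 'iDisplayLength', '_iDisplayLength' ] ] );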
/**
* Extend objects - very similar to jQuery.extend, but deep copy objects, and
* shallow copy arrays. The reason we need to do this, is that we don't want to
* deep copy array init values (such as aaSorting) since the dev wouldn't be
* able to override them, but we do want to deep copy objects.
* @param {object} out Object to extend
* @param {object} extender Object from which the properties will be applied to
* out
* @param {boolean} breakRefs If true, then arrays will be sliced to take an
* independent copy with the exception of the `data` or `aaData` parameters
* if they are present. This is so you can pass in a collection to
* DataTables and have that used as your data source without breaking the
* references
* @returns {object} out Reference, just for convenience - out === the return.
* @memberof DataTable#oApi
* @todo This doesn't take account of arrays inside the deep copied objects.
*/
function _fnExtend( out, extender, breakRefs )
{
var val;
for ( var prop in extender ) {
if ( extender.hasOwnProperty(prop) ) {
val = extender[prop];
if ( $.isPlainObject( val ) ) {
if ( ! $.isPlainObject( out[prop] ) ) {
out[prop] = {};
}
$.extend( true, out[prop], val );
}
else if ( breakRefs && prop !== 'data' && prop !== 'aaData' && $.isArray(val) ) {
out[prop] = val.slice();
}
else {
out[prop] = val;
}
}
}
return out;
}
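// Usage sketch (illustrative): merge user options over a copy of the defaults,
// slicing arrays so the caller's references aren't shared - except for the
// `data` / `aaData` properties, which are intentionally kept by reference:
//
//   var conf = _fnExtend( $.extend( true, {}, DataTable.defaults ), oInit, true );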
/**
* Bind event handlers to allow a click or return key to activate the callback.
* This is good for accessibility since a return on the keyboard will have the
* same effect as a click, if the element has focus.
* @param {element} n Element to bind the action to
* @param {object} oData Data object to pass to the triggered function
* @param {function} fn Callback function for when the event is triggered
* @memberof DataTable#oApi
*/
function _fnBindAction( n, oData, fn )
{
$(n)
.on( 'click.DT', oData, function (e) {
n.blur(); // Remove focus outline for mouse users
fn(e);
} )
.on( 'keypress.DT', oData, function (e){
if ( e.which === 13 ) {
e.preventDefault();
fn(e);
}
} )
.on( 'selectstart.DT', function () {
/* Take the brutal approach to cancelling text selection */
return false;
} );
}
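// Usage sketch (hypothetical node and handler): a paging button that responds
// to both click and the return key:
//
//   _fnBindAction( nButton, { action: 'next' }, function (e) {
//       _fnPageChange( settings, e.data.action );
//   } );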
/**
* Register a callback function. Easily allows a callback function to be added to
* an array store of callback functions that can then all be called together.
* @param {object} oSettings dataTables settings object
* @param {string} sStore Name of the array storage for the callbacks in oSettings
* @param {function} fn Function to be called back
* @param {string} sName Identifying name for the callback (i.e. a label)
* @memberof DataTable#oApi
*/
function _fnCallbackReg( oSettings, sStore, fn, sName )
{
if ( fn )
{
oSettings[sStore].push( {
"fn": fn,
"sName": sName
} );
}
}
/**
* Fire callback functions and trigger events. Note that the loop over the
* callback array store is done backwards! Further note that you do not want to
* fire off triggers in time sensitive applications (for example cell creation)
* as it's slow.
* @param {object} settings dataTables settings object
* @param {string} callbackArr Name of the array storage for the callbacks in
* oSettings
* @param {string} eventName Name of the jQuery custom event to trigger. If
* null no trigger is fired
* @param {array} args Array of arguments to pass to the callback function /
* trigger
* @memberof DataTable#oApi
*/
function _fnCallbackFire( settings, callbackArr, eventName, args )
{
var ret = [];
if ( callbackArr ) {
ret = $.map( settings[callbackArr].slice().reverse(), function (val, i) {
return val.fn.apply( settings.oInstance, args );
} );
}
if ( eventName !== null ) {
var e = $.Event( eventName+'.dt' );
$(settings.nTable).trigger( e, args );
ret.push( e.result );
}
return ret;
}
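// Usage sketch (illustrative): run all registered draw callbacks and trigger
// the `draw.dt` event on the table node with the settings as argument:
//
//   _fnCallbackFire( settings, 'aoDrawCallback', 'draw', [ settings ] );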
function _fnLengthOverflow ( settings )
{
var
start = settings._iDisplayStart,
end = settings.fnDisplayEnd(),
len = settings._iDisplayLength;
/* If we have space to show extra rows (backing up from the end point) - then do so */
if ( start >= end )
{
start = end - len;
}
// Keep the start record on the current page
start -= (start % len);
if ( len === -1 || start < 0 )
{
start = 0;
}
settings._iDisplayStart = start;
}
function _fnRenderer( settings, type )
{
var renderer = settings.renderer;
var host = DataTable.ext.renderer[type];
if ( $.isPlainObject( renderer ) && renderer[type] ) {
// Specific renderer for this type. If available use it, otherwise use
// the default.
return host[renderer[type]] || host._;
}
else if ( typeof renderer === 'string' ) {
// Common renderer - if there is one available for this type use it,
// otherwise use the default
return host[renderer] || host._;
}
// Use the default
return host._;
}
/**
* Detect the data source being used for the table. Used to simplify the code
* a little (ajax) and to make it compress a little smaller.
*
* @param {object} settings dataTables settings object
* @returns {string} Data source
* @memberof DataTable#oApi
*/
function _fnDataSource ( settings )
{
if ( settings.oFeatures.bServerSide ) {
return 'ssp';
}
else if ( settings.ajax || settings.sAjaxSource ) {
return 'ajax';
}
return 'dom';
}
/**
* Computed structure of the DataTables API, defined by the options passed to
* `DataTable.Api.register()` when building the API.
*
* The structure is built in order to speed creation and extension of the Api
* objects since the extensions are effectively pre-parsed.
*
* The array is an array of objects with the following structure, where this
* base array represents the Api prototype base:
*
* [
* {
* name: 'data' -- string - Property name
* val: function () {}, -- function - Api method (or undefined if just an object
* methodExt: [ ... ], -- array - Array of Api object definitions to extend the method result
* propExt: [ ... ] -- array - Array of Api object definitions to extend the property
* },
* {
* name: 'row'
* val: {},
* methodExt: [ ... ],
* propExt: [
* {
* name: 'data'
* val: function () {},
* methodExt: [ ... ],
* propExt: [ ... ]
* },
* ...
* ]
* }
* ]
*
* @type {Array}
* @ignore
*/
var __apiStruct = [];
/**
* `Array.prototype` reference.
*
* @type object
* @ignore
*/
var __arrayProto = Array.prototype;
/**
* Abstraction for `context` parameter of the `Api` constructor to allow it to
* take several different forms for ease of use.
*
* Each of the input parameter types will be converted to a DataTables settings
* object where possible.
*
* @param {string|node|jQuery|object} mixed DataTable identifier. Can be one
* of:
*
* * `string` - jQuery selector. Any DataTables' matching the given selector
* will be found and used.
* * `node` - `TABLE` node which has already been formed into a DataTable.
* * `jQuery` - A jQuery object of `TABLE` nodes.
* * `object` - DataTables settings object
* * `DataTables.Api` - API instance
* @return {array|null} Matching DataTables settings objects. `null` or
* `undefined` is returned if no matching DataTable is found.
* @ignore
*/
var _toSettings = function ( mixed )
{
var idx, jq;
var settings = DataTable.settings;
var tables = $.map( settings, function (el, i) {
return el.nTable;
} );
if ( ! mixed ) {
return [];
}
else if ( mixed.nTable && mixed.oApi ) {
// DataTables settings object
return [ mixed ];
}
else if ( mixed.nodeName && mixed.nodeName.toLowerCase() === 'table' ) {
// Table node
idx = $.inArray( mixed, tables );
return idx !== -1 ? [ settings[idx] ] : null;
}
else if ( mixed && typeof mixed.settings === 'function' ) {
return mixed.settings().toArray();
}
else if ( typeof mixed === 'string' ) {
// jQuery selector
jq = $(mixed);
}
else if ( mixed instanceof $ ) {
// jQuery object (also DataTables instance)
jq = mixed;
}
if ( jq ) {
return jq.map( function(i) {
idx = $.inArray( this, tables );
return idx !== -1 ? settings[idx] : null;
} ).toArray();
}
};
/**
* DataTables API class - used to control and interface with one or more
* DataTables enhanced tables.
*
* The API class is heavily based on jQuery, presenting a chainable interface
* that you can use to interact with tables. Each instance of the API class has
* a "context" - i.e. the tables that it will operate on. This could be a single
* table, all tables on a page or a sub-set thereof.
*
* Additionally the API is designed to allow you to easily work with the data in
* the tables, retrieving and manipulating it as required. This is done by
* presenting the API class as an array like interface. The contents of the
* array depend upon the actions requested by each method (for example
* `rows().nodes()` will return an array of nodes, while `rows().data()` will
* return an array of objects or arrays depending upon your table's
* configuration). The API object has a number of array like methods (`push`,
* `pop`, `reverse` etc) as well as additional helper methods (`each`, `pluck`,
* `unique` etc) to assist your working with the data held in a table.
*
* Most methods (those which return an Api instance) are chainable, which means
* the return from a method call also has all of the methods available that the
* top level object had. For example, these two calls are equivalent:
*
* // Not chained
* api.row.add( {...} );
* api.draw();
*
* // Chained
* api.row.add( {...} ).draw();
*
* @class DataTable.Api
* @param {array|object|string|jQuery} context DataTable identifier. This is
* used to define which DataTables enhanced tables this API will operate on.
* Can be one of:
*
* * `string` - jQuery selector. Any DataTables' matching the given selector
* will be found and used.
* * `node` - `TABLE` node which has already been formed into a DataTable.
* * `jQuery` - A jQuery object of `TABLE` nodes.
* * `object` - DataTables settings object
* @param {array} [data] Data to initialise the Api instance with.
*
* @example
* // Direct initialisation during DataTables construction
* var api = $('#example').DataTable();
*
* @example
* // Initialisation using a DataTables jQuery object
* var api = $('#example').dataTable().api();
*
* @example
* // Initialisation as a constructor
* var api = new $.fn.DataTable.Api( 'table.dataTable' );
*/
_Api = function ( context, data )
{
if ( ! (this instanceof _Api) ) {
return new _Api( context, data );
}
var settings = [];
var ctxSettings = function ( o ) {
var a = _toSettings( o );
if ( a ) {
settings = settings.concat( a );
}
};
if ( $.isArray( context ) ) {
for ( var i=0, ien=context.length ; i<ien ; i++ ) {
ctxSettings( context[i] );
}
}
else {
ctxSettings( context );
}
// Remove duplicates
this.context = _unique( settings );
// Initial data
if ( data ) {
$.merge( this, data );
}
// selector
this.selector = {
rows: null,
cols: null,
opts: null
};
_Api.extend( this, this, __apiStruct );
};
DataTable.Api = _Api;
// Don't destroy the existing prototype, just extend it. Required for jQuery 2's
// isPlainObject.
$.extend( _Api.prototype, {
any: function ()
{
return this.count() !== 0;
},
concat: __arrayProto.concat,
context: [], // array of table settings objects
count: function ()
{
return this.flatten().length;
},
each: function ( fn )
{
for ( var i=0, ien=this.length ; i<ien; i++ ) {
fn.call( this, this[i], i, this );
}
return this;
},
eq: function ( idx )
{
var ctx = this.context;
return ctx.length > idx ?
new _Api( ctx[idx], this[idx] ) :
null;
},
filter: function ( fn )
{
var a = [];
if ( __arrayProto.filter ) {
a = __arrayProto.filter.call( this, fn, this );
}
else {
// Compatibility for browsers without ECMA-262 5th edition (JS 1.6)
for ( var i=0, ien=this.length ; i<ien ; i++ ) {
if ( fn.call( this, this[i], i, this ) ) {
a.push( this[i] );
}
}
}
return new _Api( this.context, a );
},
flatten: function ()
{
var a = [];
return new _Api( this.context, a.concat.apply( a, this.toArray() ) );
},
join: __arrayProto.join,
indexOf: __arrayProto.indexOf || function (obj, start)
{
for ( var i=(start || 0), ien=this.length ; i<ien ; i++ ) {
if ( this[i] === obj ) {
return i;
}
}
return -1;
},
iterator: function ( flatten, type, fn, alwaysNew ) {
var
a = [], ret,
i, ien, j, jen,
context = this.context,
rows, items, item,
selector = this.selector;
// Argument shifting
if ( typeof flatten === 'string' ) {
alwaysNew = fn;
fn = type;
type = flatten;
flatten = false;
}
for ( i=0, ien=context.length ; i<ien ; i++ ) {
var apiInst = new _Api( context[i] );
if ( type === 'table' ) {
ret = fn.call( apiInst, context[i], i );
if ( ret !== undefined ) {
a.push( ret );
}
}
else if ( type === 'columns' || type === 'rows' ) {
// this has same length as context - one entry for each table
ret = fn.call( apiInst, context[i], this[i], i );
if ( ret !== undefined ) {
a.push( ret );
}
}
else if ( type === 'column' || type === 'column-rows' || type === 'row' || type === 'cell' ) {
// columns and rows share the same structure.
// 'this' is an array of column indexes for each context
items = this[i];
if ( type === 'column-rows' ) {
rows = _selector_row_indexes( context[i], selector.opts );
}
for ( j=0, jen=items.length ; j<jen ; j++ ) {
item = items[j];
if ( type === 'cell' ) {
ret = fn.call( apiInst, context[i], item.row, item.column, i, j );
}
else {
ret = fn.call( apiInst, context[i], item, i, j, rows );
}
if ( ret !== undefined ) {
a.push( ret );
}
}
}
}
if ( a.length || alwaysNew ) {
var api = new _Api( context, flatten ? a.concat.apply( [], a ) : a );
var apiSelector = api.selector;
apiSelector.rows = selector.rows;
apiSelector.cols = selector.cols;
apiSelector.opts = selector.opts;
return api;
}
return this;
},
lastIndexOf: __arrayProto.lastIndexOf || function (obj, start)
{
// Bit cheeky...
return this.indexOf.apply( this.toArray().reverse(), arguments );
},
length: 0,
map: function ( fn )
{
var a = [];
if ( __arrayProto.map ) {
a = __arrayProto.map.call( this, fn, this );
}
else {
// Compatibility for browsers without ECMA-262 5th edition (JS 1.6)
for ( var i=0, ien=this.length ; i<ien ; i++ ) {
a.push( fn.call( this, this[i], i ) );
}
}
return new _Api( this.context, a );
},
pluck: function ( prop )
{
return this.map( function ( el ) {
return el[ prop ];
} );
},
pop: __arrayProto.pop,
push: __arrayProto.push,
// Does not return an API instance
reduce: __arrayProto.reduce || function ( fn, init )
{
return _fnReduce( this, fn, init, 0, this.length, 1 );
},
reduceRight: __arrayProto.reduceRight || function ( fn, init )
{
return _fnReduce( this, fn, init, this.length-1, -1, -1 );
},
reverse: __arrayProto.reverse,
// Object with rows, columns and opts
selector: null,
shift: __arrayProto.shift,
slice: function () {
return new _Api( this.context, this );
},
sort: __arrayProto.sort, // ? name - order?
splice: __arrayProto.splice,
toArray: function ()
{
return __arrayProto.slice.call( this );
},
to$: function ()
{
return $( this );
},
toJQuery: function ()
{
return $( this );
},
unique: function ()
{
return new _Api( this.context, _unique(this) );
},
unshift: __arrayProto.unshift
} );
_Api.extend = function ( scope, obj, ext )
{
// Only extend API instances and static properties of the API
if ( ! ext.length || ! obj || ( ! (obj instanceof _Api) && ! obj.__dt_wrapper ) ) {
return;
}
var
i, ien,
j, jen,
struct, inner,
methodScoping = function ( scope, fn, struc ) {
return function () {
var ret = fn.apply( scope, arguments );
// Method extension
_Api.extend( ret, ret, struc.methodExt );
return ret;
};
};
for ( i=0, ien=ext.length ; i<ien ; i++ ) {
struct = ext[i];
// Value
obj[ struct.name ] = typeof struct.val === 'function' ?
methodScoping( scope, struct.val, struct ) :
$.isPlainObject( struct.val ) ?
{} :
struct.val;
obj[ struct.name ].__dt_wrapper = true;
// Property extension
_Api.extend( scope, obj[ struct.name ], struct.propExt );
}
};
// @todo - Is there need for an augment function?
// _Api.augment = function ( inst, name )
// {
// // Find src object in the structure from the name
// var parts = name.split('.');
// _Api.extend( inst, obj );
// };
_Api.register = _api_register = function ( name, val )
{
if ( $.isArray( name ) ) {
for ( var j=0, jen=name.length ; j<jen ; j++ ) {
_Api.register( name[j], val );
}
return;
}
var
i, ien,
heir = name.split('.'),
struct = __apiStruct,
key, method;
var find = function ( src, name ) {
for ( var i=0, ien=src.length ; i<ien ; i++ ) {
if ( src[i].name === name ) {
return src[i];
}
}
return null;
};
for ( i=0, ien=heir.length ; i<ien ; i++ ) {
method = heir[i].indexOf('()') !== -1;
key = method ?
heir[i].replace('()', '') :
heir[i];
var src = find( struct, key );
if ( ! src ) {
src = {
name: key,
val: {},
methodExt: [],
propExt: []
};
struct.push( src );
}
if ( i === ien-1 ) {
src.val = val;
}
else {
struct = method ?
src.methodExt :
src.propExt;
}
}
};
_Api.registerPlural = _api_registerPlural = function ( pluralName, singularName, val ) {
_Api.register( pluralName, val );
_Api.register( singularName, function () {
var ret = val.apply( this, arguments );
if ( ret === this ) {
// Returned item is the API instance that was passed in, return it
return this;
}
else if ( ret instanceof _Api ) {
// New API instance returned, want the value from the first item
// in the returned array for the singular result.
return ret.length ?
$.isArray( ret[0] ) ?
new _Api( ret.context, ret[0] ) : // Array results are 'enhanced'
ret[0] :
undefined;
}
// Non-API return - just fire it back
return ret;
} );
};
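// Usage sketch (hypothetical method names): register a simple API method, and
// a plural / singular pair where the singular unwraps the first result:
//
//   _api_register( 'tables().count()', function () {
//       return this.context.length;
//   } );
//   _api_registerPlural( 'tables().ids()', 'table().id()', function () {
//       return this.iterator( 'table', function ( ctx ) {
//           return ctx.sTableId;
//       }, 1 );
//   } );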
/**
* Selector for HTML tables. Apply the given selector to the given array of
* DataTables settings objects.
*
* @param {string|integer} [selector] jQuery selector string or integer
* @param {array} Array of DataTables settings objects to be filtered
* @return {array}
* @ignore
*/
var __table_selector = function ( selector, a )
{
// Integer is used to pick out a table by index
if ( typeof selector === 'number' ) {
return [ a[ selector ] ];
}
// Perform a jQuery selector on the table nodes
var nodes = $.map( a, function (el, i) {
return el.nTable;
} );
return $(nodes)
.filter( selector )
.map( function (i) {
// Need to translate back from the table node to the settings
var idx = $.inArray( this, nodes );
return a[ idx ];
} )
.toArray();
};
/**
* Context selector for the API's context (i.e. the tables the API instance
* refers to).
*
* @name DataTable.Api#tables
* @param {string|integer} [selector] Selector to pick which tables the iterator
* should operate on. If not given, all tables in the current context are
* used. This can be given as a jQuery selector (for example `':gt(0)'`) to
* select multiple tables or as an integer to select a single table.
* @returns {DataTable.Api} Returns a new API instance if a selector is given.
*/
_api_register( 'tables()', function ( selector ) {
// A new instance is created if there was a selector specified
return selector ?
new _Api( __table_selector( selector, this.context ) ) :
this;
} );
_api_register( 'table()', function ( selector ) {
var tables = this.tables( selector );
var ctx = tables.context;
// Truncate to the first matched table
return ctx.length ?
new _Api( ctx[0] ) :
tables;
} );
_api_registerPlural( 'tables().nodes()', 'table().node()' , function () {
return this.iterator( 'table', function ( ctx ) {
return ctx.nTable;
}, 1 );
} );
_api_registerPlural( 'tables().body()', 'table().body()' , function () {
return this.iterator( 'table', function ( ctx ) {
return ctx.nTBody;
}, 1 );
} );
_api_registerPlural( 'tables().header()', 'table().header()' , function () {
return this.iterator( 'table', function ( ctx ) {
return ctx.nTHead;
}, 1 );
} );
_api_registerPlural( 'tables().footer()', 'table().footer()' , function () {
return this.iterator( 'table', function ( ctx ) {
return ctx.nTFoot;
}, 1 );
} );
_api_registerPlural( 'tables().containers()', 'table().container()' , function () {
return this.iterator( 'table', function ( ctx ) {
return ctx.nTableWrapper;
}, 1 );
} );
/**
* Redraw the tables in the current context.
*/
_api_register( 'draw()', function ( paging ) {
return this.iterator( 'table', function ( settings ) {
if ( paging === 'page' ) {
_fnDraw( settings );
}
else {
if ( typeof paging === 'string' ) {
paging = paging === 'full-hold' ?
false :
true;
}
_fnReDraw( settings, paging===false );
}
} );
} );
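// Usage sketch (assuming a table initialised as `var table = $('#example').DataTable();`):
//
//   table.draw();          // full re-order / re-search, paging reset
//   table.draw( false );   // full draw, but hold the current paging position
//   table.draw( 'page' );  // redraw the current page only (cheapest option)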
/**
* Get the current page index.
*
* @return {integer} Current page index (zero based)
*//**
* Set the current page.
*
* Note that if you attempt to show a page which does not exist, DataTables will
* not throw an error, but rather reset the paging.
*
* @param {integer|string} action The paging action to take. This can be one of:
* * `integer` - The page index to jump to
* * `string` - An action to take:
* * `first` - Jump to first page.
* * `next` - Jump to the next page
* * `previous` - Jump to previous page
* * `last` - Jump to the last page.
* @returns {DataTables.Api} this
*/
_api_register( 'page()', function ( action ) {
if ( action === undefined ) {
return this.page.info().page; // not an expensive call
}
// else, have an action to take on all tables
return this.iterator( 'table', function ( settings ) {
_fnPageChange( settings, action );
} );
} );
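// Usage sketch (`table` being a DataTables API instance):
//
//   table.page();                         // -> current zero-based page index
//   table.page( 'next' ).draw( 'page' );  // advance a page; draw without re-sorting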
/**
* Paging information for the first table in the current context.
*
* If you require paging information for another table, use the `table()` method
* with a suitable selector.
*
* @return {object} Object with the following properties set:
* * `page` - Current page index (zero based - i.e. the first page is `0`)
* * `pages` - Total number of pages
* * `start` - Display index for the first record shown on the current page
* * `end` - Display index for the last record shown on the current page
* * `length` - Display length (number of records). Note that generally `start
* + length = end`, but this is not always true, for example if there are
* only 2 records to show on the final page, with a length of 10.
* * `recordsTotal` - Full data set length
* * `recordsDisplay` - Data set length once the current filtering criteria
* are applied.
*/
_api_register( 'page.info()', function ( action ) {
if ( this.context.length === 0 ) {
return undefined;
}
var
settings = this.context[0],
start = settings._iDisplayStart,
len = settings.oFeatures.bPaginate ? settings._iDisplayLength : -1,
visRecords = settings.fnRecordsDisplay(),
all = len === -1;
return {
"page": all ? 0 : Math.floor( start / len ),
"pages": all ? 1 : Math.ceil( visRecords / len ),
"start": start,
"end": settings.fnDisplayEnd(),
"length": len,
"recordsTotal": settings.fnRecordsTotal(),
"recordsDisplay": visRecords,
"serverSide": _fnDataSource( settings ) === 'ssp'
};
} );
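// Usage sketch - for example, a 57 row table showing 10 rows per page, on the
// second page:
//
//   table.page.info();
//   // -> { page: 1, pages: 6, start: 10, end: 20, length: 10,
//   //      recordsTotal: 57, recordsDisplay: 57, serverSide: false }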
/**
* Get the current page length.
*
* @return {integer} Current page length. Note `-1` indicates that all records
* are to be shown.
*//**
* Set the current page length.
*
* @param {integer} Page length to set. Use `-1` to show all records.
* @returns {DataTables.Api} this
*/
_api_register( 'page.len()', function ( len ) {
// Note that we can't call this function 'length()' because `length`
// is a Javascript property of functions which defines how many arguments
// the function expects.
if ( len === undefined ) {
return this.context.length !== 0 ?
this.context[0]._iDisplayLength :
undefined;
}
// else, set the page length
return this.iterator( 'table', function ( settings ) {
_fnLengthChange( settings, len );
} );
} );
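// Usage sketch:
//
//   table.page.len();             // -> current page length (-1 for all records)
//   table.page.len( 25 ).draw();  // show 25 records per page and redraw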
var __reload = function ( settings, holdPosition, callback ) {
// Use the draw event to trigger a callback
if ( callback ) {
var api = new _Api( settings );
api.one( 'draw', function () {
callback( api.ajax.json() );
} );
}
if ( _fnDataSource( settings ) == 'ssp' ) {
_fnReDraw( settings, holdPosition );
}
else {
_fnProcessingDisplay( settings, true );
// Cancel an existing request
var xhr = settings.jqXHR;
if ( xhr && xhr.readyState !== 4 ) {
xhr.abort();
}
// Trigger xhr
_fnBuildAjax( settings, [], function( json ) {
_fnClearTable( settings );
var data = _fnAjaxDataSrc( settings, json );
for ( var i=0, ien=data.length ; i<ien ; i++ ) {
_fnAddData( settings, data[i] );
}
_fnReDraw( settings, holdPosition );
_fnProcessingDisplay( settings, false );
} );
}
};
/**
* Get the JSON response from the last Ajax request that DataTables made to the
* server. Note that this returns the JSON from the first table in the current
* context.
*
* @return {object} JSON received from the server.
*/
_api_register( 'ajax.json()', function () {
var ctx = this.context;
if ( ctx.length > 0 ) {
return ctx[0].json;
}
// else return undefined;
} );
/**
* Get the data submitted in the last Ajax request
*/
_api_register( 'ajax.params()', function () {
var ctx = this.context;
if ( ctx.length > 0 ) {
return ctx[0].oAjaxData;
}
// else return undefined;
} );
/**
* Reload tables from the Ajax data source. Note that this function will
* automatically re-draw the table when the remote data has been loaded.
*
* @param {function} [callback] Function executed when the data has been
*   reloaded and the table fully redrawn
* @param {boolean} [resetPaging=true] Reset (default) or hold the current paging
*   position. A full re-sort and re-filter is performed when this method is
*   called, which is why the pagination reset is the default action.
* @returns {DataTables.Api} this
*/
_api_register( 'ajax.reload()', function ( callback, resetPaging ) {
return this.iterator( 'table', function (settings) {
__reload( settings, resetPaging===false, callback );
} );
} );
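// Usage sketch:
//
//   table.ajax.reload();               // reload, resetting the paging
//   table.ajax.reload( null, false );  // reload, holding the current page
//   table.ajax.reload( function ( json ) {
//       console.log( json );           // runs once the new data has been drawn
//   } );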
/**
* Get the current Ajax URL. Note that this returns the URL from the first
* table in the current context.
*
* @return {string} Current Ajax source URL
*//**
* Set the Ajax URL. Note that this will set the URL for all tables in the
* current context.
*
* @param {string} url URL to set.
* @returns {DataTables.Api} this
*/
_api_register( 'ajax.url()', function ( url ) {
var ctx = this.context;
if ( url === undefined ) {
// get
if ( ctx.length === 0 ) {
return undefined;
}
ctx = ctx[0];
return ctx.ajax ?
$.isPlainObject( ctx.ajax ) ?
ctx.ajax.url :
ctx.ajax :
ctx.sAjaxSource;
}
// set
return this.iterator( 'table', function ( settings ) {
if ( $.isPlainObject( settings.ajax ) ) {
settings.ajax.url = url;
}
else {
settings.ajax = url;
}
// No need to consider sAjaxSource here since DataTables gives priority
// to `ajax` over `sAjaxSource`. So setting `ajax` here, renders any
// value of `sAjaxSource` redundant.
} );
} );
/**
* Load data from the newly set Ajax URL. Note that this method is only
* available when `ajax.url()` is used to set a URL. Additionally, this method
* has the same effect as calling `ajax.reload()` but is provided for
* convenience when setting a new URL. Like `ajax.reload()` it will
* automatically redraw the table once the remote data has been loaded.
*
* @returns {DataTables.Api} this
*/
_api_register( 'ajax.url().load()', function ( callback, resetPaging ) {
// Same as a reload, but makes sense to present it for easy access after a
// url change
return this.iterator( 'table', function ( ctx ) {
__reload( ctx, resetPaging===false, callback );
} );
} );
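// Usage sketch: point the table at a new data source and fetch it:
//
//   table.ajax.url( 'newData.json' ).load();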
var _selector_run = function ( type, selector, selectFn, settings, opts )
{
var
out = [], res,
a, i, ien, j, jen,
selectorType = typeof selector;
// Can't just check for isArray here, as an API or jQuery instance might be
// given with their array like look
if ( ! selector || selectorType === 'string' || selectorType === 'function' || selector.length === undefined ) {
selector = [ selector ];
}
for ( i=0, ien=selector.length ; i<ien ; i++ ) {
// Only split on simple strings - complex expressions will be jQuery selectors
a = selector[i] && selector[i].split && ! selector[i].match(/[\[\(:]/) ?
selector[i].split(',') :
[ selector[i] ];
for ( j=0, jen=a.length ; j<jen ; j++ ) {
res = selectFn( typeof a[j] === 'string' ? $.trim(a[j]) : a[j] );
if ( res && res.length ) {
out = out.concat( res );
}
}
}
// selector extensions
var ext = _ext.selector[ type ];
if ( ext.length ) {
for ( i=0, ien=ext.length ; i<ien ; i++ ) {
out = ext[i]( settings, opts, out );
}
}
return _unique( out );
};
var _selector_opts = function ( opts )
{
if ( ! opts ) {
opts = {};
}
// Backwards compatibility for 1.9- which used the terminology filter rather
// than search
if ( opts.filter && opts.search === undefined ) {
opts.search = opts.filter;
}
return $.extend( {
search: 'none',
order: 'current',
page: 'all'
}, opts );
};
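// Illustrative selector-modifier object as normalised above - all three
// properties are optional, and the values shown here are the defaults:
//
//   table.rows( { search: 'none', order: 'current', page: 'all' } );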
var _selector_first = function ( inst )
{
// Reduce the API instance to the first item found
for ( var i=0, ien=inst.length ; i<ien ; i++ ) {
if ( inst[i].length > 0 ) {
// Assign the first element to the first item in the instance
// and truncate the instance and context
inst[0] = inst[i];
inst[0].length = 1;
inst.length = 1;
inst.context = [ inst.context[i] ];
return inst;
}
}
// Not found - return an empty instance
inst.length = 0;
return inst;
};
var _selector_row_indexes = function ( settings, opts )
{
var
i, ien, tmp, a=[],
displayFiltered = settings.aiDisplay,
displayMaster = settings.aiDisplayMaster;
var
search = opts.search, // none, applied, removed
order = opts.order, // applied, current, index (original - compatibility with 1.9)
page = opts.page; // all, current
if ( _fnDataSource( settings ) == 'ssp' ) {
// In server-side processing mode, most options are irrelevant since
// rows not shown don't exist and the index order is the applied order
// Removed is a special case - for consistency just return an empty
// array
return search === 'removed' ?
[] :
_range( 0, displayMaster.length );
}
else if ( page == 'current' ) {
// Current page implies that order=current and filter=applied, since it is
// fairly senseless otherwise, regardless of what order and search actually
// are
for ( i=settings._iDisplayStart, ien=settings.fnDisplayEnd() ; i<ien ; i++ ) {
a.push( displayFiltered[i] );
}
}
else if ( order == 'current' || order == 'applied' ) {
a = search == 'none' ?
displayMaster.slice() : // no search
search == 'applied' ?
displayFiltered.slice() : // applied search
$.map( displayMaster, function (el, i) { // removed search
return $.inArray( el, displayFiltered ) === -1 ? el : null;
} );
}
else if ( order == 'index' || order == 'original' ) {
for ( i=0, ien=settings.aoData.length ; i<ien ; i++ ) {
if ( search == 'none' ) {
a.push( i );
}
else { // applied | removed
tmp = $.inArray( i, displayFiltered );
if ((tmp === -1 && search == 'removed') ||
(tmp >= 0 && search == 'applied') )
{
a.push( i );
}
}
}
}
return a;
};
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Rows
*
* {} - no selector - use all available rows
* {integer} - row aoData index
* {node} - TR node
* {string} - jQuery selector to apply to the TR elements
* {array} - jQuery array of nodes, or simply an array of TR nodes
*
*/
var __row_selector = function ( settings, selector, opts )
{
var rows;
var run = function ( sel ) {
var selInt = _intVal( sel );
var i, ien;
// Short cut - selector is a number and no options provided (default is
// all records, so no need to check if the index is in there, since it
// must be - dev error if the index doesn't exist).
if ( selInt !== null && ! opts ) {
return [ selInt ];
}
if ( ! rows ) {
rows = _selector_row_indexes( settings, opts );
}
if ( selInt !== null && $.inArray( selInt, rows ) !== -1 ) {
// Selector - integer
return [ selInt ];
}
else if ( sel === null || sel === undefined || sel === '' ) {
// Selector - none
return rows;
}
// Selector - function
if ( typeof sel === 'function' ) {
return $.map( rows, function (idx) {
var row = settings.aoData[ idx ];
return sel( idx, row._aData, row.nTr ) ? idx : null;
} );
}
// Get nodes in the order from the `rows` array with null values removed
var nodes = _removeEmpty(
_pluck_order( settings.aoData, rows, 'nTr' )
);
// Selector - node
if ( sel.nodeName ) {
if ( sel._DT_RowIndex !== undefined ) {
return [ sel._DT_RowIndex ]; // Property added by DT for fast lookup
}
else if ( sel._DT_CellIndex ) {
return [ sel._DT_CellIndex.row ];
}
else {
var host = $(sel).closest('*[data-dt-row]');
return host.length ?
[ host.data('dt-row') ] :
[];
}
}
// ID selector. Want to always be able to select rows by id, regardless
// of whether the tr element has been created or not, so can't rely upon
// jQuery here - hence a custom implementation. This does not match
// Sizzle's fast selector or HTML4 - in HTML5 the ID can be anything,
// but to select it using a CSS selector engine (like Sizzle or
// querySelector) it would need to be escaped for some characters.
// DataTables simplifies this for row selectors since you can select
// only a row. A # indicates an id and anything that follows is the id -
// unescaped.
if ( typeof sel === 'string' && sel.charAt(0) === '#' ) {
// get row index from id
var rowObj = settings.aIds[ sel.replace( /^#/, '' ) ];
if ( rowObj !== undefined ) {
return [ rowObj.idx ];
}
// need to fall through to jQuery in case there is a DOM id that
// matches
}
// Selector - jQuery selector string, array of nodes or jQuery object.
// As jQuery's .filter() allows jQuery objects to be passed to filter,
// it also allows arrays, so this will cope with all three options
return $(nodes)
.filter( sel )
.map( function () {
return this._DT_RowIndex;
} )
.toArray();
};
return _selector_run( 'row', selector, run, settings, opts );
};
_api_register( 'rows()', function ( selector, opts ) {
// argument shifting
if ( selector === undefined ) {
selector = '';
}
else if ( $.isPlainObject( selector ) ) {
opts = selector;
selector = '';
}
opts = _selector_opts( opts );
var inst = this.iterator( 'table', function ( settings ) {
return __row_selector( settings, selector, opts );
}, 1 );
// Want argument shifting here and in __row_selector?
inst.selector.rows = selector;
inst.selector.opts = opts;
return inst;
} );
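// Usage sketch (illustrative) of the row selector forms handled above. The
// `salary` property is hypothetical:
//
//   table.rows();                      // all rows
//   table.rows( 0 );                   // row with aoData index 0
//   table.rows( ':even' );             // jQuery selector applied to TR nodes
//   table.rows( function ( idx, data, node ) {
//       return data.salary > 50000;    // function selector
//   } );
//   table.rows( { page: 'current' } ); // selector-modifier options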
_api_register( 'rows().nodes()', function () {
return this.iterator( 'row', function ( settings, row ) {
return settings.aoData[ row ].nTr || undefined;
}, 1 );
} );
_api_register( 'rows().data()', function () {
return this.iterator( true, 'rows', function ( settings, rows ) {
return _pluck_order( settings.aoData, rows, '_aData' );
}, 1 );
} );
_api_registerPlural( 'rows().cache()', 'row().cache()', function ( type ) {
return this.iterator( 'row', function ( settings, row ) {
var r = settings.aoData[ row ];
return type === 'search' ? r._aFilterData : r._aSortData;
}, 1 );
} );
_api_registerPlural( 'rows().invalidate()', 'row().invalidate()', function ( src ) {
return this.iterator( 'row', function ( settings, row ) {
_fnInvalidate( settings, row, src );
} );
} );
_api_registerPlural( 'rows().indexes()', 'row().index()', function () {
return this.iterator( 'row', function ( settings, row ) {
return row;
}, 1 );
} );
_api_registerPlural( 'rows().ids()', 'row().id()', function ( hash ) {
var a = [];
var context = this.context;
// `iterator` will drop undefined values, but in this case we want them
for ( var i=0, ien=context.length ; i<ien ; i++ ) {
for ( var j=0, jen=this[i].length ; j<jen ; j++ ) {
var id = context[i].rowIdFn( context[i].aoData[ this[i][j] ]._aData );
a.push( (hash === true ? '#' : '' )+ id );
}
}
return new _Api( context, a );
} );
_api_registerPlural( 'rows().remove()', 'row().remove()', function () {
var that = this;
this.iterator( 'row', function ( settings, row, thatIdx ) {
var data = settings.aoData;
var rowData = data[ row ];
var i, ien, j, jen;
var loopRow, loopCells;
data.splice( row, 1 );
// Update the cached indexes
for ( i=0, ien=data.length ; i<ien ; i++ ) {
loopRow = data[i];
loopCells = loopRow.anCells;
// Rows
if ( loopRow.nTr !== null ) {
loopRow.nTr._DT_RowIndex = i;
}
// Cells
if ( loopCells !== null ) {
for ( j=0, jen=loopCells.length ; j<jen ; j++ ) {
loopCells[j]._DT_CellIndex.row = i;
}
}
}
// Delete from the display arrays
_fnDeleteIndex( settings.aiDisplayMaster, row );
_fnDeleteIndex( settings.aiDisplay, row );
_fnDeleteIndex( that[ thatIdx ], row, false ); // maintain local indexes
// For server-side processing tables - subtract the deleted row from the count
if ( settings._iRecordsDisplay > 0 ) {
settings._iRecordsDisplay--;
}
// Check for an 'overflow' case when displaying the table
_fnLengthOverflow( settings );
// Remove the row's ID reference if there is one
var id = settings.rowIdFn( rowData._aData );
if ( id !== undefined ) {
delete settings.aIds[ id ];
}
} );
this.iterator( 'table', function ( settings ) {
for ( var i=0, ien=settings.aoData.length ; i<ien ; i++ ) {
settings.aoData[i].idx = i;
}
} );
return this;
} );
_api_register( 'rows.add()', function ( rows ) {
var newRows = this.iterator( 'table', function ( settings ) {
var row, i, ien;
var out = [];
for ( i=0, ien=rows.length ; i<ien ; i++ ) {
row = rows[i];
if ( row.nodeName && row.nodeName.toUpperCase() === 'TR' ) {
out.push( _fnAddTr( settings, row )[0] );
}
else {
out.push( _fnAddData( settings, row ) );
}
}
return out;
}, 1 );
// Return an Api.rows() extended instance, so rows().nodes() etc can be used
var modRows = this.rows( -1 );
modRows.pop();
$.merge( modRows, newRows );
return modRows;
} );
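// Usage sketch (illustrative; the array shape must match the table's columns,
// and `draw()` is required to display the new rows):
//
//   table.rows.add( [
//       [ 'Tiger Nixon', 'System Architect', 'Edinburgh' ],
//       [ 'Garrett Winters', 'Accountant', 'Tokyo' ]
//   ] ).draw();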
/**
 * Select a single row from the table - effectively `rows()` reduced to the
 * first matching row.
 */
_api_register( 'row()', function ( selector, opts ) {
return _selector_first( this.rows( selector, opts ) );
} );
_api_register( 'row().data()', function ( data ) {
var ctx = this.context;
if ( data === undefined ) {
// Get
return ctx.length && this.length ?
ctx[0].aoData[ this[0] ]._aData :
undefined;
}
// Set
ctx[0].aoData[ this[0] ]._aData = data;
// Automatically invalidate
_fnInvalidate( ctx[0], this[0], 'data' );
return this;
} );
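// Usage sketch (illustrative; `newData` is a hypothetical replacement object
// or array matching the table's data structure):
//
//   var data = table.row( 0 ).data();        // get the data for row index 0
//   table.row( 0 ).data( newData ).draw();   // set new data, then redraw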
_api_register( 'row().node()', function () {
var ctx = this.context;
return ctx.length && this.length ?
ctx[0].aoData[ this[0] ].nTr || null :
null;
} );
_api_register( 'row.add()', function ( row ) {
// Allow a jQuery object to be passed in - only a single row is added from
// it though - the first element in the set
if ( row instanceof $ && row.length ) {
row = row[0];
}
var rows = this.iterator( 'table', function ( settings ) {
if ( row.nodeName && row.nodeName.toUpperCase() === 'TR' ) {
return _fnAddTr( settings, row )[0];
}
return _fnAddData( settings, row );
} );
// Return an Api.rows() extended instance, with the newly added row selected
return this.row( rows[0] );
} );
var __details_add = function ( ctx, row, data, klass )
{
// Convert to array of TR elements
var rows = [];
var addRow = function ( r, k ) {
// Recursion to allow for arrays of jQuery objects
if ( $.isArray( r ) || r instanceof $ ) {
for ( var i=0, ien=r.length ; i<ien ; i++ ) {
addRow( r[i], k );
}
return;
}
// If we get a TR element, then just add it directly - up to the dev
// to add the correct number of columns etc
if ( r.nodeName && r.nodeName.toLowerCase() === 'tr' ) {
rows.push( r );
}
else {
// Otherwise create a row with a wrapper
var created = $('<tr><td/></tr>').addClass( k );
$('td', created)
.addClass( k )
.html( r )
[0].colSpan = _fnVisbleColumns( ctx );
rows.push( created[0] );
}
};
addRow( data, klass );
if ( row._details ) {
row._details.detach();
}
row._details = $(rows);
// If the children were already shown, that state should be retained
if ( row._detailsShow ) {
row._details.insertAfter( row.nTr );
}
};
var __details_remove = function ( api, idx )
{
var ctx = api.context;
if ( ctx.length ) {
var row = ctx[0].aoData[ idx !== undefined ? idx : api[0] ];
if ( row && row._details ) {
row._details.remove();
row._detailsShow = undefined;
row._details = undefined;
}
}
};
var __details_display = function ( api, show ) {
var ctx = api.context;
if ( ctx.length && api.length ) {
var row = ctx[0].aoData[ api[0] ];
if ( row._details ) {
row._detailsShow = show;
if ( show ) {
row._details.insertAfter( row.nTr );
}
else {
row._details.detach();
}
__details_events( ctx[0] );
}
}
};
var __details_events = function ( settings )
{
var api = new _Api( settings );
var namespace = '.dt.DT_details';
var drawEvent = 'draw'+namespace;
var colvisEvent = 'column-visibility'+namespace;
var destroyEvent = 'destroy'+namespace;
var data = settings.aoData;
api.off( drawEvent +' '+ colvisEvent +' '+ destroyEvent );
if ( _pluck( data, '_details' ).length > 0 ) {
// On each draw, insert the required elements into the document
api.on( drawEvent, function ( e, ctx ) {
if ( settings !== ctx ) {
return;
}
api.rows( {page:'current'} ).eq(0).each( function (idx) {
// Internal data grab
var row = data[ idx ];
if ( row._detailsShow ) {
row._details.insertAfter( row.nTr );
}
} );
} );
// Column visibility change - update the colspan
api.on( colvisEvent, function ( e, ctx, idx, vis ) {
if ( settings !== ctx ) {
return;
}
// Update the colspan for the details rows (note, only if it already has
// a colspan)
var row, visible = _fnVisbleColumns( ctx );
for ( var i=0, ien=data.length ; i<ien ; i++ ) {
row = data[i];
if ( row._details ) {
row._details.children('td[colspan]').attr('colspan', visible );
}
}
} );
// Table destroyed - nuke any child rows
api.on( destroyEvent, function ( e, ctx ) {
if ( settings !== ctx ) {
return;
}
for ( var i=0, ien=data.length ; i<ien ; i++ ) {
if ( data[i]._details ) {
__details_remove( api, i );
}
}
} );
}
};
// Strings for the method names to help minification
var _emp = '';
var _child_obj = _emp+'row().child';
var _child_mth = _child_obj+'()';
// data can be:
// tr
// string
// jQuery or array of any of the above
_api_register( _child_mth, function ( data, klass ) {
var ctx = this.context;
if ( data === undefined ) {
// get
return ctx.length && this.length ?
ctx[0].aoData[ this[0] ]._details :
undefined;
}
else if ( data === true ) {
// show
this.child.show();
}
else if ( data === false ) {
// remove
__details_remove( this );
}
else if ( ctx.length && this.length ) {
// set
__details_add( ctx[0], ctx[0].aoData[ this[0] ], data, klass );
}
return this;
} );
_api_register( [
_child_obj+'.show()',
_child_mth+'.show()' // only when `child()` was called with parameters (without
], function ( show ) { // it returns an object and this method is not executed)
__details_display( this, true );
return this;
} );
_api_register( [
_child_obj+'.hide()',
_child_mth+'.hide()' // only when `child()` was called with parameters (without
], function () { // it returns an object and this method is not executed)
__details_display( this, false );
return this;
} );
_api_register( [
_child_obj+'.remove()',
_child_mth+'.remove()' // only when `child()` was called with parameters (without
], function () { // it returns an object and this method is not executed)
__details_remove( this );
return this;
} );
_api_register( _child_obj+'.isShown()', function () {
var ctx = this.context;
if ( ctx.length && this.length ) {
// _detailsShown as false or undefined will fall through to return false
return ctx[0].aoData[ this[0] ]._detailsShow || false;
}
return false;
} );
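// Usage sketch (illustrative): toggling a details row on click. `format` is a
// hypothetical function returning the HTML for the child row:
//
//   $('#example tbody').on( 'click', 'tr', function () {
//       var row = table.row( this );
//       if ( row.child.isShown() ) {
//           row.child.hide();
//       }
//       else {
//           row.child( format( row.data() ) ).show();
//       }
//   } );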
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Columns
*
* {integer} - column index (>=0 count from left, <0 count from right)
* "{integer}:visIdx" - visible column index (i.e. translate to column index) (>=0 count from left, <0 count from right)
* "{integer}:visible" - alias for {integer}:visIdx (>=0 count from left, <0 count from right)
* "{string}:name" - column name
* "{string}" - jQuery selector on column header nodes
*
*/
// can be an array of these items, comma separated list, or an array of comma
// separated lists
var __re_column_selector = /^([^:]+):(name|visIdx|visible)$/;
// r1 and r2 are redundant - but it means that the parameters match for the
// iterator callback in columns().data()
var __columnData = function ( settings, column, r1, r2, rows ) {
var a = [];
for ( var row=0, ien=rows.length ; row<ien ; row++ ) {
a.push( _fnGetCellData( settings, rows[row], column ) );
}
return a;
};
var __column_selector = function ( settings, selector, opts )
{
var
columns = settings.aoColumns,
names = _pluck( columns, 'sName' ),
nodes = _pluck( columns, 'nTh' );
var run = function ( s ) {
var selInt = _intVal( s );
// Selector - all
if ( s === '' ) {
return _range( columns.length );
}
// Selector - index
if ( selInt !== null ) {
return [ selInt >= 0 ?
selInt : // Count from left
columns.length + selInt // Count from right (+ because it's a negative value)
];
}
// Selector = function
if ( typeof s === 'function' ) {
var rows = _selector_row_indexes( settings, opts );
return $.map( columns, function (col, idx) {
return s(
idx,
__columnData( settings, idx, 0, 0, rows ),
nodes[ idx ]
) ? idx : null;
} );
}
// jQuery or string selector
var match = typeof s === 'string' ?
s.match( __re_column_selector ) :
'';
if ( match ) {
switch( match[2] ) {
case 'visIdx':
case 'visible':
var idx = parseInt( match[1], 10 );
// Visible index given, convert to column index
if ( idx < 0 ) {
// Counting from the right
var visColumns = $.map( columns, function (col,i) {
return col.bVisible ? i : null;
} );
return [ visColumns[ visColumns.length + idx ] ];
}
// Counting from the left
return [ _fnVisibleToColumnIndex( settings, idx ) ];
case 'name':
// match by name. `names` is column index complete and in order
return $.map( names, function (name, i) {
return name === match[1] ? i : null;
} );
default:
return [];
}
}
// Cell in the table body
if ( s.nodeName && s._DT_CellIndex ) {
return [ s._DT_CellIndex.column ];
}
// jQuery selector on the TH elements for the columns
var jqResult = $( nodes )
.filter( s )
.map( function () {
return $.inArray( this, nodes ); // `nodes` is column index complete and in order
} )
.toArray();
if ( jqResult.length || ! s.nodeName ) {
return jqResult;
}
// Otherwise a node which might have a `dt-column` data attribute, or be
// a child of such an element
var host = $(s).closest('*[data-dt-column]');
return host.length ?
[ host.data('dt-column') ] :
[];
};
return _selector_run( 'column', selector, run, settings, opts );
};
var __setColumnVis = function ( settings, column, vis ) {
var
cols = settings.aoColumns,
col = cols[ column ],
data = settings.aoData,
row, cells, i, ien, tr;
// Get
if ( vis === undefined ) {
return col.bVisible;
}
// Set
// No change
if ( col.bVisible === vis ) {
return;
}
if ( vis ) {
// Insert column
// Need to decide if we should use appendChild or insertBefore
var insertBefore = $.inArray( true, _pluck(cols, 'bVisible'), column+1 );
for ( i=0, ien=data.length ; i<ien ; i++ ) {
tr = data[i].nTr;
cells = data[i].anCells;
if ( tr ) {
// insertBefore can act like appendChild if 2nd arg is null
tr.insertBefore( cells[ column ], cells[ insertBefore ] || null );
}
}
}
else {
// Remove column
$( _pluck( settings.aoData, 'anCells', column ) ).detach();
}
// Common actions
col.bVisible = vis;
_fnDrawHead( settings, settings.aoHeader );
_fnDrawHead( settings, settings.aoFooter );
_fnSaveState( settings );
};
_api_register( 'columns()', function ( selector, opts ) {
// argument shifting
if ( selector === undefined ) {
selector = '';
}
else if ( $.isPlainObject( selector ) ) {
opts = selector;
selector = '';
}
opts = _selector_opts( opts );
var inst = this.iterator( 'table', function ( settings ) {
return __column_selector( settings, selector, opts );
}, 1 );
// Want argument shifting here and in _row_selector?
inst.selector.cols = selector;
inst.selector.opts = opts;
return inst;
} );
_api_registerPlural( 'columns().header()', 'column().header()', function ( selector, opts ) {
return this.iterator( 'column', function ( settings, column ) {
return settings.aoColumns[column].nTh;
}, 1 );
} );
_api_registerPlural( 'columns().footer()', 'column().footer()', function ( selector, opts ) {
return this.iterator( 'column', function ( settings, column ) {
return settings.aoColumns[column].nTf;
}, 1 );
} );
_api_registerPlural( 'columns().data()', 'column().data()', function () {
return this.iterator( 'column-rows', __columnData, 1 );
} );
_api_registerPlural( 'columns().dataSrc()', 'column().dataSrc()', function () {
return this.iterator( 'column', function ( settings, column ) {
return settings.aoColumns[column].mData;
}, 1 );
} );
_api_registerPlural( 'columns().cache()', 'column().cache()', function ( type ) {
return this.iterator( 'column-rows', function ( settings, column, i, j, rows ) {
return _pluck_order( settings.aoData, rows,
type === 'search' ? '_aFilterData' : '_aSortData', column
);
}, 1 );
} );
_api_registerPlural( 'columns().nodes()', 'column().nodes()', function () {
return this.iterator( 'column-rows', function ( settings, column, i, j, rows ) {
return _pluck_order( settings.aoData, rows, 'anCells', column ) ;
}, 1 );
} );
_api_registerPlural( 'columns().visible()', 'column().visible()', function ( vis, calc ) {
var ret = this.iterator( 'column', function ( settings, column ) {
if ( vis === undefined ) {
return settings.aoColumns[ column ].bVisible;
} // else
__setColumnVis( settings, column, vis );
} );
// Group the column visibility changes
if ( vis !== undefined ) {
// Second loop once the first is done for events
this.iterator( 'column', function ( settings, column ) {
_fnCallbackFire( settings, null, 'column-visibility', [settings, column, vis, calc] );
} );
if ( calc === undefined || calc ) {
this.columns.adjust();
}
}
return ret;
} );
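// Usage sketch (illustrative):
//
//   table.column( 0 ).visible( false );         // hide the first column
//   table.columns( [ 1, 2 ] ).visible( true );  // show columns 1 and 2
//   table.column( 3 ).visible( false, false );  // hide, deferring the width
//   table.columns.adjust().draw( false );       // recalculation until here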
_api_registerPlural( 'columns().indexes()', 'column().index()', function ( type ) {
return this.iterator( 'column', function ( settings, column ) {
return type === 'visible' ?
_fnColumnIndexToVisible( settings, column ) :
column;
}, 1 );
} );
_api_register( 'columns.adjust()', function () {
return this.iterator( 'table', function ( settings ) {
_fnAdjustColumnSizing( settings );
}, 1 );
} );
_api_register( 'column.index()', function ( type, idx ) {
if ( this.context.length !== 0 ) {
var ctx = this.context[0];
if ( type === 'fromVisible' || type === 'toData' ) {
return _fnVisibleToColumnIndex( ctx, idx );
}
else if ( type === 'fromData' || type === 'toVisible' ) {
return _fnColumnIndexToVisible( ctx, idx );
}
}
} );
_api_register( 'column()', function ( selector, opts ) {
return _selector_first( this.columns( selector, opts ) );
} );
var __cell_selector = function ( settings, selector, opts )
{
var data = settings.aoData;
var rows = _selector_row_indexes( settings, opts );
var cells = _removeEmpty( _pluck_order( data, rows, 'anCells' ) );
var allCells = $( [].concat.apply([], cells) );
var row;
var columns = settings.aoColumns.length;
var a, i, ien, j, o, host;
var run = function ( s ) {
var fnSelector = typeof s === 'function';
if ( s === null || s === undefined || fnSelector ) {
// All cells and function selectors
a = [];
for ( i=0, ien=rows.length ; i<ien ; i++ ) {
row = rows[i];
for ( j=0 ; j<columns ; j++ ) {
o = {
row: row,
column: j
};
if ( fnSelector ) {
// Selector - function
host = data[ row ];
if ( s( o, _fnGetCellData(settings, row, j), host.anCells ? host.anCells[j] : null ) ) {
a.push( o );
}
}
else {
// Selector - all
a.push( o );
}
}
}
return a;
}
// Selector - index
if ( $.isPlainObject( s ) ) {
return [s];
}
// Selector - jQuery filtered cells
var jqResult = allCells
.filter( s )
.map( function (i, el) {
return { // use a new object, in case someone changes the values
row: el._DT_CellIndex.row,
column: el._DT_CellIndex.column
};
} )
.toArray();
if ( jqResult.length || ! s.nodeName ) {
return jqResult;
}
// Otherwise the selector is a node, and there is one last option - the
// element might be a child of an element which has dt-row and dt-column
// data attributes
host = $(s).closest('*[data-dt-row]');
return host.length ?
[ {
row: host.data('dt-row'),
column: host.data('dt-column')
} ] :
[];
};
return _selector_run( 'cell', selector, run, settings, opts );
};
_api_register( 'cells()', function ( rowSelector, columnSelector, opts ) {
// Argument shifting
if ( $.isPlainObject( rowSelector ) ) {
// Indexes
if ( rowSelector.row === undefined ) {
// Selector options in first parameter
opts = rowSelector;
rowSelector = null;
}
else {
// Cell index objects in first parameter
opts = columnSelector;
columnSelector = null;
}
}
if ( $.isPlainObject( columnSelector ) ) {
opts = columnSelector;
columnSelector = null;
}
// Cell selector
if ( columnSelector === null || columnSelector === undefined ) {
return this.iterator( 'table', function ( settings ) {
return __cell_selector( settings, rowSelector, _selector_opts( opts ) );
} );
}
// Row + column selector
var columns = this.columns( columnSelector, opts );
var rows = this.rows( rowSelector, opts );
var a, i, ien, j, jen;
var cells = this.iterator( 'table', function ( settings, idx ) {
a = [];
for ( i=0, ien=rows[idx].length ; i<ien ; i++ ) {
for ( j=0, jen=columns[idx].length ; j<jen ; j++ ) {
a.push( {
row: rows[idx][i],
column: columns[idx][j]
} );
}
}
return a;
}, 1 );
$.extend( cells.selector, {
cols: columnSelector,
rows: rowSelector,
opts: opts
} );
return cells;
} );
_api_registerPlural( 'cells().nodes()', 'cell().node()', function () {
return this.iterator( 'cell', function ( settings, row, column ) {
var data = settings.aoData[ row ];
return data && data.anCells ?
data.anCells[ column ] :
undefined;
}, 1 );
} );
_api_register( 'cells().data()', function () {
return this.iterator( 'cell', function ( settings, row, column ) {
return _fnGetCellData( settings, row, column );
}, 1 );
} );
_api_registerPlural( 'cells().cache()', 'cell().cache()', function ( type ) {
type = type === 'search' ? '_aFilterData' : '_aSortData';
return this.iterator( 'cell', function ( settings, row, column ) {
return settings.aoData[ row ][ type ][ column ];
}, 1 );
} );
_api_registerPlural( 'cells().render()', 'cell().render()', function ( type ) {
return this.iterator( 'cell', function ( settings, row, column ) {
return _fnGetCellData( settings, row, column, type );
}, 1 );
} );
_api_registerPlural( 'cells().indexes()', 'cell().index()', function () {
return this.iterator( 'cell', function ( settings, row, column ) {
return {
row: row,
column: column,
columnVisible: _fnColumnIndexToVisible( settings, column )
};
}, 1 );
} );
_api_registerPlural( 'cells().invalidate()', 'cell().invalidate()', function ( src ) {
return this.iterator( 'cell', function ( settings, row, column ) {
_fnInvalidate( settings, row, src, column );
} );
} );
_api_register( 'cell()', function ( rowSelector, columnSelector, opts ) {
return _selector_first( this.cells( rowSelector, columnSelector, opts ) );
} );
_api_register( 'cell().data()', function ( data ) {
var ctx = this.context;
var cell = this[0];
if ( data === undefined ) {
// Get
return ctx.length && cell.length ?
_fnGetCellData( ctx[0], cell[0].row, cell[0].column ) :
undefined;
}
// Set
_fnSetCellData( ctx[0], cell[0].row, cell[0].column, data );
_fnInvalidate( ctx[0], cell[0].row, 'data', cell[0].column );
return this;
} );
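// Usage sketch (illustrative):
//
//   var value = table.cell( 0, 0 ).data();        // get the top-left cell
//   table.cell( 0, 0 ).data( 'Updated' ).draw();  // set, invalidate and redraw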
/**
* Get current ordering (sorting) that has been applied to the table.
*
* @returns {array} 2D array containing the sorting information for the first
* table in the current context. Each element in the parent array represents
* a column being sorted upon (i.e. multi-sorting with two columns would have
* 2 inner arrays). The inner arrays may have 2 or 3 elements. The first is
* the column index that the sorting condition applies to, the second is the
* direction of the sort (`desc` or `asc`) and, optionally, the third is the
* index of the sorting order from the `column.sorting` initialisation array.
*//**
* Set the ordering for the table.
*
* @param {integer} order Column index to sort upon.
* @param {string} direction Direction of the sort to be applied (`asc` or `desc`)
* @returns {DataTables.Api} this
*//**
* Set the ordering for the table.
*
* @param {array} order 1D array of sorting information to be applied.
* @param {array} [...] Optional additional sorting conditions
* @returns {DataTables.Api} this
*//**
* Set the ordering for the table.
*
* @param {array} order 2D array of sorting information to be applied.
* @returns {DataTables.Api} this
*/
_api_register( 'order()', function ( order, dir ) {
var ctx = this.context;
if ( order === undefined ) {
// get
return ctx.length !== 0 ?
ctx[0].aaSorting :
undefined;
}
// set
if ( typeof order === 'number' ) {
// Simple column / direction passed in
order = [ [ order, dir ] ];
}
else if ( order.length && ! $.isArray( order[0] ) ) {
// Arguments passed in (list of 1D arrays)
order = Array.prototype.slice.call( arguments );
}
// otherwise a 2D array was passed in
return this.iterator( 'table', function ( settings ) {
settings.aaSorting = order.slice();
} );
} );
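// Usage sketch (illustrative) of the call forms documented above:
//
//   table.order();                                          // get
//   table.order( 0, 'desc' ).draw();                        // column + direction
//   table.order( [ 0, 'asc' ], [ 1, 'desc' ] ).draw();      // list of 1D arrays
//   table.order( [ [ 0, 'asc' ], [ 1, 'desc' ] ] ).draw();  // 2D array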
/**
* Attach a sort listener to an element for a given column
*
* @param {node|jQuery|string} node Identifier for the element(s) to attach the
* listener to. This can take the form of a single DOM node, a jQuery
* collection of nodes or a jQuery selector which will identify the node(s).
* @param {integer} column the column that a click on this node will sort on
* @param {function} [callback] callback function when sort is run
* @returns {DataTables.Api} this
*/
_api_register( 'order.listener()', function ( node, column, callback ) {
return this.iterator( 'table', function ( settings ) {
_fnSortAttachListener( settings, node, column, callback );
} );
} );
_api_register( 'order.fixed()', function ( set ) {
if ( ! set ) {
var ctx = this.context;
var fixed = ctx.length ?
ctx[0].aaSortingFixed :
undefined;
return $.isArray( fixed ) ?
{ pre: fixed } :
fixed;
}
return this.iterator( 'table', function ( settings ) {
settings.aaSortingFixed = $.extend( true, {}, set );
} );
} );
// Order by the selected column(s)
_api_register( [
'columns().order()',
'column().order()'
], function ( dir ) {
var that = this;
return this.iterator( 'table', function ( settings, i ) {
var sort = [];
$.each( that[i], function (j, col) {
sort.push( [ col, dir ] );
} );
settings.aaSorting = sort;
} );
} );
_api_register( 'search()', function ( input, regex, smart, caseInsen ) {
var ctx = this.context;
if ( input === undefined ) {
// get
return ctx.length !== 0 ?
ctx[0].oPreviousSearch.sSearch :
undefined;
}
// set
return this.iterator( 'table', function ( settings ) {
if ( ! settings.oFeatures.bFilter ) {
return;
}
_fnFilterComplete( settings, $.extend( {}, settings.oPreviousSearch, {
"sSearch": input+"",
"bRegex": regex === null ? false : regex,
"bSmart": smart === null ? true : smart,
"bCaseInsensitive": caseInsen === null ? true : caseInsen
} ), 1 );
} );
} );
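// Usage sketch (illustrative; `draw()` applies the new search to the table):
//
//   table.search( 'London' ).draw();                  // default smart search
//   table.search( '^Lon', true, false, true ).draw(); // regex, smart disabled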
_api_registerPlural(
'columns().search()',
'column().search()',
function ( input, regex, smart, caseInsen ) {
return this.iterator( 'column', function ( settings, column ) {
var preSearch = settings.aoPreSearchCols;
if ( input === undefined ) {
// get
return preSearch[ column ].sSearch;
}
// set
if ( ! settings.oFeatures.bFilter ) {
return;
}
$.extend( preSearch[ column ], {
"sSearch": input+"",
"bRegex": regex === null ? false : regex,
"bSmart": smart === null ? true : smart,
"bCaseInsensitive": caseInsen === null ? true : caseInsen
} );
_fnFilterComplete( settings, settings.oPreviousSearch, 1 );
} );
}
);
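// Usage sketch (illustrative): filtering a single column, e.g. from an input:
//
//   table.column( 2 ).search( 'Edinburgh' ).draw();  // filter the third column
//   table.column( 2 ).search( '' ).draw();           // clear that column's filter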
/*
* State API methods
*/
_api_register( 'state()', function () {
return this.context.length ?
this.context[0].oSavedState :
null;
} );
_api_register( 'state.clear()', function () {
return this.iterator( 'table', function ( settings ) {
// Save an empty object
settings.fnStateSaveCallback.call( settings.oInstance, settings, {} );
} );
} );
_api_register( 'state.loaded()', function () {
return this.context.length ?
this.context[0].oLoadedState :
null;
} );
_api_register( 'state.save()', function () {
return this.iterator( 'table', function ( settings ) {
_fnSaveState( settings );
} );
} );
/**
* Provide a common method for plug-ins to check the version of DataTables being
* used, in order to ensure compatibility.
*
* @param {string} version Version string to check for, in the format "X.Y.Z".
* Note that the formats "X" and "X.Y" are also acceptable.
* @returns {boolean} true if this version of DataTables is greater or equal to
* the required version, or false if this version of DataTables is not
* suitable
* @static
* @dtopt API-Static
*
* @example
* alert( $.fn.dataTable.versionCheck( '1.9.0' ) );
*/
DataTable.versionCheck = DataTable.fnVersionCheck = function( version )
{
var aThis = DataTable.version.split('.');
var aThat = version.split('.');
var iThis, iThat;
for ( var i=0, iLen=aThat.length ; i<iLen ; i++ ) {
iThis = parseInt( aThis[i], 10 ) || 0;
iThat = parseInt( aThat[i], 10 ) || 0;
// Parts are the same, keep comparing
if (iThis === iThat) {
continue;
}
// Parts are different, return immediately
return iThis > iThat;
}
return true;
};
/**
* Check if a `<table>` node is a DataTable table already or not.
*
* @param {node|jquery|string} table Table node, jQuery object or jQuery
* selector for the table to test. Note that if more than one
* table is passed in, only the first will be checked
* @returns {boolean} true the table given is a DataTable, or false otherwise
* @static
* @dtopt API-Static
*
* @example
* if ( ! $.fn.DataTable.isDataTable( '#example' ) ) {
* $('#example').dataTable();
* }
*/
DataTable.isDataTable = DataTable.fnIsDataTable = function ( table )
{
var t = $(table).get(0);
var is = false;
if ( table instanceof DataTable.Api ) {
return true;
}
$.each( DataTable.settings, function (i, o) {
var head = o.nScrollHead ? $('table', o.nScrollHead)[0] : null;
var foot = o.nScrollFoot ? $('table', o.nScrollFoot)[0] : null;
if ( o.nTable === t || head === t || foot === t ) {
is = true;
}
} );
return is;
};
/**
* Get all DataTable tables that have been initialised - optionally you can
* select to get only currently visible tables.
*
* @param {boolean} [visible=false] Flag to indicate if you want all (default)
* or visible tables only.
* @returns {array} Array of `table` nodes (not DataTable instances) which are
* DataTables
* @static
* @dtopt API-Static
*
* @example
* $.each( $.fn.dataTable.tables(true), function () {
* $( this ).DataTable().columns.adjust();
* } );
*/
DataTable.tables = DataTable.fnTables = function ( visible )
{
var api = false;
if ( $.isPlainObject( visible ) ) {
api = visible.api;
visible = visible.visible;
}
var a = $.map( DataTable.settings, function (o) {
if ( !visible || (visible && $(o.nTable).is(':visible')) ) {
return o.nTable;
}
} );
return api ?
new _Api( a ) :
a;
};
/**
* Convert from camel case parameters to Hungarian notation. This is made public
* for the extensions to provide the same ability as DataTables core to accept
* either the 1.9 style Hungarian notation, or the 1.10+ style camelCase
* parameters.
*
* @param {object} src The model object which holds all parameters that can be
* mapped.
* @param {object} user The object to convert from camel case to Hungarian.
* @param {boolean} force When set to `true`, properties which already have a
* Hungarian value in the `user` object will be overwritten. Otherwise they
* won't be.
*/
DataTable.camelToHungarian = _fnCamelToHungarian;
/**
 * Perform a jQuery selection on the table's rows, matching both the TR
 * elements themselves and their descendant nodes.
 */
_api_register( '$()', function ( selector, opts ) {
var
rows = this.rows( opts ).nodes(), // Get all rows
jqRows = $(rows);
return $( [].concat(
jqRows.filter( selector ).toArray(),
jqRows.find( selector ).toArray()
) );
} );
// jQuery functions to operate on the tables
$.each( [ 'on', 'one', 'off' ], function (i, key) {
_api_register( key+'()', function ( /* event, handler */ ) {
var args = Array.prototype.slice.call(arguments);
// Add the `dt` namespace automatically if it isn't already present
args[0] = $.map( args[0].split( /\s/ ), function ( e ) {
return ! e.match(/\.dt\b/) ?
e+'.dt' :
e;
} ).join( ' ' );
var inst = $( this.tables().nodes() );
inst[key].apply( inst, args );
return this;
} );
} );
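// Usage sketch (illustrative; the `dt` namespace is appended automatically):
//
//   table.on( 'draw', function () {
//       console.log( 'Table redrawn' );
//   } );
//   table.one( 'xhr', function ( e, settings, json ) {
//       console.log( 'First Ajax load complete' );   // fires once only
//   } );
//   table.off( 'draw' );                             // remove the handler above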
_api_register( 'clear()', function () {
return this.iterator( 'table', function ( settings ) {
_fnClearTable( settings );
} );
} );
_api_register( 'settings()', function () {
return new _Api( this.context, this.context );
} );
_api_register( 'init()', function () {
var ctx = this.context;
return ctx.length ? ctx[0].oInit : null;
} );
_api_register( 'data()', function () {
return this.iterator( 'table', function ( settings ) {
return _pluck( settings.aoData, '_aData' );
} ).flatten();
} );
_api_register( 'destroy()', function ( remove ) {
remove = remove || false;
return this.iterator( 'table', function ( settings ) {
var orig = settings.nTableWrapper.parentNode;
var classes = settings.oClasses;
var table = settings.nTable;
var tbody = settings.nTBody;
var thead = settings.nTHead;
var tfoot = settings.nTFoot;
var jqTable = $(table);
var jqTbody = $(tbody);
var jqWrapper = $(settings.nTableWrapper);
var rows = $.map( settings.aoData, function (r) { return r.nTr; } );
var i, ien;
// Flag to note that the table is currently being destroyed - no action
// should be taken
settings.bDestroying = true;
// Fire off the destroy callbacks for plug-ins etc
_fnCallbackFire( settings, "aoDestroyCallback", "destroy", [settings] );
// If not being removed from the document, make all columns visible
if ( ! remove ) {
new _Api( settings ).columns().visible( true );
}
// Blitz all `DT` namespaced events (these are internal events, the
// lowercase, `dt` events are user subscribed and they are responsible
// for removing them)
jqWrapper.off('.DT').find(':not(tbody *)').off('.DT');
$(window).off('.DT-'+settings.sInstance);
// When scrolling we had to break the table up - restore it
if ( table != thead.parentNode ) {
jqTable.children('thead').detach();
jqTable.append( thead );
}
if ( tfoot && table != tfoot.parentNode ) {
jqTable.children('tfoot').detach();
jqTable.append( tfoot );
}
settings.aaSorting = [];
settings.aaSortingFixed = [];
_fnSortingClasses( settings );
$( rows ).removeClass( settings.asStripeClasses.join(' ') );
$('th, td', thead).removeClass( classes.sSortable+' '+
classes.sSortableAsc+' '+classes.sSortableDesc+' '+classes.sSortableNone
);
// Add the TR elements back into the table in their original order
jqTbody.children().detach();
jqTbody.append( rows );
// Remove the DataTables generated nodes, events and classes
var removedMethod = remove ? 'remove' : 'detach';
jqTable[ removedMethod ]();
jqWrapper[ removedMethod ]();
// If we need to reattach the table to the document
if ( ! remove && orig ) {
// insertBefore acts like appendChild if !arg[1]
orig.insertBefore( table, settings.nTableReinsertBefore );
// Restore the width of the original table - was read from the style property,
// so we can restore directly to that
jqTable
.css( 'width', settings.sDestroyWidth )
.removeClass( classes.sTable );
// If there were originally stripe classes - then we add them back here.
// Note this is not fool-proof (for example if not all rows had stripe
// classes) - but it's a good effort without getting carried away
ien = settings.asDestroyStripes.length;
if ( ien ) {
jqTbody.children().each( function (i) {
$(this).addClass( settings.asDestroyStripes[i % ien] );
} );
}
}
/* Remove the settings object from the settings array */
var idx = $.inArray( settings, DataTable.settings );
if ( idx !== -1 ) {
DataTable.settings.splice( idx, 1 );
}
} );
} );
// Add the `every()` method for rows, columns and cells in a compact form
$.each( [ 'column', 'row', 'cell' ], function ( i, type ) {
_api_register( type+'s().every()', function ( fn ) {
var opts = this.selector.opts;
var api = this;
return this.iterator( type, function ( settings, arg1, arg2, arg3, arg4 ) {
// Rows and columns:
// arg1 - index
// arg2 - table counter
// arg3 - loop counter
// arg4 - undefined
// Cells:
// arg1 - row index
// arg2 - column index
// arg3 - table counter
// arg4 - loop counter
fn.call(
api[ type ](
arg1,
type==='cell' ? arg2 : opts,
type==='cell' ? opts : undefined
),
arg1, arg2, arg3, arg4
);
} );
} );
} );
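// Usage sketch (illustrative): inside the callback `this` is an API instance
// for the current item, created with the original selector options:
//
//   table.rows().every( function ( rowIdx, tableLoop, rowLoop ) {
//       var data = this.data();
//       // ... act on each row's data here
//   } );
//   table.columns().every( function ( colIdx ) {
//       console.log( 'Header: ' + $( this.header() ).text() );
//   } );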
// i18n method for extensions to be able to use the language object from the
// DataTable
_api_register( 'i18n()', function ( token, def, plural ) {
var ctx = this.context[0];
var resolved = _fnGetObjectDataFn( token )( ctx.oLanguage );
if ( resolved === undefined ) {
resolved = def;
}
if ( plural !== undefined && $.isPlainObject( resolved ) ) {
resolved = resolved[ plural ] !== undefined ?
resolved[ plural ] :
resolved._;
}
return resolved.replace( '%d', plural ); // nb: plural might be undefined
} );
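// Usage sketch (illustrative; the token is resolved with dotted notation
// against `oLanguage`, and `count` is a hypothetical row count used to pick
// the plural form):
//
//   var msg = table.i18n( 'select.rows', {
//       _: '%d rows selected',   // default / plural fallback
//       1: '1 row selected'
//   }, count );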
/**
* Version string for plug-ins to check compatibility. Allowed format is
* `a.b.c-d` where: a:int, b:int, c:int, d:string(dev|beta|alpha). `d` is used
* only for non-release builds. See http://semver.org/ for more information.
* @member
* @type string
* @default Version number
*/
DataTable.version = "1.10.16";
/**
* Private data store, containing all of the settings objects that are
* created for the tables on a given page.
*
* Note that the `DataTable.settings` object is aliased to
* `jQuery.fn.dataTableExt` through which it may be accessed and
* manipulated, or `jQuery.fn.dataTable.settings`.
* @member
* @type array
* @default []
* @private
*/
DataTable.settings = [];
/**
* Object models container, for the various models that DataTables has
* available to it. These models define the objects that are used to hold
* the active state and configuration of the table.
* @namespace
*/
DataTable.models = {};
/**
* Template object for the way in which DataTables holds information about
* search information for the global filter and individual column filters.
* @namespace
*/
DataTable.models.oSearch = {
/**
* Flag to indicate if the filtering should be case insensitive or not
* @type boolean
* @default true
*/
"bCaseInsensitive": true,
/**
* Applied search term
* @type string
* @default <i>Empty string</i>
*/
"sSearch": "",
/**
* Flag to indicate if the search term should be interpreted as a
* regular expression (true) or not (false), in which case any special
* regex characters will be escaped.
* @type boolean
* @default false
*/
"bRegex": false,
/**
* Flag to indicate if DataTables is to use its smart filtering or not.
* @type boolean
* @default true
*/
"bSmart": true
};
/**
* Template object for the way in which DataTables holds information about
* each individual row. This is the object format used for the settings
* aoData array.
* @namespace
*/
DataTable.models.oRow = {
/**
* TR element for the row
* @type node
* @default null
*/
"nTr": null,
/**
* Array of TD elements for each row. This is null until the row has been
* created.
* @type array nodes
* @default []
*/
"anCells": null,
/**
* Data object from the original data source for the row. This is either
* an array if using the traditional form of DataTables, or an object if
* using mData options. The exact type will depend on the passed in
* data from the data source, or will be an array if using a DOM data
* source.
* @type array|object
* @default []
*/
"_aData": [],
/**
* Sorting data cache - this array is ostensibly the same length as the
* number of columns (although each index is generated only as it is
* needed), and holds the data that is used for sorting each column in the
* row. We do this cache generation at the start of the sort in order that
* the formatting of the sort data need be done only once for each cell
* per sort. This array should not be read from or written to by anything
* other than the master sorting methods.
* @type array
* @default null
* @private
*/
"_aSortData": null,
/**
* Per cell filtering data cache. As per the sort data cache, used to
* increase the performance of the filtering in DataTables
* @type array
* @default null
* @private
*/
"_aFilterData": null,
/**
* Filtering data cache. This is the same as the cell filtering cache, but
* in this case a string rather than an array. This is easily computed with
* a join on `_aFilterData`, but is provided as a cache so the join isn't
* needed on every search (memory traded for performance)
* @type string
* @default null
* @private
*/
"_sFilterRow": null,
/**
* Cache of the class name that DataTables has applied to the row, so we
* can quickly look at this variable rather than needing to do a DOM check
* on className for the nTr property.
* @type string
* @default <i>Empty string</i>
* @private
*/
"_sRowStripe": "",
/**
* Denote if the original data source was from the DOM, or the data source
* object. This is used for invalidating data, so DataTables can
* automatically read data from the original source, unless instructed
* otherwise.
* @type string
* @default null
* @private
*/
"src": null,
/**
* Index in the aoData array. This saves an indexOf lookup when we have the
* object, but want to know the index
* @type integer
* @default -1
* @private
*/
"idx": -1
};
/**
* Template object for the column information object in DataTables. This object
* is held in the settings aoColumns array and contains all the information that
* DataTables needs about each individual column.
*
* Note that this object is related to {@link DataTable.defaults.column}
* but this one is the internal data store for DataTables's cache of columns.
* It should NOT be manipulated outside of DataTables. Any configuration should
* be done through the initialisation options.
* @namespace
*/
DataTable.models.oColumn = {
/**
* Column index. This could be worked out on-the-fly with $.inArray, but it
* is faster to just hold it as a variable
* @type integer
* @default null
*/
"idx": null,
/**
* A list of the columns that sorting should occur on when this column
* is sorted. That this property is an array allows multi-column sorting
* to be defined for a column (for example first name / last name columns
* would benefit from this). The values are integers pointing to the
* columns to be sorted on (typically it will be a single integer pointing
* at itself, but that doesn't need to be the case).
* @type array
*/
"aDataSort": null,
/**
* Define the sorting directions that are applied to the column, in sequence
* as the column is repeatedly sorted upon - i.e. the first value is used
* as the sorting direction when the column is first sorted (clicked on).
* Sort it again (click again) and it will move on to the next index.
* Repeat until the end of the array is reached, then loop back to the start.
* @type array
*/
"asSorting": null,
/**
* Flag to indicate if the column is searchable, and thus should be included
* in the filtering or not.
* @type boolean
*/
"bSearchable": null,
/**
* Flag to indicate if the column is sortable or not.
* @type boolean
*/
"bSortable": null,
/**
* Flag to indicate if the column is currently visible in the table or not
* @type boolean
*/
"bVisible": null,
/**
* Store for manual type assignment using the `column.type` option. This
* is held in store so we can manipulate the column's `sType` property.
* @type string
* @default null
* @private
*/
"_sManualType": null,
/**
* Flag to indicate if HTML5 data attributes should be used as the data
* source for filtering or sorting. True if either is used.
* @type boolean
* @default false
* @private
*/
"_bAttrSrc": false,
/**
* Developer definable function that is called whenever a cell is created (Ajax source,
* etc) or processed for input (DOM source). This can be used as a complement to mRender
* allowing you to modify the DOM element (add background colour for example) when the
* element is available.
* @type function
* @param {element} nTd The TD node that has been created
* @param {*} sData The Data for the cell
* @param {array|object} oData The data for the whole row
* @param {int} iRow The row index for the aoData data store
* @default null
*/
"fnCreatedCell": null,
/**
* Function to get data from a cell in a column. You should <b>never</b>
* access data directly through _aData internally in DataTables - always use
* the method attached to this property. It allows mData to function as
* required. This function is automatically assigned by the column
* initialisation method
* @type function
* @param {array|object} oData The data array/object for the array
* (i.e. aoData[]._aData)
* @param {string} sSpecific The specific data type you want to get -
* 'display', 'type', 'filter' or 'sort'
* @returns {*} The data for the cell from the given row's data
* @default null
*/
"fnGetData": null,
/**
* Function to set data for a cell in the column. You should <b>never</b>
* set the data directly to _aData internally in DataTables - always use
* this method. It allows mData to function as required. This function
* is automatically assigned by the column initialisation method
* @type function
* @param {array|object} oData The data array/object for the array
* (i.e. aoData[]._aData)
* @param {*} sValue Value to set
* @default null
*/
"fnSetData": null,
/**
* Property to read the value for the cells in the column from the data
* source array / object. If null, then the default content is used, if a
* function is given then the return from the function is used.
* @type function|int|string|null
* @default null
*/
"mData": null,
/**
* Partner property to mData which is used (only when defined) to get
* the data - i.e. it is basically the same as mData, but without the
* 'set' option, and also the data fed to it is the result from mData.
* This is the rendering method to match the data method of mData.
* @type function|int|string|null
* @default null
*/
"mRender": null,
/**
* Unique header TH/TD element for this column - this is what the sorting
* listener is attached to (if sorting is enabled).
* @type node
* @default null
*/
"nTh": null,
/**
* Unique footer TH/TD element for this column (if there is one). Not used
* in DataTables as such, but can be used for plug-ins to reference the
* footer for each column.
* @type node
* @default null
*/
"nTf": null,
/**
* The class to apply to all TD elements in the table's TBODY for the column
* @type string
* @default null
*/
"sClass": null,
/**
* When DataTables calculates the column widths to assign to each column,
* it finds the longest string in each column and then constructs a
* temporary table and reads the widths from that. The problem with this
* is that "mmm" is much wider then "iiii", but the latter is a longer
* string - thus the calculation can go wrong (doing it properly and putting
* it into an DOM object and measuring that is horribly(!) slow). Thus as
* a "work around" we provide this option. It will append its value to the
* text that is found to be the longest string for the column - i.e. padding.
* @type string
*/
"sContentPadding": null,
/**
* Allows a default value to be given for a column's data, and will be used
* whenever a null data source is encountered (this can be because mData
* is set to null, or because the data source itself is null).
* @type string
* @default null
*/
"sDefaultContent": null,
/**
* Name for the column, allowing reference to the column by name as well as
* by index (needs a lookup to work by name).
* @type string
*/
"sName": null,
/**
* Custom sorting data type - defines which of the available plug-ins in
* afnSortData the custom sorting will use - if any is defined.
* @type string
* @default std
*/
"sSortDataType": 'std',
/**
* Class to be applied to the header element when sorting on this column
* @type string
* @default null
*/
"sSortingClass": null,
/**
* Class to be applied to the header element when sorting on this column -
* when jQuery UI theming is used.
* @type string
* @default null
*/
"sSortingClassJUI": null,
/**
* Title of the column - what is seen in the TH element (nTh).
* @type string
*/
"sTitle": null,
/**
* Column sorting and filtering type
* @type string
* @default null
*/
"sType": null,
/**
* Width of the column
* @type string
* @default null
*/
"sWidth": null,
/**
* Width of the column when it was first "encountered"
* @type string
* @default null
*/
"sWidthOrig": null
};
/*
* Developer note: The properties of the object below are given in Hungarian
* notation, which was used as the interface for DataTables prior to v1.10; however,
* from v1.10 onwards the primary interface is camel case. In order to avoid
* breaking backwards compatibility utterly with this change, the Hungarian
* version is still, internally, the primary interface, but it is not documented
* - hence the @name tags in each doc comment. This allows a Javascript function
* to create a map from Hungarian notation to camel case (going the other direction
* would require each property to be listed, which would add around 3K to the size
* of DataTables, while this method is about a 0.5K hit).
*
* Ultimately this does pave the way for Hungarian notation to be dropped
* completely, but that is a massive amount of work and will break current
* installs (therefore is on-hold until v2).
*/
/**
* Initialisation options that can be given to DataTables at initialisation
* time.
* @namespace
*/
DataTable.defaults = {
/**
* An array of data to use for the table, passed in at initialisation which
* will be used in preference to any data which is already in the DOM. This is
* particularly useful for constructing tables purely in Javascript, for
* example with a custom Ajax call.
* @type array
* @default null
*
* @dtopt Option
* @name DataTable.defaults.data
*
* @example
* // Using a 2D array data source
* $(document).ready( function () {
* $('#example').dataTable( {
* "data": [
* ['Trident', 'Internet Explorer 4.0', 'Win 95+', 4, 'X'],
* ['Trident', 'Internet Explorer 5.0', 'Win 95+', 5, 'C'],
* ],
* "columns": [
* { "title": "Engine" },
* { "title": "Browser" },
* { "title": "Platform" },
* { "title": "Version" },
* { "title": "Grade" }
* ]
* } );
* } );
*
* @example
* // Using an array of objects as a data source (`data`)
* $(document).ready( function () {
* $('#example').dataTable( {
* "data": [
* {
* "engine": "Trident",
* "browser": "Internet Explorer 4.0",
* "platform": "Win 95+",
* "version": 4,
* "grade": "X"
* },
* {
* "engine": "Trident",
* "browser": "Internet Explorer 5.0",
* "platform": "Win 95+",
* "version": 5,
* "grade": "C"
* }
* ],
* "columns": [
* { "title": "Engine", "data": "engine" },
* { "title": "Browser", "data": "browser" },
* { "title": "Platform", "data": "platform" },
* { "title": "Version", "data": "version" },
* { "title": "Grade", "data": "grade" }
* ]
* } );
* } );
*/
"aaData": null,
/**
* If ordering is enabled, then DataTables will perform a first pass sort on
* initialisation. You can define which column(s) the sort is performed
* upon, and the sorting direction, with this variable. The `sorting` array
* should contain an array for each column to be sorted initially containing
* the column's index and a direction string ('asc' or 'desc').
* @type array
* @default [[0,'asc']]
*
* @dtopt Option
* @name DataTable.defaults.order
*
* @example
* // Sort by 3rd column first, and then 4th column
* $(document).ready( function() {
* $('#example').dataTable( {
* "order": [[2,'asc'], [3,'desc']]
* } );
* } );
*
* // No initial sorting
* $(document).ready( function() {
* $('#example').dataTable( {
* "order": []
* } );
* } );
*/
"aaSorting": [[0,'asc']],
/**
* This parameter is basically identical to the `sorting` parameter, but
* cannot be overridden by user interaction with the table. What this means
* is that you could have a column (visible or hidden) which the sorting
* will always be forced on first - any sorting after that (from the user)
* will then be performed as required. This can be useful for grouping rows
* together.
* @type array
* @default null
*
* @dtopt Option
* @name DataTable.defaults.orderFixed
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "orderFixed": [[0,'asc']]
* } );
* } )
*/
"aaSortingFixed": [],
/**
* DataTables can be instructed to load data to display in the table from a
* Ajax source. This option defines how that Ajax call is made and where to.
*
* The `ajax` property has three different modes of operation, depending on
* how it is defined. These are:
*
* * `string` - Set the URL from where the data should be loaded from.
* * `object` - Define properties for `jQuery.ajax`.
* * `function` - Custom data get function
*
* `string`
* --------
*
* As a string, the `ajax` property simply defines the URL from which
* DataTables will load data.
*
* `object`
* --------
*
* As an object, the parameters in the object are passed to
* [jQuery.ajax](http://api.jquery.com/jQuery.ajax/) allowing fine control
* of the Ajax request. DataTables has a number of default parameters which
* you can override using this option. Please refer to the jQuery
* documentation for a full description of the options available, although
* the following parameters provide additional options in DataTables or
* require special consideration:
*
* * `data` - As with jQuery, `data` can be provided as an object, but it
* can also be used as a function to manipulate the data DataTables sends
* to the server. The function takes a single parameter, an object of
* parameters with the values that DataTables has readied for sending. An
* object may be returned which will be merged into the DataTables
* defaults, or you can add the items to the object that was passed in and
* not return anything from the function. This supersedes `fnServerParams`
* from DataTables 1.9-.
*
* * `dataSrc` - By default DataTables will look for the property `data` (or
* `aaData` for compatibility with DataTables 1.9-) when obtaining data
* from an Ajax source or for server-side processing - this parameter
* allows that property to be changed. You can use Javascript dotted
* object notation to get a data source for multiple levels of nesting, or
* it may be used as a function. As a function it takes a single parameter,
* the JSON returned from the server, which can be manipulated as
* required, with the returned value being that used by DataTables as the
* data source for the table. This supersedes `sAjaxDataProp` from
* DataTables 1.9-.
*
 * * `success` - Should not be overridden as it is used internally in
* DataTables. To manipulate / transform the data returned by the server
* use `ajax.dataSrc`, or use `ajax` as a function (see below).
*
* `function`
* ----------
*
* As a function, making the Ajax call is left up to yourself allowing
* complete control of the Ajax request. Indeed, if desired, a method other
* than Ajax could be used to obtain the required data, such as Web storage
* or an AIR database.
*
 * The function is given three parameters and no return is required. The
* parameters are:
*
* 1. _object_ - Data to send to the server
* 2. _function_ - Callback function that must be executed when the required
* data has been obtained. That data should be passed into the callback
* as the only parameter
* 3. _object_ - DataTables settings object for the table
*
* Note that this supersedes `fnServerData` from DataTables 1.9-.
*
* @type string|object|function
* @default null
*
* @dtopt Option
* @name DataTable.defaults.ajax
* @since 1.10.0
*
* @example
* // Get JSON data from a file via Ajax.
* // Note DataTables expects data in the form `{ data: [ ...data... ] }` by default).
* $('#example').dataTable( {
* "ajax": "data.json"
* } );
*
* @example
* // Get JSON data from a file via Ajax, using `dataSrc` to change
* // `data` to `tableData` (i.e. `{ tableData: [ ...data... ] }`)
* $('#example').dataTable( {
* "ajax": {
* "url": "data.json",
* "dataSrc": "tableData"
* }
* } );
*
* @example
* // Get JSON data from a file via Ajax, using `dataSrc` to read data
* // from a plain array rather than an array in an object
* $('#example').dataTable( {
* "ajax": {
* "url": "data.json",
* "dataSrc": ""
* }
* } );
*
* @example
* // Manipulate the data returned from the server - add a link to data
 * // (note this can, and should, be done using `render` for the column - this
* // is just a simple example of how the data can be manipulated).
* $('#example').dataTable( {
* "ajax": {
* "url": "data.json",
* "dataSrc": function ( json ) {
* for ( var i=0, ien=json.length ; i<ien ; i++ ) {
 *          json[i][0] = '<a href="/message/'+json[i][0]+'">View message</a>';
* }
* return json;
* }
* }
* } );
*
* @example
* // Add data to the request
* $('#example').dataTable( {
* "ajax": {
* "url": "data.json",
* "data": function ( d ) {
* return {
* "extra_search": $('#extra').val()
* };
* }
* }
* } );
*
* @example
* // Send request as POST
* $('#example').dataTable( {
* "ajax": {
* "url": "data.json",
* "type": "POST"
* }
* } );
*
* @example
* // Get the data from localStorage (could interface with a form for
* // adding, editing and removing rows).
* $('#example').dataTable( {
* "ajax": function (data, callback, settings) {
* callback(
* JSON.parse( localStorage.getItem('dataTablesData') )
* );
* }
* } );
*/
"ajax": null,
/**
* This parameter allows you to readily specify the entries in the length drop
* down menu that DataTables shows when pagination is enabled. It can be
* either a 1D array of options which will be used for both the displayed
* option and the value, or a 2D array which will use the array in the first
* position as the value, and the array in the second position as the
* displayed options (useful for language strings such as 'All').
*
* Note that the `pageLength` property will be automatically set to the
* first value given in this array, unless `pageLength` is also provided.
* @type array
* @default [ 10, 25, 50, 100 ]
*
* @dtopt Option
* @name DataTable.defaults.lengthMenu
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "lengthMenu": [[10, 25, 50, -1], [10, 25, 50, "All"]]
* } );
* } );
*/
"aLengthMenu": [ 10, 25, 50, 100 ],
/**
* The `columns` option in the initialisation parameter allows you to define
* details about the way individual columns behave. For a full list of
* column options that can be set, please see
* {@link DataTable.defaults.column}. Note that if you use `columns` to
* define your columns, you must have an entry in the array for every single
 * column that you have in your table (these can be null if you don't wish
* to specify any options).
* @member
*
* @name DataTable.defaults.column
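 *
 * @example
 *    // A minimal sketch, assuming a five-column table - `null` entries
 *    // simply take the default column options
 *    $(document).ready( function() {
 *      $('#example').dataTable( {
 *        "columns": [
 *          { "title": "Name" },
 *          { "title": "Position" },
 *          null,
 *          null,
 *          { "orderable": false }
 *        ]
 *      } );
 *    } );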
*/
"aoColumns": null,
/**
* Very similar to `columns`, `columnDefs` allows you to target a specific
* column, multiple columns, or all columns, using the `targets` property of
* each object in the array. This allows great flexibility when creating
* tables, as the `columnDefs` arrays can be of any length, targeting the
* columns you specifically want. `columnDefs` may use any of the column
* options available: {@link DataTable.defaults.column}, but it _must_
* have `targets` defined in each object in the array. Values in the `targets`
* array may be:
* <ul>
* <li>a string - class name will be matched on the TH for the column</li>
* <li>0 or a positive integer - column index counting from the left</li>
* <li>a negative integer - column index counting from the right</li>
* <li>the string "_all" - all columns (i.e. assign a default)</li>
* </ul>
* @member
*
* @name DataTable.defaults.columnDefs
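 *
 * @example
 *    // An illustrative sketch: disable ordering on the first column and
 *    // add the built-in `dt-right` class to the last column
 *    $(document).ready( function() {
 *      $('#example').dataTable( {
 *        "columnDefs": [
 *          { "orderable": false, "targets": 0 },
 *          { "className": "dt-right", "targets": -1 }
 *        ]
 *      } );
 *    } );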
*/
"aoColumnDefs": null,
/**
* Basically the same as `search`, this parameter defines the individual column
* filtering state at initialisation time. The array must be of the same size
 * as the number of columns, and each element should be an object with the parameters
* `search` and `escapeRegex` (the latter is optional). 'null' is also
* accepted and the default will be used.
* @type array
* @default []
*
* @dtopt Option
* @name DataTable.defaults.searchCols
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "searchCols": [
* null,
* { "search": "My filter" },
* null,
* { "search": "^[0-9]", "escapeRegex": false }
* ]
* } );
* } )
*/
"aoSearchCols": [],
/**
* An array of CSS classes that should be applied to displayed rows. This
* array may be of any length, and DataTables will apply each class
* sequentially, looping when required.
* @type array
* @default null <i>Will take the values determined by the `oClasses.stripe*`
* options</i>
*
* @dtopt Option
* @name DataTable.defaults.stripeClasses
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "stripeClasses": [ 'strip1', 'strip2', 'strip3' ]
* } );
* } )
*/
"asStripeClasses": null,
/**
* Enable or disable automatic column width calculation. This can be disabled
* as an optimisation (it takes some time to calculate the widths) if the
 * table's column widths are passed in using `columns`.
* @type boolean
* @default true
*
* @dtopt Features
* @name DataTable.defaults.autoWidth
*
* @example
* $(document).ready( function () {
* $('#example').dataTable( {
* "autoWidth": false
* } );
* } );
*/
"bAutoWidth": true,
/**
* Deferred rendering can provide DataTables with a huge speed boost when you
* are using an Ajax or JS data source for the table. This option, when set to
* true, will cause DataTables to defer the creation of the table elements for
* each row until they are needed for a draw - saving a significant amount of
* time.
* @type boolean
* @default false
*
* @dtopt Features
* @name DataTable.defaults.deferRender
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "ajax": "sources/arrays.txt",
* "deferRender": true
* } );
* } );
*/
"bDeferRender": false,
/**
 * Replace a DataTable which matches the given selector with
* one which has the properties of the new initialisation object passed. If no
* table matches the selector, then the new DataTable will be constructed as
* per normal.
* @type boolean
* @default false
*
* @dtopt Options
* @name DataTable.defaults.destroy
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
 *      "scrollY": "200px",
* "paginate": false
* } );
*
* // Some time later....
* $('#example').dataTable( {
* "filter": false,
* "destroy": true
* } );
* } );
*/
"bDestroy": false,
/**
* Enable or disable filtering of data. Filtering in DataTables is "smart" in
* that it allows the end user to input multiple words (space separated) and
* will match a row containing those words, even if not in the order that was
 * specified (this allows matching across multiple columns). Note that if you
* wish to use filtering in DataTables this must remain 'true' - to remove the
* default filtering input box and retain filtering abilities, please use
* {@link DataTable.defaults.dom}.
* @type boolean
* @default true
*
* @dtopt Features
* @name DataTable.defaults.searching
*
* @example
* $(document).ready( function () {
* $('#example').dataTable( {
* "searching": false
* } );
* } );
*/
"bFilter": true,
/**
* Enable or disable the table information display. This shows information
* about the data that is currently visible on the page, including information
* about filtered data if that action is being performed.
* @type boolean
* @default true
*
* @dtopt Features
* @name DataTable.defaults.info
*
* @example
* $(document).ready( function () {
* $('#example').dataTable( {
* "info": false
* } );
* } );
*/
"bInfo": true,
/**
* Allows the end user to select the size of a formatted page from a select
* menu (sizes are 10, 25, 50 and 100). Requires pagination (`paginate`).
* @type boolean
* @default true
*
* @dtopt Features
* @name DataTable.defaults.lengthChange
*
* @example
* $(document).ready( function () {
* $('#example').dataTable( {
* "lengthChange": false
* } );
* } );
*/
"bLengthChange": true,
/**
* Enable or disable pagination.
* @type boolean
* @default true
*
* @dtopt Features
* @name DataTable.defaults.paging
*
* @example
* $(document).ready( function () {
* $('#example').dataTable( {
* "paging": false
* } );
* } );
*/
"bPaginate": true,
/**
* Enable or disable the display of a 'processing' indicator when the table is
* being processed (e.g. a sort). This is particularly useful for tables with
* large amounts of data where it can take a noticeable amount of time to sort
* the entries.
* @type boolean
* @default false
*
* @dtopt Features
* @name DataTable.defaults.processing
*
* @example
* $(document).ready( function () {
* $('#example').dataTable( {
* "processing": true
* } );
* } );
*/
"bProcessing": false,
/**
* Retrieve the DataTables object for the given selector. Note that if the
* table has already been initialised, this parameter will cause DataTables
* to simply return the object that has already been set up - it will not take
* account of any changes you might have made to the initialisation object
* passed to DataTables (setting this parameter to true is an acknowledgement
* that you understand this). `destroy` can be used to reinitialise a table if
* you need.
* @type boolean
* @default false
*
* @dtopt Options
* @name DataTable.defaults.retrieve
*
* @example
* $(document).ready( function() {
* initTable();
* tableActions();
* } );
*
* function initTable ()
* {
* return $('#example').dataTable( {
* "scrollY": "200px",
* "paginate": false,
* "retrieve": true
* } );
* }
*
* function tableActions ()
* {
* var table = initTable();
 *      // perform API operations with table
* }
*/
"bRetrieve": false,
/**
* When vertical (y) scrolling is enabled, DataTables will force the height of
* the table's viewport to the given height at all times (useful for layout).
* However, this can look odd when filtering data down to a small data set,
* and the footer is left "floating" further down. This parameter (when
* enabled) will cause DataTables to collapse the table's viewport down when
* the result set will fit within the given Y height.
* @type boolean
* @default false
*
* @dtopt Options
* @name DataTable.defaults.scrollCollapse
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "scrollY": "200",
* "scrollCollapse": true
* } );
* } );
*/
"bScrollCollapse": false,
/**
* Configure DataTables to use server-side processing. Note that the
* `ajax` parameter must also be given in order to give DataTables a
* source to obtain the required data for each draw.
* @type boolean
* @default false
*
* @dtopt Features
* @dtopt Server-side
* @name DataTable.defaults.serverSide
*
* @example
* $(document).ready( function () {
* $('#example').dataTable( {
* "serverSide": true,
* "ajax": "xhr.php"
* } );
* } );
*/
"bServerSide": false,
/**
* Enable or disable sorting of columns. Sorting of individual columns can be
* disabled by the `sortable` option for each column.
* @type boolean
* @default true
*
* @dtopt Features
* @name DataTable.defaults.ordering
*
* @example
* $(document).ready( function () {
* $('#example').dataTable( {
* "ordering": false
* } );
* } );
*/
"bSort": true,
/**
 * Enable or disable DataTables' ability to sort multiple columns at the
* same time (activated by shift-click by the user).
* @type boolean
* @default true
*
* @dtopt Options
* @name DataTable.defaults.orderMulti
*
* @example
* // Disable multiple column sorting ability
* $(document).ready( function () {
* $('#example').dataTable( {
* "orderMulti": false
* } );
* } );
*/
"bSortMulti": true,
/**
* Allows control over whether DataTables should use the top (true) unique
* cell that is found for a single column, or the bottom (false - default).
* This is useful when using complex headers.
* @type boolean
* @default false
*
* @dtopt Options
* @name DataTable.defaults.orderCellsTop
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "orderCellsTop": true
* } );
* } );
*/
"bSortCellsTop": false,
/**
* Enable or disable the addition of the classes `sorting\_1`, `sorting\_2` and
* `sorting\_3` to the columns which are currently being sorted on. This is
* presented as a feature switch as it can increase processing time (while
* classes are removed and added) so for large data sets you might want to
* turn this off.
* @type boolean
* @default true
*
* @dtopt Features
* @name DataTable.defaults.orderClasses
*
* @example
* $(document).ready( function () {
* $('#example').dataTable( {
* "orderClasses": false
* } );
* } );
*/
"bSortClasses": true,
/**
* Enable or disable state saving. When enabled HTML5 `localStorage` will be
* used to save table display information such as pagination information,
* display length, filtering and sorting. As such when the end user reloads
 * the page, the display will match what they had previously set up.
*
* Due to the use of `localStorage` the default state saving is not supported
* in IE6 or 7. If state saving is required in those browsers, use
* `stateSaveCallback` to provide a storage solution such as cookies.
* @type boolean
* @default false
*
* @dtopt Features
* @name DataTable.defaults.stateSave
*
* @example
* $(document).ready( function () {
* $('#example').dataTable( {
* "stateSave": true
* } );
* } );
*/
"bStateSave": false,
/**
* This function is called when a TR element is created (and all TD child
* elements have been inserted), or registered if using a DOM source, allowing
* manipulation of the TR element (adding classes etc).
* @type function
* @param {node} row "TR" element for the current row
* @param {array} data Raw data array for this row
* @param {int} dataIndex The index of this row in the internal aoData array
*
* @dtopt Callbacks
* @name DataTable.defaults.createdRow
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "createdRow": function( row, data, dataIndex ) {
* // Bold the grade for all 'A' grade browsers
* if ( data[4] == "A" )
* {
* $('td:eq(4)', row).html( '<b>A</b>' );
* }
* }
* } );
* } );
*/
"fnCreatedRow": null,
/**
* This function is called on every 'draw' event, and allows you to
* dynamically modify any aspect you want about the created DOM.
* @type function
* @param {object} settings DataTables settings object
*
* @dtopt Callbacks
* @name DataTable.defaults.drawCallback
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "drawCallback": function( settings ) {
* alert( 'DataTables has redrawn the table' );
* }
* } );
* } );
*/
"fnDrawCallback": null,
/**
 * Identical to fnHeaderCallback() but for the table footer - this function
* allows you to modify the table footer on every 'draw' event.
* @type function
* @param {node} foot "TR" element for the footer
* @param {array} data Full table data (as derived from the original HTML)
* @param {int} start Index for the current display starting point in the
* display array
* @param {int} end Index for the current display ending point in the
* display array
* @param {array int} display Index array to translate the visual position
* to the full data array
*
* @dtopt Callbacks
* @name DataTable.defaults.footerCallback
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "footerCallback": function( tfoot, data, start, end, display ) {
* tfoot.getElementsByTagName('th')[0].innerHTML = "Starting index is "+start;
* }
* } );
* } )
*/
"fnFooterCallback": null,
/**
* When rendering large numbers in the information element for the table
 * (i.e. "Showing 1 to 10 of 57 entries") DataTables will render large numbers
* to have a comma separator for the 'thousands' units (e.g. 1 million is
* rendered as "1,000,000") to help readability for the end user. This
* function will override the default method DataTables uses.
* @type function
* @member
* @param {int} toFormat number to be formatted
* @returns {string} formatted string for DataTables to show the number
*
* @dtopt Callbacks
* @name DataTable.defaults.formatNumber
*
* @example
* // Format a number using a single quote for the separator (note that
* // this can also be done with the language.thousands option)
* $(document).ready( function() {
* $('#example').dataTable( {
* "formatNumber": function ( toFormat ) {
* return toFormat.toString().replace(
* /\B(?=(\d{3})+(?!\d))/g, "'"
* );
 *        }
* } );
* } );
*/
"fnFormatNumber": function ( toFormat ) {
return toFormat.toString().replace(
/\B(?=(\d{3})+(?!\d))/g,
this.oLanguage.sThousands
);
},
/**
* This function is called on every 'draw' event, and allows you to
* dynamically modify the header row. This can be used to calculate and
* display useful information about the table.
* @type function
* @param {node} head "TR" element for the header
* @param {array} data Full table data (as derived from the original HTML)
* @param {int} start Index for the current display starting point in the
* display array
* @param {int} end Index for the current display ending point in the
* display array
* @param {array int} display Index array to translate the visual position
* to the full data array
*
* @dtopt Callbacks
* @name DataTable.defaults.headerCallback
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
 *      "headerCallback": function( head, data, start, end, display ) {
* head.getElementsByTagName('th')[0].innerHTML = "Displaying "+(end-start)+" records";
* }
* } );
* } )
*/
"fnHeaderCallback": null,
/**
* The information element can be used to convey information about the current
* state of the table. Although the internationalisation options presented by
* DataTables are quite capable of dealing with most customisations, there may
* be times where you wish to customise the string further. This callback
* allows you to do exactly that.
* @type function
* @param {object} oSettings DataTables settings object
* @param {int} start Starting position in data for the draw
* @param {int} end End position in data for the draw
* @param {int} max Total number of rows in the table (regardless of
* filtering)
* @param {int} total Total number of rows in the data set, after filtering
 * @param {string} pre The string that DataTables has formatted using its
* own rules
* @returns {string} The string to be displayed in the information element.
*
* @dtopt Callbacks
* @name DataTable.defaults.infoCallback
*
* @example
* $('#example').dataTable( {
* "infoCallback": function( settings, start, end, max, total, pre ) {
* return start +" to "+ end;
* }
* } );
*/
"fnInfoCallback": null,
/**
* Called when the table has been initialised. Normally DataTables will
* initialise sequentially and there will be no need for this function,
* however, this does not hold true when using external language information
* since that is obtained using an async XHR call.
* @type function
* @param {object} settings DataTables settings object
 * @param {object} json The JSON object requested from the server - only
* present if client-side Ajax sourced data is used
*
* @dtopt Callbacks
* @name DataTable.defaults.initComplete
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "initComplete": function(settings, json) {
* alert( 'DataTables has finished its initialisation.' );
* }
* } );
* } )
*/
"fnInitComplete": null,
/**
* Called at the very start of each table draw and can be used to cancel the
 * draw by returning false; any other return (including undefined) results in
 * the full draw occurring.
* @type function
* @param {object} settings DataTables settings object
* @returns {boolean} False will cancel the draw, anything else (including no
* return) will allow it to complete.
*
* @dtopt Callbacks
* @name DataTable.defaults.preDrawCallback
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "preDrawCallback": function( settings ) {
* if ( $('#test').val() == 1 ) {
* return false;
* }
* }
* } );
* } );
*/
"fnPreDrawCallback": null,
/**
 * This function allows you to 'post process' each row after it has been
* generated for each table draw, but before it is rendered on screen. This
* function might be used for setting the row class name etc.
* @type function
* @param {node} row "TR" element for the current row
* @param {array} data Raw data array for this row
* @param {int} displayIndex The display index for the current table draw
* @param {int} displayIndexFull The index of the data in the full list of
* rows (after filtering)
*
* @dtopt Callbacks
* @name DataTable.defaults.rowCallback
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "rowCallback": function( row, data, displayIndex, displayIndexFull ) {
* // Bold the grade for all 'A' grade browsers
* if ( data[4] == "A" ) {
* $('td:eq(4)', row).html( '<b>A</b>' );
* }
* }
* } );
* } );
*/
"fnRowCallback": null,
/**
* __Deprecated__ The functionality provided by this parameter has now been
* superseded by that provided through `ajax`, which should be used instead.
*
* This parameter allows you to override the default function which obtains
 * the data from the server with something more suitable for your application.
* For example you could use POST data, or pull information from a Gears or
* AIR database.
* @type function
* @member
* @param {string} source HTTP source to obtain the data from (`ajax`)
* @param {array} data A key/value pair object containing the data to send
* to the server
* @param {function} callback to be called on completion of the data get
* process that will draw the data on the page.
* @param {object} settings DataTables settings object
*
* @dtopt Callbacks
* @dtopt Server-side
* @name DataTable.defaults.serverData
*
* @deprecated 1.10. Please use `ajax` for this functionality now.
*/
"fnServerData": null,
/**
* __Deprecated__ The functionality provided by this parameter has now been
* superseded by that provided through `ajax`, which should be used instead.
*
* It is often useful to send extra data to the server when making an Ajax
* request - for example custom filtering information, and this callback
* function makes it trivial to send extra information to the server. The
* passed in parameter is the data set that has been constructed by
* DataTables, and you can add to this or modify it as you require.
* @type function
* @param {array} data Data array (array of objects which are name/value
* pairs) that has been constructed by DataTables and will be sent to the
 *    server. In the case of Ajax sourced data with client-side processing
 *    this will be an empty array, while for server-side processing there will be a
* significant number of parameters!
* @returns {undefined} Ensure that you modify the data array passed in,
* as this is passed by reference.
*
* @dtopt Callbacks
* @dtopt Server-side
* @name DataTable.defaults.serverParams
*
* @deprecated 1.10. Please use `ajax` for this functionality now.
*/
"fnServerParams": null,
/**
* Load the table state. With this function you can define from where, and how, the
* state of a table is loaded. By default DataTables will load from `localStorage`
* but you might wish to use a server-side database or cookies.
* @type function
* @member
* @param {object} settings DataTables settings object
 * @param {function} callback Callback that can be executed when done. It
* should be passed the loaded state object.
* @return {object} The DataTables state object to be loaded
*
* @dtopt Callbacks
* @name DataTable.defaults.stateLoadCallback
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "stateSave": true,
* "stateLoadCallback": function (settings, callback) {
* $.ajax( {
* "url": "/state_load",
* "dataType": "json",
* "success": function (json) {
* callback( json );
* }
* } );
* }
* } );
* } );
*/
"fnStateLoadCallback": function ( settings ) {
try {
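			// A stateDuration of -1 means session-scoped state, so
			// sessionStorage is used in place of localStorage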
return JSON.parse(
(settings.iStateDuration === -1 ? sessionStorage : localStorage).getItem(
'DataTables_'+settings.sInstance+'_'+location.pathname
)
);
} catch (e) {}
},
/**
* Callback which allows modification of the saved state prior to loading that state.
* This callback is called when the table is loading state from the stored data, but
* prior to the settings object being modified by the saved state. Note that for
* plug-in authors, you should use the `stateLoadParams` event to load parameters for
* a plug-in.
* @type function
* @param {object} settings DataTables settings object
* @param {object} data The state object that is to be loaded
*
* @dtopt Callbacks
* @name DataTable.defaults.stateLoadParams
*
* @example
* // Remove a saved filter, so filtering is never loaded
* $(document).ready( function() {
* $('#example').dataTable( {
* "stateSave": true,
* "stateLoadParams": function (settings, data) {
* data.oSearch.sSearch = "";
* }
* } );
* } );
*
* @example
* // Disallow state loading by returning false
* $(document).ready( function() {
* $('#example').dataTable( {
* "stateSave": true,
* "stateLoadParams": function (settings, data) {
* return false;
* }
* } );
* } );
*/
"fnStateLoadParams": null,
/**
* Callback that is called when the state has been loaded from the state saving method
* and the DataTables settings object has been modified as a result of the loaded state.
* @type function
* @param {object} settings DataTables settings object
* @param {object} data The state object that was loaded
*
* @dtopt Callbacks
* @name DataTable.defaults.stateLoaded
*
* @example
* // Show an alert with the filtering value that was saved
* $(document).ready( function() {
* $('#example').dataTable( {
* "stateSave": true,
* "stateLoaded": function (settings, data) {
* alert( 'Saved filter was: '+data.oSearch.sSearch );
* }
* } );
* } );
*/
"fnStateLoaded": null,
/**
* Save the table state. This function allows you to define where and how the state
 * information for the table is stored. By default DataTables will use `localStorage`
* but you might wish to use a server-side database or cookies.
* @type function
* @member
* @param {object} settings DataTables settings object
* @param {object} data The state object to be saved
*
* @dtopt Callbacks
* @name DataTable.defaults.stateSaveCallback
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "stateSave": true,
* "stateSaveCallback": function (settings, data) {
* // Send an Ajax request to the server with the state object
* $.ajax( {
* "url": "/state_save",
* "data": data,
* "dataType": "json",
 *          "method": "POST",
* "success": function () {}
* } );
* }
* } );
* } );
*/
"fnStateSaveCallback": function ( settings, data ) {
try {
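			// As with stateLoadCallback, a stateDuration of -1 selects
			// sessionStorage (session-scoped state) rather than localStorage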
(settings.iStateDuration === -1 ? sessionStorage : localStorage).setItem(
'DataTables_'+settings.sInstance+'_'+location.pathname,
JSON.stringify( data )
);
} catch (e) {}
},
/**
* Callback which allows modification of the state to be saved. Called when the table
 * has changed state and a new state save is required. This method allows modification of
 * the state saving object prior to actually doing the save, including addition of
 * other state properties or modification of existing ones. Note that for plug-in authors, you should
* use the `stateSaveParams` event to save parameters for a plug-in.
* @type function
* @param {object} settings DataTables settings object
* @param {object} data The state object to be saved
*
* @dtopt Callbacks
* @name DataTable.defaults.stateSaveParams
*
* @example
* // Remove a saved filter, so filtering is never saved
* $(document).ready( function() {
* $('#example').dataTable( {
* "stateSave": true,
* "stateSaveParams": function (settings, data) {
* data.oSearch.sSearch = "";
* }
* } );
* } );
*/
"fnStateSaveParams": null,
/**
* Duration for which the saved state information is considered valid. After this period
* has elapsed the state will be returned to the default.
* Value is given in seconds.
* @type int
* @default 7200 <i>(2 hours)</i>
*
* @dtopt Options
* @name DataTable.defaults.stateDuration
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
 *      "stateDuration": 60*60*24 // 1 day
* } );
* } )
*/
"iStateDuration": 7200,
/**
* When enabled DataTables will not make a request to the server for the first
* page draw - rather it will use the data already on the page (no sorting etc
* will be applied to it), thus saving on an XHR at load time. `deferLoading`
* is used to indicate that deferred loading is required, but it is also used
* to tell DataTables how many records there are in the full table (allowing
* the information element and pagination to be displayed correctly). In the case
* where a filtering is applied to the table on initial load, this can be
* indicated by giving the parameter as an array, where the first element is
* the number of records available after filtering and the second element is the
* number of records without filtering (allowing the table information element
* to be shown correctly).
* @type int | array
* @default null
*
* @dtopt Options
* @name DataTable.defaults.deferLoading
*
* @example
* // 57 records available in the table, no filtering applied
* $(document).ready( function() {
* $('#example').dataTable( {
* "serverSide": true,
* "ajax": "scripts/server_processing.php",
* "deferLoading": 57
* } );
* } );
*
* @example
* // 57 records after filtering, 100 without filtering (an initial filter applied)
* $(document).ready( function() {
* $('#example').dataTable( {
* "serverSide": true,
* "ajax": "scripts/server_processing.php",
* "deferLoading": [ 57, 100 ],
* "search": {
* "search": "my_filter"
* }
* } );
* } );
*/
"iDeferLoading": null,
/**
* Number of rows to display on a single page when using pagination. If
 * the `lengthChange` feature is enabled then the end user will be able to override
* this to a custom setting using a pop-up menu.
* @type int
* @default 10
*
* @dtopt Options
* @name DataTable.defaults.pageLength
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "pageLength": 50
* } );
* } )
*/
"iDisplayLength": 10,
/**
* Define the starting point for data display when using DataTables with
* pagination. Note that this parameter is the number of records, rather than
* the page number, so if you have 10 records per page and want to start on
* the third page, it should be "20".
* @type int
* @default 0
*
* @dtopt Options
* @name DataTable.defaults.displayStart
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "displayStart": 20
* } );
* } )
*/
"iDisplayStart": 0,
/**
* By default DataTables allows keyboard navigation of the table (sorting, paging,
* and filtering) by adding a `tabindex` attribute to the required elements. This
* allows you to tab through the controls and press the enter key to activate them.
 * The default tabindex is 0, meaning that the tab order follows the flow of the document.
* You can overrule this using this parameter if you wish. Use a value of -1 to
* disable built-in keyboard navigation.
* @type int
* @default 0
*
* @dtopt Options
* @name DataTable.defaults.tabIndex
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "tabIndex": 1
* } );
* } );
*/
"iTabIndex": 0,
/**
* Classes that DataTables assigns to the various components and features
* that it adds to the HTML table. This allows classes to be configured
* during initialisation in addition to through the static
 * {@link DataTable.ext.oStdClasses} object.
* @namespace
* @name DataTable.defaults.classes
*/
"oClasses": {},
/**
* All strings that DataTables uses in the user interface that it creates
 * are defined in this object, allowing you to modify them individually or
* completely replace them all as required.
* @namespace
* @name DataTable.defaults.language
*/
"oLanguage": {
/**
* Strings that are used for WAI-ARIA labels and controls only (these are not
* actually visible on the page, but will be read by screenreaders, and thus
* must be internationalised as well).
* @namespace
* @name DataTable.defaults.language.aria
*/
"oAria": {
/**
* ARIA label that is added to the table headers when the column may be
 * sorted ascending by activating the column (click or return when focused).
* Note that the column header is prefixed to this string.
* @type string
* @default : activate to sort column ascending
*
* @dtopt Language
* @name DataTable.defaults.language.aria.sortAscending
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "aria": {
* "sortAscending": " - click/return to sort ascending"
* }
* }
* } );
* } );
*/
"sSortAscending": ": activate to sort column ascending",
/**
* ARIA label that is added to the table headers when the column may be
 * sorted descending by activating the column (click or return when focused).
* Note that the column header is prefixed to this string.
* @type string
 * @default : activate to sort column descending
*
* @dtopt Language
* @name DataTable.defaults.language.aria.sortDescending
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "aria": {
* "sortDescending": " - click/return to sort descending"
* }
* }
* } );
* } );
*/
"sSortDescending": ": activate to sort column descending"
},
/**
* Pagination string used by DataTables for the built-in pagination
* control types.
* @namespace
* @name DataTable.defaults.language.paginate
*/
"oPaginate": {
/**
* Text to use when using the 'full_numbers' type of pagination for the
* button to take the user to the first page.
* @type string
* @default First
*
* @dtopt Language
* @name DataTable.defaults.language.paginate.first
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "paginate": {
* "first": "First page"
* }
* }
* } );
* } );
*/
"sFirst": "First",
/**
* Text to use when using the 'full_numbers' type of pagination for the
* button to take the user to the last page.
* @type string
* @default Last
*
* @dtopt Language
* @name DataTable.defaults.language.paginate.last
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "paginate": {
* "last": "Last page"
* }
* }
* } );
* } );
*/
"sLast": "Last",
/**
* Text to use for the 'next' pagination button (to take the user to the
* next page).
* @type string
* @default Next
*
* @dtopt Language
* @name DataTable.defaults.language.paginate.next
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "paginate": {
* "next": "Next page"
* }
* }
* } );
* } );
*/
"sNext": "Next",
/**
* Text to use for the 'previous' pagination button (to take the user to
* the previous page).
* @type string
* @default Previous
*
* @dtopt Language
* @name DataTable.defaults.language.paginate.previous
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "paginate": {
* "previous": "Previous page"
* }
* }
* } );
* } );
*/
"sPrevious": "Previous"
},
/**
* This string is shown in preference to `zeroRecords` when the table is
* empty of data (regardless of filtering). Note that this is an optional
* parameter - if it is not given, the value of `zeroRecords` will be used
* instead (either the default or given value).
* @type string
* @default No data available in table
*
* @dtopt Language
* @name DataTable.defaults.language.emptyTable
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "emptyTable": "No data available in table"
* }
* } );
* } );
*/
"sEmptyTable": "Non ci sono dati disponibili nella tabella",
/**
* This string gives information to the end user about the information
 * that is currently on display on the page. The following tokens can be
 * used in the string and will be dynamically replaced as the table
 * display updates. These tokens can be placed anywhere in the string, or
 * removed as the language requires:
*
* * `\_START\_` - Display index of the first record on the current page
* * `\_END\_` - Display index of the last record on the current page
* * `\_TOTAL\_` - Number of records in the table after filtering
* * `\_MAX\_` - Number of records in the table without filtering
* * `\_PAGE\_` - Current page number
* * `\_PAGES\_` - Total number of pages of data in the table
*
* @type string
* @default Showing _START_ to _END_ of _TOTAL_ entries
*
* @dtopt Language
* @name DataTable.defaults.language.info
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "info": "Showing page _PAGE_ of _PAGES_"
* }
* } );
* } );
*/
"sInfo": "_START_ su _END_ di _TOTAL_ righe",
/**
* Display information string for when the table is empty. Typically the
* format of this string should match `info`.
* @type string
* @default Showing 0 to 0 of 0 entries
*
* @dtopt Language
* @name DataTable.defaults.language.infoEmpty
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
 *        "infoEmpty": "No entries to show"
* }
* } );
* } );
*/
"sInfoEmpty": "Non ci sono righe",
/**
* When a user filters the information in a table, this string is appended
* to the information (`info`) to give an idea of how strong the filtering
* is. The variable _MAX_ is dynamically updated.
* @type string
* @default (filtered from _MAX_ total entries)
*
* @dtopt Language
* @name DataTable.defaults.language.infoFiltered
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "infoFiltered": " - filtering from _MAX_ records"
* }
* } );
* } );
*/
"sInfoFiltered": "(filtered from _MAX_ total entries)",
/**
 * It can be useful to append extra information to the info string at times,
* and this variable does exactly that. This information will be appended to
* the `info` (`infoEmpty` and `infoFiltered` in whatever combination they are
* being used) at all times.
* @type string
* @default <i>Empty string</i>
*
* @dtopt Language
* @name DataTable.defaults.language.infoPostFix
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "infoPostFix": "All records shown are derived from real information."
* }
* } );
* } );
*/
"sInfoPostFix": "",
/**
* This decimal place operator is a little different from the other
* language options since DataTables doesn't output floating point
* numbers, so it won't ever use this for display of a number. Rather,
* what this parameter does is modify the sort methods of the table so
* that numbers which are in a format which has a character other than
* a period (`.`) as a decimal place will be sorted numerically.
*
* Note that numbers with different decimal places cannot be shown in
 * the same table and still be sortable; the table must be consistent.
* However, multiple different tables on the page can use different
* decimal place characters.
* @type string
* @default
*
* @dtopt Language
* @name DataTable.defaults.language.decimal
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
 *        "decimal": ",",
* "thousands": "."
* }
* } );
* } );
*/
"sDecimal": "",
/**
 * DataTables has a built-in number formatter (`formatNumber`) which is
* used to format large numbers that are used in the table information.
* By default a comma is used, but this can be trivially changed to any
* character you wish with this parameter.
* @type string
* @default ,
*
* @dtopt Language
* @name DataTable.defaults.language.thousands
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "thousands": "'"
* }
* } );
* } );
*/
"sThousands": ",",
/**
* Detail the action that will be taken when the drop down menu for the
* pagination length option is changed. The '_MENU_' variable is replaced
* with a default select list of 10, 25, 50 and 100, and can be replaced
* with a custom select box if required.
* @type string
* @default Show _MENU_ entries
*
* @dtopt Language
* @name DataTable.defaults.language.lengthMenu
*
* @example
* // Language change only
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "lengthMenu": "Display _MENU_ records"
* }
* } );
* } );
*
* @example
* // Language and options change
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "lengthMenu": 'Display <select>'+
* '<option value="10">10</option>'+
* '<option value="20">20</option>'+
* '<option value="30">30</option>'+
* '<option value="40">40</option>'+
* '<option value="50">50</option>'+
* '<option value="-1">All</option>'+
* '</select> records'
* }
* } );
* } );
*/
"sLengthMenu": "Mostra _MENU_ righe",
/**
* When using Ajax sourced data and during the first draw when DataTables is
* gathering the data, this message is shown in an empty row in the table to
 * indicate to the end user that the data is being loaded. Note that this
* parameter is not used when loading data by server-side processing, just
* Ajax sourced data with client-side processing.
* @type string
* @default Loading...
*
* @dtopt Language
* @name DataTable.defaults.language.loadingRecords
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
 *        "loadingRecords": "Please wait - loading..."
* }
* } );
* } );
*/
"sLoadingRecords": "Carico...",
/**
* Text which is displayed when the table is processing a user action
* (usually a sort command or similar).
* @type string
* @default Processing...
*
* @dtopt Language
* @name DataTable.defaults.language.processing
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "processing": "DataTables is currently busy"
* }
* } );
* } );
*/
"sProcessing": "Processo...",
/**
* Details the actions that will be taken when the user types into the
* filtering input text box. The variable "_INPUT_", if used in the string,
* is replaced with the HTML text box for the filtering input allowing
* control over where it appears in the string. If "_INPUT_" is not given
* then the input box is appended to the string automatically.
* @type string
* @default Search:
*
* @dtopt Language
* @name DataTable.defaults.language.search
*
* @example
* // Input text box will be appended at the end automatically
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "search": "Filter records:"
* }
* } );
* } );
*
* @example
* // Specify where the filter should appear
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "search": "Apply filter _INPUT_ to table"
* }
* } );
* } );
*/
"sSearch": "Cerca:",
/**
* Assign a `placeholder` attribute to the search `input` element
* @type string
* @default
*
* @dtopt Language
* @name DataTable.defaults.language.searchPlaceholder
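 *
 * @example
 *    // A minimal sketch: show placeholder text in the search input
 *    $(document).ready( function() {
 *      $('#example').dataTable( {
 *        "language": {
 *          "searchPlaceholder": "Search records"
 *        }
 *      } );
 *    } );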
*/
"sSearchPlaceholder": "",
/**
* All of the language information can be stored in a file on the
* server-side, which DataTables will look up if this parameter is passed.
* It must store the URL of the language file, which is in a JSON format,
* and the object has the same properties as the oLanguage object in the
* initialiser object (i.e. the above parameters). Please refer to one of
* the example language files to see how this works in action.
* @type string
* @default <i>Empty string - i.e. disabled</i>
*
* @dtopt Language
* @name DataTable.defaults.language.url
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "url": "http://www.sprymedia.co.uk/dataTables/lang.txt"
* }
* } );
* } );
*/
"sUrl": "",
/**
 * Text shown inside the table records when there is no information to be
* displayed after filtering. `emptyTable` is shown when there is simply no
* information in the table at all (regardless of filtering).
* @type string
* @default No matching records found
*
* @dtopt Language
* @name DataTable.defaults.language.zeroRecords
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "language": {
* "zeroRecords": "No records to display"
* }
* } );
* } );
*/
"sZeroRecords": "Non ci sono righe"
},
/**
 * This parameter allows you to define the global filtering state at
 * initialisation time. As an object the `search` parameter must be
 * defined, but all other parameters are optional. When `regex` is true,
 * the search string will be treated as a regular expression; when false
 * (default) it will be treated as a straight string. When `smart` is true,
 * DataTables will use its smart filtering methods (to word match at
 * any point in the data); when false this will not be done.
* @namespace
* @extends DataTable.models.oSearch
*
* @dtopt Options
* @name DataTable.defaults.search
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "search": {"search": "Initial search"}
* } );
* } )
*/
"oSearch": $.extend( {}, DataTable.models.oSearch ),
/**
* __Deprecated__ The functionality provided by this parameter has now been
* superseded by that provided through `ajax`, which should be used instead.
*
* By default DataTables will look for the property `data` (or `aaData` for
* compatibility with DataTables 1.9-) when obtaining data from an Ajax
* source or for server-side processing - this parameter allows that
* property to be changed. You can use Javascript dotted object notation to
* get a data source for multiple levels of nesting.
* @type string
* @default data
*
* @dtopt Options
* @dtopt Server-side
* @name DataTable.defaults.ajaxDataProp
*
* @deprecated 1.10. Please use `ajax` for this functionality now.
*/
"sAjaxDataProp": "data",
/**
* __Deprecated__ The functionality provided by this parameter has now been
* superseded by that provided through `ajax`, which should be used instead.
*
* You can instruct DataTables to load data from an external
 * source using this parameter (use `aData` if you want to pass in data you
 * already have). Simply provide a URL from which a JSON object can be obtained.
* @type string
* @default null
*
* @dtopt Options
* @dtopt Server-side
* @name DataTable.defaults.ajaxSource
*
* @deprecated 1.10. Please use `ajax` for this functionality now.
*/
"sAjaxSource": null,
/**
* This initialisation variable allows you to specify exactly where in the
* DOM you want DataTables to inject the various controls it adds to the page
* (for example you might want the pagination controls at the top of the
* table). DIV elements (with or without a custom class) can also be added to
 * aid styling. The following syntax is used:
* <ul>
* <li>The following options are allowed:
* <ul>
* <li>'l' - Length changing</li>
* <li>'f' - Filtering input</li>
* <li>'t' - The table!</li>
* <li>'i' - Information</li>
* <li>'p' - Pagination</li>
* <li>'r' - pRocessing</li>
* </ul>
* </li>
* <li>The following constants are allowed:
* <ul>
* <li>'H' - jQueryUI theme "header" classes ('fg-toolbar ui-widget-header ui-corner-tl ui-corner-tr ui-helper-clearfix')</li>
* <li>'F' - jQueryUI theme "footer" classes ('fg-toolbar ui-widget-header ui-corner-bl ui-corner-br ui-helper-clearfix')</li>
* </ul>
* </li>
* <li>The following syntax is expected:
* <ul>
* <li>'<' and '>' - div elements</li>
 *       <li>'<"class"' and '>' - div with a class</li>
 *       <li>'<"#id"' and '>' - div with an ID</li>
* </ul>
* </li>
* <li>Examples:
* <ul>
* <li>'<"wrapper"flipt>'</li>
* <li>'<lf<t>ip>'</li>
* </ul>
* </li>
* </ul>
* @type string
* @default lfrtip <i>(when `jQueryUI` is false)</i> <b>or</b>
* <"H"lfr>t<"F"ip> <i>(when `jQueryUI` is true)</i>
*
* @dtopt Options
* @name DataTable.defaults.dom
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "dom": '<"top"i>rt<"bottom"flp><"clear">'
* } );
* } );
*/
"sDom": "lfrtip",
/**
* Search delay option. This will throttle full table searches that use the
 * DataTables provided search input element (it does not affect calls to
 * `dt-api search()`), providing a delay before the search is made.
* @type integer
* @default 0
*
* @dtopt Options
* @name DataTable.defaults.searchDelay
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "searchDelay": 200
* } );
* } )
*/
"searchDelay": null,
/**
* DataTables features six different built-in options for the buttons to
* display for pagination control:
*
* * `numbers` - Page number buttons only
* * `simple` - 'Previous' and 'Next' buttons only
 * * `simple_numbers` - 'Previous' and 'Next' buttons, plus page numbers
* * `full` - 'First', 'Previous', 'Next' and 'Last' buttons
* * `full_numbers` - 'First', 'Previous', 'Next' and 'Last' buttons, plus page numbers
* * `first_last_numbers` - 'First' and 'Last' buttons, plus page numbers
*
* Further methods can be added using {@link DataTable.ext.oPagination}.
* @type string
* @default simple_numbers
*
* @dtopt Options
* @name DataTable.defaults.pagingType
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "pagingType": "full_numbers"
* } );
* } )
*/
"sPaginationType": "simple_numbers",
/**
* Enable horizontal scrolling. When a table is too wide to fit into a
* certain layout, or you have a large number of columns in the table, you
* can enable x-scrolling to show the table in a viewport, which can be
* scrolled. This property can be `true` which will allow the table to
* scroll horizontally when needed, or any CSS unit, or a number (in which
* case it will be treated as a pixel measurement). Setting as simply `true`
* is recommended.
* @type boolean|string
* @default <i>blank string - i.e. disabled</i>
*
* @dtopt Features
* @name DataTable.defaults.scrollX
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "scrollX": true,
* "scrollCollapse": true
* } );
* } );
*/
"sScrollX": "",
/**
* This property can be used to force a DataTable to use more width than it
* might otherwise do when x-scrolling is enabled. For example if you have a
 * table which needs to be well spaced, this parameter is useful for
 * "over-sizing" the table, and thus forcing scrolling. This property can be
* any CSS unit, or a number (in which case it will be treated as a pixel
* measurement).
* @type string
* @default <i>blank string - i.e. disabled</i>
*
* @dtopt Options
* @name DataTable.defaults.scrollXInner
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "scrollX": "100%",
* "scrollXInner": "110%"
* } );
* } );
*/
"sScrollXInner": "",
/**
* Enable vertical scrolling. Vertical scrolling will constrain the DataTable
* to the given height, and enable scrolling for any data which overflows the
* current viewport. This can be used as an alternative to paging to display
* a lot of data in a small area (although paging and scrolling can both be
* enabled at the same time). This property can be any CSS unit, or a number
* (in which case it will be treated as a pixel measurement).
* @type string
* @default <i>blank string - i.e. disabled</i>
*
* @dtopt Features
* @name DataTable.defaults.scrollY
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "scrollY": "200px",
* "paginate": false
* } );
* } );
*/
"sScrollY": "",
/**
* __Deprecated__ The functionality provided by this parameter has now been
* superseded by that provided through `ajax`, which should be used instead.
*
* Set the HTTP method that is used to make the Ajax call for server-side
* processing or Ajax sourced data.
* @type string
* @default GET
*
* @dtopt Options
* @dtopt Server-side
* @name DataTable.defaults.serverMethod
*
* @deprecated 1.10. Please use `ajax` for this functionality now.
*/
"sServerMethod": "GET",
/**
* DataTables makes use of renderers when displaying HTML elements for
* a table. These renderers can be added or modified by plug-ins to
* generate suitable mark-up for a site. For example the Bootstrap
* integration plug-in for DataTables uses a paging button renderer to
* display pagination buttons in the mark-up required by Bootstrap.
*
* For further information about the renderers available see
* DataTable.ext.renderer
* @type string|object
* @default null
*
* @name DataTable.defaults.renderer
*
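 * @example
 *    // A sketch, assuming a styling integration (for example the Bootstrap
 *    // plug-in) has registered a 'bootstrap' renderer in
 *    // DataTable.ext.renderer
 *    $(document).ready( function() {
 *      $('#example').dataTable( {
 *        "renderer": "bootstrap"
 *      } );
 *    } );
 *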
*/
"renderer": null,
/**
* Set the data property name that DataTables should use to get a row's id
* to set as the `id` property in the node.
* @type string
* @default DT_RowId
*
* @name DataTable.defaults.rowId
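 *
 * @example
 *    // A minimal sketch - `staffId` is a hypothetical property name in
 *    // the row data objects
 *    $(document).ready( function() {
 *      $('#example').dataTable( {
 *        "ajax": "data.json",
 *        "rowId": "staffId"
 *      } );
 *    } );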
*/
"rowId": "DT_RowId"
};
_fnHungarianMap( DataTable.defaults );
/*
* Developer note - See note in model.defaults.js about the use of Hungarian
* notation and camel case.
*/
/**
* Column options that can be given to DataTables at initialisation time.
* @namespace
*/
DataTable.defaults.column = {
/**
* Define which column(s) an order will occur on for this column. This
* allows a column's ordering to take multiple columns into account when
 * doing a sort, or to use the data from a different column. For example, first
* name / last name columns make sense to do a multi-column sort over the
* two columns.
* @type array|int
* @default null <i>Takes the value of the column index automatically</i>
*
* @name DataTable.defaults.column.orderData
* @dtopt Columns
*
* @example
* // Using `columnDefs`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [
* { "orderData": [ 0, 1 ], "targets": [ 0 ] },
* { "orderData": [ 1, 0 ], "targets": [ 1 ] },
* { "orderData": 2, "targets": [ 2 ] }
* ]
* } );
* } );
*
* @example
* // Using `columns`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columns": [
* { "orderData": [ 0, 1 ] },
* { "orderData": [ 1, 0 ] },
* { "orderData": 2 },
* null,
* null
* ]
* } );
* } );
*/
"aDataSort": null,
"iDataSort": -1,
/**
* You can control the default ordering direction, and even alter the
* behaviour of the sort handler (i.e. only allow ascending ordering etc)
* using this parameter.
* @type array
* @default [ 'asc', 'desc' ]
*
* @name DataTable.defaults.column.orderSequence
* @dtopt Columns
*
* @example
* // Using `columnDefs`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [
* { "orderSequence": [ "asc" ], "targets": [ 1 ] },
* { "orderSequence": [ "desc", "asc", "asc" ], "targets": [ 2 ] },
* { "orderSequence": [ "desc" ], "targets": [ 3 ] }
* ]
* } );
* } );
*
* @example
* // Using `columns`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columns": [
* null,
* { "orderSequence": [ "asc" ] },
* { "orderSequence": [ "desc", "asc", "asc" ] },
* { "orderSequence": [ "desc" ] },
* null
* ]
* } );
* } );
*/
"asSorting": [ 'asc', 'desc' ],
/**
* Enable or disable filtering on the data in this column.
* @type boolean
* @default true
*
* @name DataTable.defaults.column.searchable
* @dtopt Columns
*
* @example
* // Using `columnDefs`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [
* { "searchable": false, "targets": [ 0 ] }
* ] } );
* } );
*
* @example
* // Using `columns`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columns": [
* { "searchable": false },
* null,
* null,
* null,
* null
* ] } );
* } );
*/
"bSearchable": true,
/**
* Enable or disable ordering on this column.
* @type boolean
* @default true
*
* @name DataTable.defaults.column.orderable
* @dtopt Columns
*
* @example
* // Using `columnDefs`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [
* { "orderable": false, "targets": [ 0 ] }
* ] } );
* } );
*
* @example
* // Using `columns`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columns": [
* { "orderable": false },
* null,
* null,
* null,
* null
* ] } );
* } );
*/
"bSortable": true,
/**
* Enable or disable the display of this column.
* @type boolean
* @default true
*
* @name DataTable.defaults.column.visible
* @dtopt Columns
*
* @example
* // Using `columnDefs`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [
* { "visible": false, "targets": [ 0 ] }
* ] } );
* } );
*
* @example
* // Using `columns`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columns": [
* { "visible": false },
* null,
* null,
* null,
* null
* ] } );
* } );
*/
"bVisible": true,
/**
* Developer definable function that is called whenever a cell is created (Ajax source,
	 * etc) or processed for input (DOM source). This can be used as a complement to mRender,
* allowing you to modify the DOM element (add background colour for example) when the
* element is available.
* @type function
* @param {element} td The TD node that has been created
* @param {*} cellData The Data for the cell
* @param {array|object} rowData The data for the whole row
* @param {int} row The row index for the aoData data store
* @param {int} col The column index for aoColumns
*
* @name DataTable.defaults.column.createdCell
* @dtopt Columns
*
* @example
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [ {
* "targets": [3],
* "createdCell": function (td, cellData, rowData, row, col) {
* if ( cellData == "1.7" ) {
* $(td).css('color', 'blue')
* }
* }
* } ]
* });
* } );
*/
"fnCreatedCell": null,
/**
* This parameter has been replaced by `data` in DataTables to ensure naming
* consistency. `dataProp` can still be used, as there is backwards
* compatibility in DataTables for this option, but it is strongly
* recommended that you use `data` in preference to `dataProp`.
* @name DataTable.defaults.column.dataProp
*/
/**
* This property can be used to read data from any data source property,
* including deeply nested objects / properties. `data` can be given in a
	 * number of different ways which affect its behaviour:
*
* * `integer` - treated as an array index for the data source. This is the
* default that DataTables uses (incrementally increased for each column).
* * `string` - read an object property from the data source. There are
* three 'special' options that can be used in the string to alter how
* DataTables reads the data from the source object:
* * `.` - Dotted Javascript notation. Just as you use a `.` in
	 *      Javascript to read from nested objects, so too can the options
* specified in `data`. For example: `browser.version` or
* `browser.name`. If your object parameter name contains a period, use
* `\\` to escape it - i.e. `first\\.name`.
* * `[]` - Array notation. DataTables can automatically combine data
	 *      from an array source, joining the data with the characters provided
* between the two brackets. For example: `name[, ]` would provide a
* comma-space separated list from the source array. If no characters
* are provided between the brackets, the original array source is
* returned.
* * `()` - Function notation. Adding `()` to the end of a parameter will
* execute a function of the name given. For example: `browser()` for a
* simple function on the data source, `browser.version()` for a
* function in a nested property or even `browser().version` to get an
* object property if the function called returns an object. Note that
* function notation is recommended for use in `render` rather than
* `data` as it is much simpler to use as a renderer.
* * `null` - use the original data source for the row rather than plucking
* data directly from it. This action has effects on two other
* initialisation options:
* * `defaultContent` - When null is given as the `data` option and
* `defaultContent` is specified for the column, the value defined by
* `defaultContent` will be used for the cell.
* * `render` - When null is used for the `data` option and the `render`
* option is specified for the column, the whole data source for the
* row is used for the renderer.
* * `function` - the function given will be executed whenever DataTables
* needs to set or get the data for a cell in the column. The function
* takes three parameters:
* * Parameters:
* * `{array|object}` The data source for the row
	 *      * `{string}` The type of call / data requested - this will be 'set' when
	 *        setting data or 'filter', 'display', 'type' or 'sort' when
	 *        gathering data. Note that when `undefined` is given for the
	 *        type DataTables expects to get the raw data for the object back.
* * `{*}` Data to set when the second parameter is 'set'.
* * Return:
* * The return value from the function is not required when 'set' is
* the type of call, but otherwise the return is what will be used
* for the data requested.
*
* Note that `data` is a getter and setter option. If you just require
* formatting of data for output, you will likely want to use `render` which
* is simply a getter and thus simpler to use.
*
* Note that prior to DataTables 1.9.2 `data` was called `mDataProp`. The
* name change reflects the flexibility of this property and is consistent
* with the naming of mRender. If 'mDataProp' is given, then it will still
* be used by DataTables, as it automatically maps the old name to the new
* if required.
*
* @type string|int|function|null
* @default null <i>Use automatically calculated column index</i>
*
* @name DataTable.defaults.column.data
* @dtopt Columns
*
* @example
* // Read table data from objects
* // JSON structure for each row:
* // {
* // "engine": {value},
* // "browser": {value},
* // "platform": {value},
* // "version": {value},
* // "grade": {value}
* // }
* $(document).ready( function() {
* $('#example').dataTable( {
* "ajaxSource": "sources/objects.txt",
* "columns": [
* { "data": "engine" },
* { "data": "browser" },
* { "data": "platform" },
* { "data": "version" },
* { "data": "grade" }
* ]
* } );
* } );
*
* @example
* // Read information from deeply nested objects
* // JSON structure for each row:
* // {
* // "engine": {value},
* // "browser": {value},
* // "platform": {
* // "inner": {value}
* // },
* // "details": [
* // {value}, {value}
* // ]
* // }
* $(document).ready( function() {
* $('#example').dataTable( {
* "ajaxSource": "sources/deep.txt",
* "columns": [
* { "data": "engine" },
* { "data": "browser" },
* { "data": "platform.inner" },
* { "data": "platform.details.0" },
* { "data": "platform.details.1" }
* ]
* } );
* } );
*
* @example
* // Using `data` as a function to provide different information for
* // sorting, filtering and display. In this case, currency (price)
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [ {
* "targets": [ 0 ],
* "data": function ( source, type, val ) {
* if (type === 'set') {
* source.price = val;
	 *              // Store the computed display and filter values for efficiency
* source.price_display = val=="" ? "" : "$"+numberFormat(val);
* source.price_filter = val=="" ? "" : "$"+numberFormat(val)+" "+val;
* return;
* }
* else if (type === 'display') {
* return source.price_display;
* }
* else if (type === 'filter') {
* return source.price_filter;
* }
* // 'sort', 'type' and undefined all just use the integer
* return source.price;
* }
* } ]
* } );
* } );
*
* @example
* // Using default content
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [ {
* "targets": [ 0 ],
* "data": null,
* "defaultContent": "Click to edit"
* } ]
* } );
* } );
*
* @example
* // Using array notation - outputting a list from an array
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [ {
* "targets": [ 0 ],
* "data": "name[, ]"
* } ]
* } );
* } );
*
*/
"mData": null,
/**
* This property is the rendering partner to `data` and it is suggested that
* when you want to manipulate data for display (including filtering,
* sorting etc) without altering the underlying data for the table, use this
	 * property. `render` can be considered to be the read-only companion to
	 * `data` which is read / write (and as such more complex). Like `data`
	 * this option can be given in a number of different ways to affect its
* behaviour:
*
* * `integer` - treated as an array index for the data source. This is the
* default that DataTables uses (incrementally increased for each column).
* * `string` - read an object property from the data source. There are
* three 'special' options that can be used in the string to alter how
* DataTables reads the data from the source object:
* * `.` - Dotted Javascript notation. Just as you use a `.` in
	 *      Javascript to read from nested objects, so too can the options
* specified in `data`. For example: `browser.version` or
* `browser.name`. If your object parameter name contains a period, use
* `\\` to escape it - i.e. `first\\.name`.
* * `[]` - Array notation. DataTables can automatically combine data
	 *      from an array source, joining the data with the characters provided
* between the two brackets. For example: `name[, ]` would provide a
* comma-space separated list from the source array. If no characters
* are provided between the brackets, the original array source is
* returned.
* * `()` - Function notation. Adding `()` to the end of a parameter will
* execute a function of the name given. For example: `browser()` for a
* simple function on the data source, `browser.version()` for a
* function in a nested property or even `browser().version` to get an
* object property if the function called returns an object.
* * `object` - use different data for the different data types requested by
	 *    DataTables ('filter', 'display', 'type' or 'sort'). The property name
	 *    of each entry is the data type it refers to, and the value can be
	 *    defined using an integer, string or function using the same rules as
* `render` normally does. Note that an `_` option _must_ be specified.
* This is the default value to use if you haven't specified a value for
* the data type requested by DataTables.
* * `function` - the function given will be executed whenever DataTables
* needs to set or get the data for a cell in the column. The function
* takes three parameters:
* * Parameters:
* * {array|object} The data source for the row (based on `data`)
	 *    * {string} The type of call / data requested - this will be 'filter',
* 'display', 'type' or 'sort'.
* * {array|object} The full data source for the row (not based on
* `data`)
* * Return:
* * The return value from the function is what will be used for the
* data requested.
*
* @type string|int|function|object|null
* @default null Use the data source value.
*
* @name DataTable.defaults.column.render
* @dtopt Columns
*
* @example
* // Create a comma separated list from an array of objects
* $(document).ready( function() {
* $('#example').dataTable( {
* "ajaxSource": "sources/deep.txt",
* "columns": [
* { "data": "engine" },
* { "data": "browser" },
* {
* "data": "platform",
* "render": "[, ].name"
* }
* ]
* } );
* } );
*
* @example
* // Execute a function to obtain data
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [ {
* "targets": [ 0 ],
* "data": null, // Use the full data source object for the renderer's source
* "render": "browserName()"
* } ]
* } );
* } );
*
* @example
* // As an object, extracting different data for the different types
* // This would be used with a data source such as:
* // { "phone": 5552368, "phone_filter": "5552368 555-2368", "phone_display": "555-2368" }
* // Here the `phone` integer is used for sorting and type detection, while `phone_filter`
	 *    // (which has both forms) is used for filtering so that a user can input either format, while
* // the formatted phone number is the one that is shown in the table.
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [ {
* "targets": [ 0 ],
* "data": null, // Use the full data source object for the renderer's source
* "render": {
* "_": "phone",
* "filter": "phone_filter",
* "display": "phone_display"
* }
* } ]
* } );
* } );
*
* @example
* // Use as a function to create a link from the data source
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [ {
* "targets": [ 0 ],
* "data": "download_link",
* "render": function ( data, type, full ) {
* return '<a href="'+data+'">Download</a>';
* }
* } ]
* } );
* } );
*/
"mRender": null,
/**
* Change the cell type created for the column - either TD cells or TH cells. This
* can be useful as TH cells have semantic meaning in the table body, allowing them
* to act as a header for a row (you may wish to add scope='row' to the TH elements).
* @type string
* @default td
*
* @name DataTable.defaults.column.cellType
* @dtopt Columns
*
* @example
* // Make the first column use TH cells
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [ {
* "targets": [ 0 ],
* "cellType": "th"
* } ]
* } );
* } );
*/
"sCellType": "td",
/**
* Class to give to each cell in this column.
* @type string
* @default <i>Empty string</i>
*
* @name DataTable.defaults.column.class
* @dtopt Columns
*
* @example
* // Using `columnDefs`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [
* { "class": "my_class", "targets": [ 0 ] }
* ]
* } );
* } );
*
* @example
* // Using `columns`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columns": [
* { "class": "my_class" },
* null,
* null,
* null,
* null
* ]
* } );
* } );
*/
"sClass": "",
/**
* When DataTables calculates the column widths to assign to each column,
* it finds the longest string in each column and then constructs a
* temporary table and reads the widths from that. The problem with this
* is that "mmm" is much wider then "iiii", but the latter is a longer
* string - thus the calculation can go wrong (doing it properly and putting
* it into an DOM object and measuring that is horribly(!) slow). Thus as
* a "work around" we provide this option. It will append its value to the
* text that is found to be the longest string for the column - i.e. padding.
* Generally you shouldn't need this!
* @type string
	 * @default <i>Empty string</i>
*
* @name DataTable.defaults.column.contentPadding
* @dtopt Columns
*
* @example
* // Using `columns`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columns": [
* null,
* null,
* null,
* {
* "contentPadding": "mmm"
* }
* ]
* } );
* } );
*/
"sContentPadding": "",
/**
* Allows a default value to be given for a column's data, and will be used
* whenever a null data source is encountered (this can be because `data`
* is set to null, or because the data source itself is null).
* @type string
* @default null
*
* @name DataTable.defaults.column.defaultContent
* @dtopt Columns
*
* @example
* // Using `columnDefs`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [
* {
* "data": null,
* "defaultContent": "Edit",
* "targets": [ -1 ]
* }
* ]
* } );
* } );
*
* @example
* // Using `columns`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columns": [
* null,
* null,
* null,
* {
* "data": null,
* "defaultContent": "Edit"
* }
* ]
* } );
* } );
*/
"sDefaultContent": null,
/**
* This parameter is only used in DataTables' server-side processing. It can
* be exceptionally useful to know what columns are being displayed on the
* client side, and to map these to database fields. When defined, the names
* also allow DataTables to reorder information from the server if it comes
* back in an unexpected order (i.e. if you switch your columns around on the
* client-side, your server-side code does not also need updating).
* @type string
* @default <i>Empty string</i>
*
* @name DataTable.defaults.column.name
* @dtopt Columns
*
* @example
* // Using `columnDefs`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [
* { "name": "engine", "targets": [ 0 ] },
* { "name": "browser", "targets": [ 1 ] },
* { "name": "platform", "targets": [ 2 ] },
* { "name": "version", "targets": [ 3 ] },
* { "name": "grade", "targets": [ 4 ] }
* ]
* } );
* } );
*
* @example
* // Using `columns`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columns": [
* { "name": "engine" },
* { "name": "browser" },
* { "name": "platform" },
* { "name": "version" },
* { "name": "grade" }
* ]
* } );
* } );
*/
"sName": "",
/**
* Defines a data source type for the ordering which can be used to read
* real-time information from the table (updating the internally cached
* version) prior to ordering. This allows ordering to occur on user
* editable elements such as form inputs.
* @type string
* @default std
*
* @name DataTable.defaults.column.orderDataType
* @dtopt Columns
*
* @example
* // Using `columnDefs`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [
* { "orderDataType": "dom-text", "targets": [ 2, 3 ] },
* { "type": "numeric", "targets": [ 3 ] },
* { "orderDataType": "dom-select", "targets": [ 4 ] },
* { "orderDataType": "dom-checkbox", "targets": [ 5 ] }
* ]
* } );
* } );
*
* @example
* // Using `columns`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columns": [
* null,
* null,
* { "orderDataType": "dom-text" },
* { "orderDataType": "dom-text", "type": "numeric" },
* { "orderDataType": "dom-select" },
* { "orderDataType": "dom-checkbox" }
* ]
* } );
* } );
*/
"sSortDataType": "std",
/**
* The title of this column.
* @type string
* @default null <i>Derived from the 'TH' value for this column in the
* original HTML table.</i>
*
* @name DataTable.defaults.column.title
* @dtopt Columns
*
* @example
* // Using `columnDefs`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [
* { "title": "My column title", "targets": [ 0 ] }
* ]
* } );
* } );
*
* @example
* // Using `columns`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columns": [
* { "title": "My column title" },
* null,
* null,
* null,
* null
* ]
* } );
* } );
*/
"sTitle": null,
/**
* The type allows you to specify how the data for this column will be
* ordered. Four types (string, numeric, date and html (which will strip
* HTML tags before ordering)) are currently available. Note that only date
* formats understood by Javascript's Date() object will be accepted as type
* date. For example: "Mar 26, 2008 5:03 PM". May take the values: 'string',
	 * 'numeric', 'date' or 'html' (by default). Further types can be added
* through plug-ins.
* @type string
* @default null <i>Auto-detected from raw data</i>
*
* @name DataTable.defaults.column.type
* @dtopt Columns
*
* @example
* // Using `columnDefs`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [
* { "type": "html", "targets": [ 0 ] }
* ]
* } );
* } );
*
* @example
* // Using `columns`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columns": [
* { "type": "html" },
* null,
* null,
* null,
* null
* ]
* } );
* } );
*/
"sType": null,
/**
* Defining the width of the column, this parameter may take any CSS value
* (3em, 20px etc). DataTables applies 'smart' widths to columns which have not
* been given a specific width through this interface ensuring that the table
* remains readable.
* @type string
* @default null <i>Automatic</i>
*
* @name DataTable.defaults.column.width
* @dtopt Columns
*
* @example
* // Using `columnDefs`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columnDefs": [
* { "width": "20%", "targets": [ 0 ] }
* ]
* } );
* } );
*
* @example
* // Using `columns`
* $(document).ready( function() {
* $('#example').dataTable( {
* "columns": [
* { "width": "20%" },
* null,
* null,
* null,
* null
* ]
* } );
* } );
*/
"sWidth": null
};
_fnHungarianMap( DataTable.defaults.column );
/**
* DataTables settings object - this holds all the information needed for a
* given table, including configuration, data and current application of the
* table options. DataTables does not have a single instance for each DataTable
* with the settings attached to that instance, but rather instances of the
* DataTable "class" are created on-the-fly as needed (typically by a
* $().dataTable() call) and the settings object is then applied to that
* instance.
*
* Note that this object is related to {@link DataTable.defaults} but this
* one is the internal data store for DataTables's cache of columns. It should
* NOT be manipulated outside of DataTables. Any configuration should be done
* through the initialisation options.
* @namespace
* @todo Really should attach the settings object to individual instances so we
* don't need to create new instances on each $().dataTable() call (if the
* table already exists). It would also save passing oSettings around and
* into every single function. However, this is a very significant
* architecture change for DataTables and will almost certainly break
* backwards compatibility with older installations. This is something that
* will be done in 2.0.
*/
DataTable.models.oSettings = {
/**
* Primary features of DataTables and their enablement state.
* @namespace
*/
"oFeatures": {
/**
* Flag to say if DataTables should automatically try to calculate the
* optimum table and columns widths (true) or not (false).
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type boolean
*/
"bAutoWidth": null,
/**
* Delay the creation of TR and TD elements until they are actually
	 * needed for a page draw. This can give a significant speed
	 * increase for Ajax source and Javascript source data, but makes no
	 * difference at all for DOM and server-side processing tables.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type boolean
*/
"bDeferRender": null,
/**
* Enable filtering on the table or not. Note that if this is disabled
* then there is no filtering at all on the table, including fnFilter.
* To just remove the filtering input use sDom and remove the 'f' option.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type boolean
*/
"bFilter": null,
/**
* Table information element (the 'Showing x of y records' div) enable
* flag.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type boolean
*/
"bInfo": null,
/**
* Present a user control allowing the end user to change the page size
* when pagination is enabled.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type boolean
*/
"bLengthChange": null,
/**
* Pagination enabled or not. Note that if this is disabled then length
* changing must also be disabled.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type boolean
*/
"bPaginate": null,
/**
	 * Flag to enable the processing indicator whenever DataTables is
	 * enacting a user request - typically an Ajax request for server-side
	 * processing.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type boolean
*/
"bProcessing": null,
/**
* Server-side processing enabled flag - when enabled DataTables will
* get all data from the server for every draw - there is no filtering,
* sorting or paging done on the client-side.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type boolean
*/
"bServerSide": null,
/**
* Sorting enablement flag.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type boolean
*/
"bSort": null,
/**
* Multi-column sorting
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type boolean
*/
"bSortMulti": null,
/**
* Apply a class to the columns which are being sorted to provide a
* visual highlight or not. This can slow things down when enabled since
* there is a lot of DOM interaction.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type boolean
*/
"bSortClasses": null,
/**
* State saving enablement flag.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type boolean
*/
"bStateSave": null
},
/**
* Scrolling settings for a table.
* @namespace
*/
"oScroll": {
/**
* When the table is shorter in height than sScrollY, collapse the
* table container down to the height of the table (when true).
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type boolean
*/
"bCollapse": null,
/**
* Width of the scrollbar for the web-browser's platform. Calculated
* during table initialisation.
* @type int
* @default 0
*/
"iBarWidth": 0,
/**
* Viewport width for horizontal scrolling. Horizontal scrolling is
* disabled if an empty string.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type string
*/
"sX": null,
/**
* Width to expand the table to when using x-scrolling. Typically you
* should not need to use this.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type string
* @deprecated
*/
"sXInner": null,
/**
* Viewport height for vertical scrolling. Vertical scrolling is disabled
* if an empty string.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type string
*/
"sY": null
},
/**
* Language information for the table.
* @namespace
* @extends DataTable.defaults.oLanguage
*/
"oLanguage": {
/**
* Information callback function. See
* {@link DataTable.defaults.fnInfoCallback}
* @type function
* @default null
*/
"fnInfoCallback": null
},
/**
* Browser support parameters
* @namespace
*/
"oBrowser": {
/**
* Indicate if the browser incorrectly calculates width:100% inside a
* scrolling element (IE6/7)
* @type boolean
* @default false
*/
"bScrollOversize": false,
/**
* Determine if the vertical scrollbar is on the right or left of the
* scrolling container - needed for rtl language layout, although not
* all browsers move the scrollbar (Safari).
* @type boolean
* @default false
*/
"bScrollbarLeft": false,
/**
* Flag for if `getBoundingClientRect` is fully supported or not
* @type boolean
* @default false
*/
"bBounding": false,
/**
* Browser scrollbar width
* @type integer
* @default 0
*/
"barWidth": 0
},
"ajax": null,
/**
* Array referencing the nodes which are used for the features. The
* parameters of this object match what is allowed by sDom - i.e.
* <ul>
* <li>'l' - Length changing</li>
* <li>'f' - Filtering input</li>
* <li>'t' - The table!</li>
* <li>'i' - Information</li>
* <li>'p' - Pagination</li>
* <li>'r' - pRocessing</li>
* </ul>
* @type array
* @default []
*/
"aanFeatures": [],
/**
* Store data information - see {@link DataTable.models.oRow} for detailed
* information.
* @type array
* @default []
*/
"aoData": [],
/**
* Array of indexes which are in the current display (after filtering etc)
* @type array
* @default []
*/
"aiDisplay": [],
/**
* Array of indexes for display - no filtering
* @type array
* @default []
*/
"aiDisplayMaster": [],
/**
* Map of row ids to data indexes
* @type object
* @default {}
*/
"aIds": {},
/**
* Store information about each column that is in use
* @type array
* @default []
*/
"aoColumns": [],
/**
* Store information about the table's header
* @type array
* @default []
*/
"aoHeader": [],
/**
* Store information about the table's footer
* @type array
* @default []
*/
"aoFooter": [],
/**
* Store the applied global search information in case we want to force a
	 * re-search or compare the old search to a new one.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @namespace
* @extends DataTable.models.oSearch
*/
"oPreviousSearch": {},
/**
* Store the applied search for each column - see
* {@link DataTable.models.oSearch} for the format that is used for the
* filtering information for each column.
* @type array
* @default []
*/
"aoPreSearchCols": [],
/**
* Sorting that is applied to the table. Note that the inner arrays are
* used in the following manner:
* <ul>
* <li>Index 0 - column number</li>
* <li>Index 1 - current sorting direction</li>
* </ul>
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type array
* @todo These inner arrays should really be objects
*/
"aaSorting": null,
/**
* Sorting that is always applied to the table (i.e. prefixed in front of
* aaSorting).
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type array
* @default []
*/
"aaSortingFixed": [],
/**
* Classes to use for the striping of a table.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type array
* @default []
*/
"asStripeClasses": null,
/**
* If restoring a table - we should restore its striping classes as well
* @type array
* @default []
*/
"asDestroyStripes": [],
/**
* If restoring a table - we should restore its width
* @type int
* @default 0
*/
"sDestroyWidth": 0,
/**
* Callback functions array for every time a row is inserted (i.e. on a draw).
* @type array
* @default []
*/
"aoRowCallback": [],
/**
* Callback functions for the header on each draw.
* @type array
* @default []
*/
"aoHeaderCallback": [],
/**
* Callback function for the footer on each draw.
* @type array
* @default []
*/
"aoFooterCallback": [],
/**
* Array of callback functions for draw callback functions
* @type array
* @default []
*/
"aoDrawCallback": [],
/**
* Array of callback functions for row created function
* @type array
* @default []
*/
"aoRowCreatedCallback": [],
/**
* Callback functions for just before the table is redrawn. A return of
* false will be used to cancel the draw.
* @type array
* @default []
*/
"aoPreDrawCallback": [],
/**
* Callback functions for when the table has been initialised.
* @type array
* @default []
*/
"aoInitComplete": [],
/**
* Callbacks for modifying the settings to be stored for state saving, prior to
* saving state.
* @type array
* @default []
*/
"aoStateSaveParams": [],
/**
* Callbacks for modifying the settings that have been stored for state saving
* prior to using the stored values to restore the state.
* @type array
* @default []
*/
"aoStateLoadParams": [],
/**
* Callbacks for operating on the settings object once the saved state has been
* loaded
* @type array
* @default []
*/
"aoStateLoaded": [],
/**
* Cache the table ID for quick access
* @type string
* @default <i>Empty string</i>
*/
"sTableId": "",
/**
* The TABLE node for the main table
* @type node
* @default null
*/
"nTable": null,
/**
* Permanent ref to the thead element
* @type node
* @default null
*/
"nTHead": null,
/**
* Permanent ref to the tfoot element - if it exists
* @type node
* @default null
*/
"nTFoot": null,
/**
* Permanent ref to the tbody element
* @type node
* @default null
*/
"nTBody": null,
/**
* Cache the wrapper node (contains all DataTables controlled elements)
* @type node
* @default null
*/
"nTableWrapper": null,
/**
	 * Indicate if, when using server-side processing, the loading of data
* should be deferred until the second draw.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type boolean
* @default false
*/
"bDeferLoading": false,
/**
* Indicate if all required information has been read in
* @type boolean
* @default false
*/
"bInitialised": false,
/**
* Information about open rows. Each object in the array has the parameters
* 'nTr' and 'nParent'
* @type array
* @default []
*/
"aoOpenRows": [],
/**
* Dictate the positioning of DataTables' control elements - see
* {@link DataTable.model.oInit.sDom}.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type string
* @default null
*/
"sDom": null,
/**
	 * Search delay (in ms)
* @type integer
* @default null
*/
"searchDelay": null,
/**
* Which type of pagination should be used.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type string
* @default two_button
*/
"sPaginationType": "two_button",
/**
* The state duration (for `stateSave`) in seconds.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type int
* @default 0
*/
"iStateDuration": 0,
/**
* Array of callback functions for state saving. Each array element is an
* object with the following parameters:
* <ul>
* <li>function:fn - function to call. Takes two parameters, oSettings
* and the JSON string to save that has been thus far created. Returns
* a JSON string to be inserted into a json object
* (i.e. '"param": [ 0, 1, 2]')</li>
* <li>string:sName - name of callback</li>
* </ul>
* @type array
* @default []
*/
"aoStateSave": [],
/**
* Array of callback functions for state loading. Each array element is an
* object with the following parameters:
* <ul>
* <li>function:fn - function to call. Takes two parameters, oSettings
* and the object stored. May return false to cancel state loading</li>
* <li>string:sName - name of callback</li>
* </ul>
* @type array
* @default []
*/
"aoStateLoad": [],
/**
* State that was saved. Useful for back reference
* @type object
* @default null
*/
"oSavedState": null,
/**
* State that was loaded. Useful for back reference
* @type object
* @default null
*/
"oLoadedState": null,
/**
* Source url for AJAX data for the table.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type string
* @default null
*/
"sAjaxSource": null,
/**
* Property from a given object from which to read the table data from. This
* can be an empty string (when not server-side processing), in which case
	 * it is assumed that an array is given directly.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type string
*/
"sAjaxDataProp": null,
/**
* Note if draw should be blocked while getting data
* @type boolean
* @default true
*/
"bAjaxDataGet": true,
/**
* The last jQuery XHR object that was used for server-side data gathering.
* This can be used for working with the XHR information in one of the
* callbacks
* @type object
* @default null
*/
"jqXHR": null,
/**
* JSON returned from the server in the last Ajax request
* @type object
* @default undefined
*/
"json": undefined,
/**
* Data submitted as part of the last Ajax request
* @type object
* @default undefined
*/
"oAjaxData": undefined,
/**
* Function to get the server-side data.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type function
*/
"fnServerData": null,
/**
* Functions which are called prior to sending an Ajax request so extra
* parameters can easily be sent to the server
* @type array
* @default []
*/
"aoServerParams": [],
/**
* Send the XHR HTTP method - GET or POST (could be PUT or DELETE if
* required).
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type string
*/
"sServerMethod": null,
/**
* Format numbers for display.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type function
*/
"fnFormatNumber": null,
/**
* List of options that can be used for the user selectable length menu.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type array
* @default []
*/
"aLengthMenu": null,
/**
* Counter for the draws that the table does. Also used as a tracker for
* server-side processing
* @type int
* @default 0
*/
"iDraw": 0,
/**
* Indicate if a redraw is being done - useful for Ajax
* @type boolean
* @default false
*/
"bDrawing": false,
/**
* Draw index (iDraw) of the last error when parsing the returned data
* @type int
* @default -1
*/
"iDrawError": -1,
/**
* Paging display length
* @type int
* @default 10
*/
"_iDisplayLength": 10,
/**
* Paging start point - aiDisplay index
* @type int
* @default 0
*/
"_iDisplayStart": 0,
/**
* Server-side processing - number of records in the result set
	 * (i.e. before filtering). Use fnRecordsTotal rather than
* this property to get the value of the number of records, regardless of
* the server-side processing setting.
* @type int
* @default 0
* @private
*/
"_iRecordsTotal": 0,
/**
* Server-side processing - number of records in the current display set
* (i.e. after filtering). Use fnRecordsDisplay rather than
* this property to get the value of the number of records, regardless of
* the server-side processing setting.
	 * @type int
* @default 0
* @private
*/
"_iRecordsDisplay": 0,
/**
* The classes to use for the table
* @type object
* @default {}
*/
"oClasses": {},
/**
* Flag attached to the settings object so you can check in the draw
* callback if filtering has been done in the draw. Deprecated in favour of
* events.
* @type boolean
* @default false
* @deprecated
*/
"bFiltered": false,
/**
* Flag attached to the settings object so you can check in the draw
* callback if sorting has been done in the draw. Deprecated in favour of
* events.
* @type boolean
* @default false
* @deprecated
*/
"bSorted": false,
/**
	 * Indicate whether, if multiple rows are in the header and there is more
	 * than one unique cell per column, the top one (true) or bottom one
	 * (false) should be used for sorting / the title by DataTables.
* Note that this parameter will be set by the initialisation routine. To
* set a default use {@link DataTable.defaults}.
* @type boolean
*/
"bSortCellsTop": null,
/**
* Initialisation object that is used for the table
* @type object
* @default null
*/
"oInit": null,
/**
* Destroy callback functions - for plug-ins to attach themselves to the
* destroy so they can clean up markup and events.
* @type array
* @default []
*/
"aoDestroyCallback": [],
/**
* Get the number of records in the current record set, before filtering
* @type function
*/
"fnRecordsTotal": function ()
{
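		// For server-side processing ('ssp') use the count reported by the
		// server (`* 1` coerces a possibly string-typed value to a number);
		// otherwise count the unfiltered row indexes held on the client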
return _fnDataSource( this ) == 'ssp' ?
this._iRecordsTotal * 1 :
this.aiDisplayMaster.length;
},
/**
* Get the number of records in the current record set, after filtering
* @type function
*/
"fnRecordsDisplay": function ()
{
return _fnDataSource( this ) == 'ssp' ?
this._iRecordsDisplay * 1 :
this.aiDisplay.length;
},
/**
* Get the display end point - aiDisplay index
* @type function
*/
"fnDisplayEnd": function ()
{
var
len = this._iDisplayLength,
start = this._iDisplayStart,
calc = start + len,
records = this.aiDisplay.length,
features = this.oFeatures,
paginate = features.bPaginate;
if ( features.bServerSide ) {
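			// Server-side processing: aiDisplay only holds the rows for the
			// current page, so calculate the end point against the record
			// count reported by the server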
return paginate === false || len === -1 ?
start + records :
Math.min( start+len, this._iRecordsDisplay );
}
else {
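			// Client-side processing: cap the end point at the number of
			// rows remaining after filtering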
return ! paginate || calc>records || len===-1 ?
records :
calc;
}
},
/**
* The DataTables object for this table
* @type object
* @default null
*/
"oInstance": null,
/**
* Unique identifier for each instance of the DataTables object. If there
* is an ID on the table node, then it takes that value, otherwise an
* incrementing internal counter is used.
* @type string
* @default null
*/
"sInstance": null,
/**
* tabindex attribute value that is added to DataTables control elements, allowing
* keyboard navigation of the table and its controls.
*/
"iTabIndex": 0,
/**
	 * DIV container for the header scrolling table if scrolling
*/
"nScrollHead": null,
/**
* DIV container for the footer scrolling table if scrolling
*/
"nScrollFoot": null,
/**
* Last applied sort
* @type array
* @default []
*/
"aLastSort": [],
/**
* Stored plug-in instances
* @type object
* @default {}
*/
"oPlugins": {},
/**
* Function used to get a row's id from the row's data
* @type function
* @default null
*/
"rowIdFn": null,
/**
	 * Data location where a row's id is stored
* @type string
* @default null
*/
"rowId": null
};
/**
* Extension object for DataTables that is used to provide all extension
* options.
*
* Note that the `DataTable.ext` object is available through
* `jQuery.fn.dataTable.ext` where it may be accessed and manipulated. It is
* also aliased to `jQuery.fn.dataTableExt` for historic reasons.
* @namespace
* @extends DataTable.models.ext
*/
/**
* DataTables extensions
*
* This namespace acts as a collection area for plug-ins that can be used to
 * extend DataTables capabilities. Indeed many of the built-in methods
 * use this mechanism to provide their own capabilities (sorting methods for
* example).
*
* Note that this namespace is aliased to `jQuery.fn.dataTableExt` for legacy
* reasons
*
* @namespace
*/
DataTable.ext = _ext = {
/**
* Buttons. For use with the Buttons extension for DataTables. This is
* defined here so other extensions can define buttons regardless of load
* order. It is _not_ used by DataTables core.
*
* @type object
* @default {}
*/
buttons: {},
/**
* Element class names
*
* @type object
* @default {}
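	 *
	 * As a sketch of intended use, a plug-in or page could override one of
	 * the class names assigned to this object later in this file (the
	 * `sWrapper` entry) before initialising a table:
	 *
	 * @example
	 *    $.extend( $.fn.dataTable.ext.classes, {
	 *      "sWrapper": "dataTables_wrapper my-custom-wrapper"
	 *    } );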
*/
classes: {},
/**
* DataTables build type (expanded by the download builder)
*
* @type string
*/
builder: "-source-",
/**
* Error reporting.
*
* How should DataTables report an error. Can take the value 'alert',
* 'throw', 'none' or a function.
*
* @type string|function
* @default alert
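	 *
	 * As a sketch of the function form (the parameter names here are
	 * illustrative): the callback is passed the settings object, a tech
	 * note number and the error message.
	 *
	 * @example
	 *    $.fn.dataTable.ext.errMode = function ( settings, techNote, message ) {
	 *      console.log( 'DataTables warning: ' + message );
	 *    };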
*/
errMode: "alert",
/**
* Feature plug-ins.
*
* This is an array of objects which describe the feature plug-ins that are
* available to DataTables. These feature plug-ins are then available for
* use through the `dom` initialisation option.
*
* Each feature plug-in is described by an object which must have the
* following properties:
*
* * `fnInit` - function that is used to initialise the plug-in,
* * `cFeature` - a character so the feature can be enabled by the `dom`
	 *   initialisation option. This is case sensitive.
*
* The `fnInit` function has the following input parameters:
*
* 1. `{object}` DataTables settings object: see
* {@link DataTable.models.oSettings}
*
* And the following return is expected:
*
* * {node|null} The element which contains your feature. Note that the
	 *   return may also be void if your plug-in does not need to inject any
* DOM elements into DataTables control (`dom`) - for example this might
* be useful when developing a plug-in which allows table control via
* keyboard entry
*
* @type array
*
* @example
* $.fn.dataTable.ext.features.push( {
* "fnInit": function( oSettings ) {
* return new TableTools( { "oDTSettings": oSettings } );
* },
* "cFeature": "T"
* } );
*/
feature: [],
/**
* Row searching.
*
	 * This method of searching is complementary to the default type based
* searching, and a lot more comprehensive as it allows you complete control
* over the searching logic. Each element in this array is a function
* (parameters described below) that is called for every row in the table,
* and your logic decides if it should be included in the searching data set
* or not.
*
* Searching functions have the following input parameters:
*
* 1. `{object}` DataTables settings object: see
* {@link DataTable.models.oSettings}
* 2. `{array|object}` Data for the row to be processed (same as the
* original format that was passed in as the data source, or an array
	 *    from a DOM data source)
* 3. `{int}` Row index ({@link DataTable.models.oSettings.aoData}), which
* can be useful to retrieve the `TR` element if you need DOM interaction.
*
* And the following return is expected:
*
* * {boolean} Include the row in the searched result set (true) or not
* (false)
*
* Note that as with the main search ability in DataTables, technically this
* is "filtering", since it is subtractive. However, for consistency in
* naming we call it searching here.
*
* @type array
* @default []
*
* @example
* // The following example shows custom search being applied to the
* // fourth column (i.e. the data[3] index) based on two input values
* // from the end-user, matching the data in a certain range.
* $.fn.dataTable.ext.search.push(
* function( settings, data, dataIndex ) {
* var min = document.getElementById('min').value * 1;
* var max = document.getElementById('max').value * 1;
* var version = data[3] == "-" ? 0 : data[3]*1;
*
* if ( min == "" && max == "" ) {
* return true;
* }
* else if ( min == "" && version < max ) {
* return true;
* }
* else if ( min < version && "" == max ) {
* return true;
* }
* else if ( min < version && version < max ) {
* return true;
* }
* return false;
* }
* );
*/
search: [],
/**
* Selector extensions
*
* The `selector` option can be used to extend the options available for the
* selector modifier options (`selector-modifier` object data type) that
* each of the three built in selector types offer (row, column and cell +
* their plural counterparts). For example the Select extension uses this
* mechanism to provide an option to select only rows, columns and cells
* that have been marked as selected by the end user (`{selected: true}`),
* which can be used in conjunction with the existing built in selector
* options.
*
* Each property is an array to which functions can be pushed. The functions
* take three attributes:
*
* * Settings object for the host table
* * Options object (`selector-modifier` object type)
* * Array of selected item indexes
*
* The return is an array of the resulting item indexes after the custom
* selector has been applied.
*
* @type object
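	 *
	 * As a minimal sketch (the `even` modifier name is hypothetical), a
	 * custom row selector option could filter the supplied indexes:
	 *
	 * @example
	 *    $.fn.dataTable.ext.selector.row.push( function ( settings, opts, rows ) {
	 *      return opts.even ?
	 *        $.map( rows, function ( idx ) { return idx % 2 === 0 ? idx : null; } ) :
	 *        rows;
	 *    } );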
*/
selector: {
cell: [],
column: [],
row: []
},
/**
	 * Internal functions, exposed for use in plug-ins.
*
* Please note that you should not need to use the internal methods for
* anything other than a plug-in (and even then, try to avoid if possible).
	 * The internal functions may change between releases.
*
* @type object
* @default {}
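	 *
	 * @example
	 *    // Sketch: read a registered internal helper from a plug-in,
	 *    // assuming `_fnLog` is among the registered internals (internal
	 *    // names are prefixed `_fn` and may change between releases)
	 *    var log = $.fn.dataTable.ext.internal._fnLog;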
*/
internal: {},
/**
* Legacy configuration options. Enable and disable legacy options that
* are available in DataTables.
*
* @type object
*/
legacy: {
/**
* Enable / disable DataTables 1.9 compatible server-side processing
* requests
*
* @type boolean
* @default null
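		 *
		 * @example
		 *    // Force DataTables 1.9 style parameters to be sent to the
		 *    // server for server-side processing requests
		 *    $.fn.dataTable.ext.legacy.ajax = true;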
*/
ajax: null
},
/**
* Pagination plug-in methods.
*
* Each entry in this object is a function and defines which buttons should
* be shown by the pagination rendering method that is used for the table:
* {@link DataTable.ext.renderer.pageButton}. The renderer addresses how the
* buttons are displayed in the document, while the functions here tell it
* what buttons to display. This is done by returning an array of button
* descriptions (what each button will do).
*
* Pagination types (the four built in options and any additional plug-in
* options defined here) can be used through the `paginationType`
* initialisation parameter.
*
* The functions defined take two parameters:
*
* 1. `{int} page` The current page index
* 2. `{int} pages` The number of pages in the table
*
* Each function is expected to return an array where each element of the
* array can be one of:
*
* * `first` - Jump to first page when activated
* * `last` - Jump to last page when activated
* * `previous` - Show previous page when activated
* * `next` - Show next page when activated
* * `{int}` - Show page of the index given
* * `{array}` - A nested array containing the above elements to add a
* containing 'DIV' element (might be useful for styling).
*
* Note that DataTables v1.9- used this object slightly differently whereby
* an object with two functions would be defined for each plug-in. That
* ability is still supported by DataTables 1.10+ to provide backwards
	 * compatibility, but this style of use is now deprecated and no longer
* documented in DataTables 1.10+.
*
* @type object
* @default {}
*
* @example
* // Show previous, next and current page buttons only
* $.fn.dataTableExt.oPagination.current = function ( page, pages ) {
* return [ 'previous', page, 'next' ];
* };
*/
pager: {},
renderer: {
pageButton: {},
header: {}
},
/**
* Ordering plug-ins - custom data source
*
* The extension options for ordering of data available here is complimentary
* to the default type based ordering that DataTables typically uses. It
	 * allows much greater control over the data that is being used to
* order a column, but is necessarily therefore more complex.
*
* This type of ordering is useful if you want to do ordering based on data
* live from the DOM (for example the contents of an 'input' element) rather
* than just the static string that DataTables knows of.
*
* The way these plug-ins work is that you create an array of the values you
* wish to be ordering for the column in question and then return that
	 * array. The data in the array must be in the index order of the rows in
	 * the table (not the currently applied ordering!). Which order data gathering
* function is run here depends on the `dt-init columns.orderDataType`
* parameter that is used for the column (if any).
*
* The functions defined take two parameters:
*
* 1. `{object}` DataTables settings object: see
* {@link DataTable.models.oSettings}
* 2. `{int}` Target column index
*
* Each function is expected to return an array:
*
	 * * `{array}` Data for the column to be ordered upon
*
* @type array
*
* @example
* // Ordering using `input` node values
* $.fn.dataTable.ext.order['dom-text'] = function ( settings, col )
* {
* return this.api().column( col, {order:'index'} ).nodes().map( function ( td, i ) {
* return $('input', td).val();
* } );
* }
*/
order: {},
/**
* Type based plug-ins.
*
* Each column in DataTables has a type assigned to it, either by automatic
* detection or by direct assignment using the `type` option for the column.
	 * The type of a column will affect how it is ordered and searched (plug-ins
* can also make use of the column type if required).
*
* @namespace
*/
type: {
/**
* Type detection functions.
*
* The functions defined in this object are used to automatically detect
* a column's type, making initialisation of DataTables super easy, even
* when complex data is in the table.
*
* The functions defined take two parameters:
*
* 1. `{*}` Data from the column cell to be analysed
* 2. `{settings}` DataTables settings object. This can be used to
* perform context specific type detection - for example detection
* based on language settings such as using a comma for a decimal
* place. Generally speaking the options from the settings will not
* be required
*
* Each function is expected to return:
*
* * `{string|null}` Data type detected, or null if unknown (and thus
	 *   pass it on to the other type detection functions).
*
* @type array
*
* @example
* // Currency type detection plug-in:
* $.fn.dataTable.ext.type.detect.push(
* function ( data, settings ) {
* // Check the numeric part
* if ( ! $.isNumeric( data.substring(1) ) ) {
* return null;
* }
*
* // Check prefixed by currency
* if ( data.charAt(0) == '$' || data.charAt(0) == '£' ) {
* return 'currency';
* }
* return null;
* }
* );
*/
detect: [],
/**
* Type based search formatting.
*
* The type based searching functions can be used to pre-format the
	 * data to be searched on. For example, it can be used to strip HTML
* tags or to de-format telephone numbers for numeric only searching.
*
	 * Note that if a search is not defined for a column of a given type,
* no search formatting will be performed.
*
* Pre-processing of searching data plug-ins - When you assign the sType
* for a column (or have it automatically detected for you by DataTables
* or a type detection plug-in), you will typically be using this for
* custom sorting, but it can also be used to provide custom searching
	 * by allowing you to pre-process the data and returning the data in
	 * the format that should be searched upon. This is done by adding
	 * functions to this object with a parameter name which matches the sType
* for that target column. This is the corollary of <i>afnSortData</i>
* for searching data.
*
* The functions defined take a single parameter:
*
* 1. `{*}` Data from the column cell to be prepared for searching
*
* Each function is expected to return:
*
* * `{string|null}` Formatted string that will be used for the searching.
*
* @type object
* @default {}
*
* @example
* $.fn.dataTable.ext.type.search['title-numeric'] = function ( d ) {
* return d.replace(/\n/g," ").replace( /<.*?>/g, "" );
* }
*/
search: {},
/**
* Type based ordering.
*
* The column type tells DataTables what ordering to apply to the table
	 * when a column is sorted upon. The order for each type that is defined
	 * is controlled by the functions available in this object.
*
* Each ordering option can be described by three properties added to
* this object:
*
* * `{type}-pre` - Pre-formatting function
* * `{type}-asc` - Ascending order function
* * `{type}-desc` - Descending order function
*
* All three can be used together, only `{type}-pre` or only
* `{type}-asc` and `{type}-desc` together. It is generally recommended
* that only `{type}-pre` is used, as this provides the optimal
* implementation in terms of speed, although the others are provided
* for compatibility with existing Javascript sort functions.
*
* `{type}-pre`: Functions defined take a single parameter:
*
* 1. `{*}` Data from the column cell to be prepared for ordering
*
* And return:
*
* * `{*}` Data to be sorted upon
*
* `{type}-asc` and `{type}-desc`: Functions are typical Javascript sort
* functions, taking two parameters:
*
* 1. `{*}` Data to compare to the second parameter
* 2. `{*}` Data to compare to the first parameter
*
* And returning:
*
* * `{*}` Ordering match: <0 if first parameter should be sorted lower
* than the second parameter, ===0 if the two parameters are equal and
	 *    >0 if the first parameter should be sorted higher than the second
* parameter.
*
* @type object
* @default {}
*
* @example
* // Numeric ordering of formatted numbers with a pre-formatter
* $.extend( $.fn.dataTable.ext.type.order, {
* "string-pre": function(x) {
	 *        x = (x === "-" || x === "") ? 0 : x.replace( /[^\d\-\.]/g, "" );
	 *        return parseFloat( x );
* }
* } );
*
* @example
* // Case-sensitive string ordering, with no pre-formatting method
	 *    $.extend( $.fn.dataTable.ext.type.order, {
* "string-case-asc": function(x,y) {
* return ((x < y) ? -1 : ((x > y) ? 1 : 0));
* },
* "string-case-desc": function(x,y) {
* return ((x < y) ? 1 : ((x > y) ? -1 : 0));
* }
* } );
*/
order: {}
},
/**
* Unique DataTables instance counter
*
* @type int
* @private
*/
_unique: 0,
//
	// Deprecated
	// The following properties are retained for backwards compatibility only.
	// They should not be used in new projects and will be removed in a future
// version
//
/**
* Version check function.
* @type function
	 * @deprecated Since v1.10
*/
fnVersionCheck: DataTable.fnVersionCheck,
/**
* Index for what 'this' index API functions should use
* @type int
* @deprecated Since v1.10
*/
iApiIndex: 0,
/**
* jQuery UI class container
* @type object
* @deprecated Since v1.10
*/
oJUIClasses: {},
/**
* Software version
* @type string
* @deprecated Since v1.10
*/
sVersion: DataTable.version
};
//
// Backwards compatibility. Alias to pre 1.10 Hungarian notation counterparts
//
$.extend( _ext, {
afnFiltering: _ext.search,
aTypes: _ext.type.detect,
ofnSearch: _ext.type.search,
oSort: _ext.type.order,
afnSortData: _ext.order,
aoFeatures: _ext.feature,
oApi: _ext.internal,
oStdClasses: _ext.classes,
oPagination: _ext.pager
} );
$.extend( DataTable.ext.classes, {
"sTable": "dataTable",
"sNoFooter": "no-footer",
/* Paging buttons */
"sPageButton": "paginate_button",
"sPageButtonActive": "current",
"sPageButtonDisabled": "disabled",
/* Striping classes */
"sStripeOdd": "odd",
"sStripeEven": "even",
/* Empty row */
"sRowEmpty": "dataTables_empty",
/* Features */
"sWrapper": "dataTables_wrapper",
"sFilter": "dataTables_filter",
"sInfo": "dataTables_info",
"sPaging": "dataTables_paginate paging_", /* Note that the type is postfixed */
"sLength": "dataTables_length",
"sProcessing": "dataTables_processing",
/* Sorting */
"sSortAsc": "sorting_asc",
"sSortDesc": "sorting_desc",
"sSortable": "sorting", /* Sortable in both directions */
"sSortableAsc": "sorting_asc_disabled",
"sSortableDesc": "sorting_desc_disabled",
"sSortableNone": "sorting_disabled",
"sSortColumn": "sorting_", /* Note that an int is postfixed for the sorting order */
/* Filtering */
"sFilterInput": "",
/* Page length */
"sLengthSelect": "",
/* Scrolling */
"sScrollWrapper": "dataTables_scroll",
"sScrollHead": "dataTables_scrollHead",
"sScrollHeadInner": "dataTables_scrollHeadInner",
"sScrollBody": "dataTables_scrollBody",
"sScrollFoot": "dataTables_scrollFoot",
"sScrollFootInner": "dataTables_scrollFootInner",
/* Misc */
"sHeaderTH": "",
"sFooterTH": "",
// Deprecated
"sSortJUIAsc": "",
"sSortJUIDesc": "",
"sSortJUI": "",
"sSortJUIAscAllowed": "",
"sSortJUIDescAllowed": "",
"sSortJUIWrapper": "",
"sSortIcon": "",
"sJUIHeader": "",
"sJUIFooter": ""
} );
var extPagination = DataTable.ext.pager;
function _numbers ( page, pages ) {
var
numbers = [],
buttons = extPagination.numbers_length,
half = Math.floor( buttons / 2 ),
i = 1;
if ( pages <= buttons ) {
numbers = _range( 0, pages );
}
else if ( page <= half ) {
numbers = _range( 0, buttons-2 );
numbers.push( 'ellipsis' );
numbers.push( pages-1 );
}
else if ( page >= pages - 1 - half ) {
numbers = _range( pages-(buttons-2), pages );
numbers.splice( 0, 0, 'ellipsis' ); // no unshift in ie6
numbers.splice( 0, 0, 0 );
}
else {
numbers = _range( page-half+2, page+half-1 );
numbers.push( 'ellipsis' );
numbers.push( pages-1 );
numbers.splice( 0, 0, 'ellipsis' );
numbers.splice( 0, 0, 0 );
}
numbers.DT_el = 'span';
return numbers;
}
$.extend( extPagination, {
simple: function ( page, pages ) {
return [ 'previous', 'next' ];
},
full: function ( page, pages ) {
return [ 'first', 'previous', 'next', 'last' ];
},
numbers: function ( page, pages ) {
return [ _numbers(page, pages) ];
},
simple_numbers: function ( page, pages ) {
return [ 'previous', _numbers(page, pages), 'next' ];
},
full_numbers: function ( page, pages ) {
return [ 'first', 'previous', _numbers(page, pages), 'next', 'last' ];
},
first_last_numbers: function (page, pages) {
return ['first', _numbers(page, pages), 'last'];
},
// For testing and plug-ins to use
_numbers: _numbers,
// Number of number buttons (including ellipsis) to show. _Must be odd!_
numbers_length: 7
} );
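// Illustrative sketch (not from the original source): additional paging styles
// can be registered on the pager object above and selected via the `pagingType`
// initialisation option. The style name `numbers_only` and the table selector
// are hypothetical; this mirrors the built-in `numbers` style.
//
//   $.fn.dataTable.ext.pager.numbers_only = function ( page, pages ) {
//       return [ $.fn.dataTable.ext.pager._numbers( page, pages ) ];
//   };
//
//   $('#example').dataTable( { pagingType: 'numbers_only' } );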
$.extend( true, DataTable.ext.renderer, {
pageButton: {
_: function ( settings, host, idx, buttons, page, pages ) {
var classes = settings.oClasses;
var lang = settings.oLanguage.oPaginate;
var aria = settings.oLanguage.oAria.paginate || {};
var btnDisplay, btnClass, counter=0;
var attach = function( container, buttons ) {
var i, ien, node, button;
var clickHandler = function ( e ) {
_fnPageChange( settings, e.data.action, true );
};
for ( i=0, ien=buttons.length ; i<ien ; i++ ) {
button = buttons[i];
if ( $.isArray( button ) ) {
var inner = $( '<'+(button.DT_el || 'div')+'/>' )
.appendTo( container );
attach( inner, button );
}
else {
btnDisplay = null;
btnClass = '';
switch ( button ) {
case 'ellipsis':
container.append('<span class="ellipsis">…</span>');
break;
case 'first':
btnDisplay = lang.sFirst;
btnClass = button + (page > 0 ?
'' : ' '+classes.sPageButtonDisabled);
break;
case 'previous':
btnDisplay = lang.sPrevious;
btnClass = button + (page > 0 ?
'' : ' '+classes.sPageButtonDisabled);
break;
case 'next':
btnDisplay = lang.sNext;
btnClass = button + (page < pages-1 ?
'' : ' '+classes.sPageButtonDisabled);
break;
case 'last':
btnDisplay = lang.sLast;
btnClass = button + (page < pages-1 ?
'' : ' '+classes.sPageButtonDisabled);
break;
default:
btnDisplay = button + 1;
btnClass = page === button ?
classes.sPageButtonActive : '';
break;
}
if ( btnDisplay !== null ) {
node = $('<a>', {
'class': classes.sPageButton+' '+btnClass,
'aria-controls': settings.sTableId,
'aria-label': aria[ button ],
'data-dt-idx': counter,
'tabindex': settings.iTabIndex,
'id': idx === 0 && typeof button === 'string' ?
settings.sTableId +'_'+ button :
null
} )
.html( btnDisplay )
.appendTo( container );
_fnBindAction(
node, {action: button}, clickHandler
);
counter++;
}
}
}
};
// IE9 throws an 'unknown error' if document.activeElement is used
// inside an iframe or frame. Try / catch the error. Not good for
// accessibility, but neither are frames.
var activeEl;
try {
// Because this approach is destroying and recreating the paging
// elements, focus is lost on the select button which is bad for
// accessibility. So we want to restore focus once the draw has
// completed
activeEl = $(host).find(document.activeElement).data('dt-idx');
}
catch (e) {}
attach( $(host).empty(), buttons );
if ( activeEl !== undefined ) {
$(host).find( '[data-dt-idx='+activeEl+']' ).focus();
}
}
}
} );
// Built in type detection. See model.ext.aTypes for information about
// what is required from these methods.
$.extend( DataTable.ext.type.detect, [
// Plain numbers - first since V8 detects some plain numbers as dates
// e.g. Date.parse('55') (but not all, e.g. Date.parse('22')...).
function ( d, settings )
{
var decimal = settings.oLanguage.sDecimal;
return _isNumber( d, decimal ) ? 'num'+decimal : null;
},
// Dates (only those recognised by the browser's Date.parse)
function ( d, settings )
{
// V8 tries _very_ hard to make a string passed into `Date.parse()`
// valid, so we need to use a regex to restrict date formats. Use a
// plug-in for anything other than ISO8601 style strings
if ( d && !(d instanceof Date) && ! _re_date.test(d) ) {
return null;
}
var parsed = Date.parse(d);
return (parsed !== null && !isNaN(parsed)) || _empty(d) ? 'date' : null;
},
// Formatted numbers
function ( d, settings )
{
var decimal = settings.oLanguage.sDecimal;
return _isNumber( d, decimal, true ) ? 'num-fmt'+decimal : null;
},
// HTML numeric
function ( d, settings )
{
var decimal = settings.oLanguage.sDecimal;
return _htmlNumeric( d, decimal ) ? 'html-num'+decimal : null;
},
// HTML numeric, formatted
function ( d, settings )
{
var decimal = settings.oLanguage.sDecimal;
return _htmlNumeric( d, decimal, true ) ? 'html-num-fmt'+decimal : null;
},
// HTML (this is strict checking - there must be html)
function ( d, settings )
{
return _empty( d ) || (typeof d === 'string' && d.indexOf('<') !== -1) ?
'html' : null;
}
] );
// Filter formatting functions. See model.ext.ofnSearch for information about
// what is required from these methods.
//
// Note that additional search methods are added for the html numbers and
// html formatted numbers by `_addNumericSort()` when we know what the decimal
// place is
$.extend( DataTable.ext.type.search, {
html: function ( data ) {
return _empty(data) ?
data :
typeof data === 'string' ?
data
.replace( _re_new_lines, " " )
.replace( _re_html, "" ) :
'';
},
string: function ( data ) {
return _empty(data) ?
data :
typeof data === 'string' ?
data.replace( _re_new_lines, " " ) :
data;
}
} );
var __numericReplace = function ( d, decimalPlace, re1, re2 ) {
if ( d !== 0 && (!d || d === '-') ) {
return -Infinity;
}
// If a decimal place other than `.` is used, it needs to be given to the
// function so we can detect it and replace with a `.` which is the only
// decimal place Javascript recognises - it is not locale aware.
if ( decimalPlace ) {
d = _numToDecimal( d, decimalPlace );
}
if ( d.replace ) {
if ( re1 ) {
d = d.replace( re1, '' );
}
if ( re2 ) {
d = d.replace( re2, '' );
}
}
return d * 1;
};
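// Illustrative examples (assuming `_re_formatted_numeric`, defined earlier in
// this file, strips currency symbols and thousands separators):
//   __numericReplace( '1234' )                                    // 1234
//   __numericReplace( '$1,234.50', null, _re_formatted_numeric )  // 1234.5
//   __numericReplace( '' )                      // -Infinity (empty sorts to one end)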
// Add the numeric 'deformatting' functions for sorting and search. This is done
// in a function to provide an easy ability for the language options to add
// additional methods if a non-period decimal place is used.
function _addNumericSort ( decimalPlace ) {
$.each(
{
// Plain numbers
"num": function ( d ) {
return __numericReplace( d, decimalPlace );
},
// Formatted numbers
"num-fmt": function ( d ) {
return __numericReplace( d, decimalPlace, _re_formatted_numeric );
},
// HTML numeric
"html-num": function ( d ) {
return __numericReplace( d, decimalPlace, _re_html );
},
// HTML numeric, formatted
"html-num-fmt": function ( d ) {
return __numericReplace( d, decimalPlace, _re_html, _re_formatted_numeric );
}
},
function ( key, fn ) {
// Add the ordering method
_ext.type.order[ key+decimalPlace+'-pre' ] = fn;
// For HTML types add a search formatter that will strip the HTML
if ( key.match(/^html\-/) ) {
_ext.type.search[ key+decimalPlace ] = _ext.type.search.html;
}
}
);
}
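// Illustrative example, per the comment above: configuring a non-period
// decimal character (e.g. a language option with decimal ',') leads to
// `_addNumericSort( ',' )` being called, which registers the 'num,-pre',
// 'num-fmt,-pre', 'html-num,-pre' and 'html-num-fmt,-pre' ordering methods
// plus HTML-stripping search formatters for the two html types.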
// Default sort methods
$.extend( _ext.type.order, {
// Dates
"date-pre": function ( d ) {
return Date.parse( d ) || -Infinity;
},
// html
"html-pre": function ( a ) {
return _empty(a) ?
'' :
a.replace ?
a.replace( /<.*?>/g, "" ).toLowerCase() :
a+'';
},
// string
"string-pre": function ( a ) {
// This is a little complex, but faster than always calling toString,
// http://jsperf.com/tostring-v-check
return _empty(a) ?
'' :
typeof a === 'string' ?
a.toLowerCase() :
! a.toString ?
'' :
a.toString();
},
// string-asc and -desc are retained only for compatibility with the old
// sort methods
"string-asc": function ( x, y ) {
return ((x < y) ? -1 : ((x > y) ? 1 : 0));
},
"string-desc": function ( x, y ) {
return ((x < y) ? 1 : ((x > y) ? -1 : 0));
}
} );
// Numeric sorting types - order doesn't matter here
_addNumericSort( '' );
$.extend( true, DataTable.ext.renderer, {
header: {
_: function ( settings, cell, column, classes ) {
// No additional mark-up required
// Attach a sort listener to update on sort - note that using the
// `DT` namespace will allow the event to be removed automatically
// on destroy, while the `dt` namespaced event is the one we are
// listening for
$(settings.nTable).on( 'order.dt.DT', function ( e, ctx, sorting, columns ) {
if ( settings !== ctx ) { // need to check that this is the host
return; // table, not a nested one
}
var colIdx = column.idx;
cell
.removeClass(
column.sSortingClass +' '+
classes.sSortAsc +' '+
classes.sSortDesc
)
.addClass( columns[ colIdx ] == 'asc' ?
classes.sSortAsc : columns[ colIdx ] == 'desc' ?
classes.sSortDesc :
column.sSortingClass
);
} );
},
jqueryui: function ( settings, cell, column, classes ) {
$('<div/>')
.addClass( classes.sSortJUIWrapper )
.append( cell.contents() )
.append( $('<span/>')
.addClass( classes.sSortIcon+' '+column.sSortingClassJUI )
)
.appendTo( cell );
// Attach a sort listener to update on sort
$(settings.nTable).on( 'order.dt.DT', function ( e, ctx, sorting, columns ) {
if ( settings !== ctx ) {
return;
}
var colIdx = column.idx;
cell
.removeClass( classes.sSortAsc +" "+classes.sSortDesc )
.addClass( columns[ colIdx ] == 'asc' ?
classes.sSortAsc : columns[ colIdx ] == 'desc' ?
classes.sSortDesc :
column.sSortingClass
);
cell
.find( 'span.'+classes.sSortIcon )
.removeClass(
classes.sSortJUIAsc +" "+
classes.sSortJUIDesc +" "+
classes.sSortJUI +" "+
classes.sSortJUIAscAllowed +" "+
classes.sSortJUIDescAllowed
)
.addClass( columns[ colIdx ] == 'asc' ?
classes.sSortJUIAsc : columns[ colIdx ] == 'desc' ?
classes.sSortJUIDesc :
column.sSortingClassJUI
);
} );
}
}
} );
/*
* Public helper functions. These aren't used internally by DataTables, or
* called by any of the options passed into DataTables, but they can be used
* externally by developers working with DataTables. They are helper functions
* to make working with DataTables a little bit easier.
*/
var __htmlEscapeEntities = function ( d ) {
return typeof d === 'string' ?
d.replace(/</g, '&lt;').replace(/>/g, '&gt;').replace(/"/g, '&quot;') :
d;
};
/**
* Helpers for `columns.render`.
*
* The options defined here can be used with the `columns.render` initialisation
* option to provide a display renderer. The following functions are defined:
*
* * `number` - Will format numeric data (defined by `columns.data`) for
* display, retaining the original unformatted data for sorting and filtering.
* It takes 5 parameters:
* * `string` - Thousands grouping separator
* * `string` - Decimal point indicator
* * `integer` - Number of decimal points to show
* * `string` (optional) - Prefix.
* * `string` (optional) - Postfix (/suffix).
* * `text` - Escape HTML to help prevent XSS attacks. It has no optional
* parameters.
*
* @example
* // Column definition using the number renderer
* {
* data: "salary",
* render: $.fn.dataTable.render.number( '\'', '.', 0, '$' )
* }
*
* @namespace
*/
DataTable.render = {
number: function ( thousands, decimal, precision, prefix, postfix ) {
return {
display: function ( d ) {
if ( typeof d !== 'number' && typeof d !== 'string' ) {
return d;
}
var negative = d < 0 ? '-' : '';
var flo = parseFloat( d );
// If NaN then there isn't much formatting that we can do - just
// return immediately, escaping any HTML (this was supposed to
// be a number after all)
if ( isNaN( flo ) ) {
return __htmlEscapeEntities( d );
}
flo = flo.toFixed( precision );
d = Math.abs( flo );
var intPart = parseInt( d, 10 );
var floatPart = precision ?
decimal+(d - intPart).toFixed( precision ).substring( 2 ):
'';
return negative + (prefix||'') +
intPart.toString().replace(
/\B(?=(\d{3})+(?!\d))/g, thousands
) +
floatPart +
(postfix||'');
}
};
},
text: function () {
return {
display: __htmlEscapeEntities
};
}
};
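// Illustrative column definition using the `text` renderer described above to
// escape user-supplied content (the data property name is hypothetical):
//   {
//       data: "comment",
//       render: $.fn.dataTable.render.text()
//   }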
/*
* This method of exposing the internal methods publicly is really a bit
* rubbish... - To be fixed in 2.0 using methods on the prototype
*/
/**
* Create a wrapper function for exporting an internal function to an external API.
* @param {string} fn API function name
* @returns {function} wrapped function
* @memberof DataTable#internal
*/
function _fnExternApiFunc (fn)
{
return function() {
var args = [_fnSettingsFromNode( this[DataTable.ext.iApiIndex] )].concat(
Array.prototype.slice.call(arguments)
);
return DataTable.ext.internal[fn].apply( this, args );
};
}
/**
* Reference to internal functions for use by plug-in developers. Note that
* these methods are references to internal functions and are considered to be
* private. If you use these methods, be aware that they are liable to change
* between versions.
* @namespace
*/
$.extend( DataTable.ext.internal, {
_fnExternApiFunc: _fnExternApiFunc,
_fnBuildAjax: _fnBuildAjax,
_fnAjaxUpdate: _fnAjaxUpdate,
_fnAjaxParameters: _fnAjaxParameters,
_fnAjaxUpdateDraw: _fnAjaxUpdateDraw,
_fnAjaxDataSrc: _fnAjaxDataSrc,
_fnAddColumn: _fnAddColumn,
_fnColumnOptions: _fnColumnOptions,
_fnAdjustColumnSizing: _fnAdjustColumnSizing,
_fnVisibleToColumnIndex: _fnVisibleToColumnIndex,
_fnColumnIndexToVisible: _fnColumnIndexToVisible,
_fnVisbleColumns: _fnVisbleColumns,
_fnGetColumns: _fnGetColumns,
_fnColumnTypes: _fnColumnTypes,
_fnApplyColumnDefs: _fnApplyColumnDefs,
_fnHungarianMap: _fnHungarianMap,
_fnCamelToHungarian: _fnCamelToHungarian,
_fnLanguageCompat: _fnLanguageCompat,
_fnBrowserDetect: _fnBrowserDetect,
_fnAddData: _fnAddData,
_fnAddTr: _fnAddTr,
_fnNodeToDataIndex: _fnNodeToDataIndex,
_fnNodeToColumnIndex: _fnNodeToColumnIndex,
_fnGetCellData: _fnGetCellData,
_fnSetCellData: _fnSetCellData,
_fnSplitObjNotation: _fnSplitObjNotation,
_fnGetObjectDataFn: _fnGetObjectDataFn,
_fnSetObjectDataFn: _fnSetObjectDataFn,
_fnGetDataMaster: _fnGetDataMaster,
_fnClearTable: _fnClearTable,
_fnDeleteIndex: _fnDeleteIndex,
_fnInvalidate: _fnInvalidate,
_fnGetRowElements: _fnGetRowElements,
_fnCreateTr: _fnCreateTr,
_fnBuildHead: _fnBuildHead,
_fnDrawHead: _fnDrawHead,
_fnDraw: _fnDraw,
_fnReDraw: _fnReDraw,
_fnAddOptionsHtml: _fnAddOptionsHtml,
_fnDetectHeader: _fnDetectHeader,
_fnGetUniqueThs: _fnGetUniqueThs,
_fnFeatureHtmlFilter: _fnFeatureHtmlFilter,
_fnFilterComplete: _fnFilterComplete,
_fnFilterCustom: _fnFilterCustom,
_fnFilterColumn: _fnFilterColumn,
_fnFilter: _fnFilter,
_fnFilterCreateSearch: _fnFilterCreateSearch,
_fnEscapeRegex: _fnEscapeRegex,
_fnFilterData: _fnFilterData,
_fnFeatureHtmlInfo: _fnFeatureHtmlInfo,
_fnUpdateInfo: _fnUpdateInfo,
_fnInfoMacros: _fnInfoMacros,
_fnInitialise: _fnInitialise,
_fnInitComplete: _fnInitComplete,
_fnLengthChange: _fnLengthChange,
_fnFeatureHtmlLength: _fnFeatureHtmlLength,
_fnFeatureHtmlPaginate: _fnFeatureHtmlPaginate,
_fnPageChange: _fnPageChange,
_fnFeatureHtmlProcessing: _fnFeatureHtmlProcessing,
_fnProcessingDisplay: _fnProcessingDisplay,
_fnFeatureHtmlTable: _fnFeatureHtmlTable,
_fnScrollDraw: _fnScrollDraw,
_fnApplyToChildren: _fnApplyToChildren,
_fnCalculateColumnWidths: _fnCalculateColumnWidths,
_fnThrottle: _fnThrottle,
_fnConvertToWidth: _fnConvertToWidth,
_fnGetWidestNode: _fnGetWidestNode,
_fnGetMaxLenString: _fnGetMaxLenString,
_fnStringToCss: _fnStringToCss,
_fnSortFlatten: _fnSortFlatten,
_fnSort: _fnSort,
_fnSortAria: _fnSortAria,
_fnSortListener: _fnSortListener,
_fnSortAttachListener: _fnSortAttachListener,
_fnSortingClasses: _fnSortingClasses,
_fnSortData: _fnSortData,
_fnSaveState: _fnSaveState,
_fnLoadState: _fnLoadState,
_fnSettingsFromNode: _fnSettingsFromNode,
_fnLog: _fnLog,
_fnMap: _fnMap,
_fnBindAction: _fnBindAction,
_fnCallbackReg: _fnCallbackReg,
_fnCallbackFire: _fnCallbackFire,
_fnLengthOverflow: _fnLengthOverflow,
_fnRenderer: _fnRenderer,
_fnDataSource: _fnDataSource,
_fnRowAttributes: _fnRowAttributes,
_fnCalculateEnd: function () {} // Used by a lot of plug-ins, but redundant
// in 1.10, so this dead-end function is
// added to prevent errors
} );
// jQuery access
$.fn.dataTable = DataTable;
// Provide access to the host jQuery object (circular reference)
DataTable.$ = $;
// Legacy aliases
$.fn.dataTableSettings = DataTable.settings;
$.fn.dataTableExt = DataTable.ext;
// With a capital `D` we return a DataTables API instance rather than a
// jQuery object
$.fn.DataTable = function ( opts ) {
return $(this).dataTable( opts ).api();
};
// All properties that are available to $.fn.dataTable should also be
// available on $.fn.DataTable
$.each( DataTable, function ( prop, val ) {
$.fn.DataTable[ prop ] = val;
} );
// Information about events fired by DataTables - for documentation.
/**
* Draw event, fired whenever the table is redrawn on the page, at the same
* point as fnDrawCallback. This may be useful for binding events or
* performing calculations when the table is altered at all.
* @name DataTable#draw.dt
* @event
* @param {event} e jQuery event object
* @param {object} o DataTables settings object {@link DataTable.models.oSettings}
*/
/**
* Search event, fired when the searching applied to the table (using the
* built-in global search, or column filters) is altered.
* @name DataTable#search.dt
* @event
* @param {event} e jQuery event object
* @param {object} o DataTables settings object {@link DataTable.models.oSettings}
*/
/**
* Page change event, fired when the paging of the table is altered.
* @name DataTable#page.dt
* @event
* @param {event} e jQuery event object
* @param {object} o DataTables settings object {@link DataTable.models.oSettings}
*/
/**
* Order event, fired when the ordering applied to the table is altered.
* @name DataTable#order.dt
* @event
* @param {event} e jQuery event object
* @param {object} o DataTables settings object {@link DataTable.models.oSettings}
*/
/**
* DataTables initialisation complete event, fired when the table is fully
* drawn, including Ajax data loaded, if Ajax data is required.
* @name DataTable#init.dt
* @event
* @param {event} e jQuery event object
* @param {object} oSettings DataTables settings object
* @param {object} json The JSON object returned from the server - only
* present if client-side Ajax sourced data is used
*/
/**
* State save event, fired when the table has changed state and a new state
* save is required. This event allows modification of the state saving
* object prior to actually doing the save, including addition of other state
* properties (for plug-ins) or modification of a DataTables core property.
* @name DataTable#stateSaveParams.dt
* @event
* @param {event} e jQuery event object
* @param {object} oSettings DataTables settings object
* @param {object} json The state information to be saved
*/
/**
* State load event, fired when the table is loading state from the stored
* data, but prior to the settings object being modified by the saved state
* - allowing modification of the saved state if required, or loading of
* state for a plug-in.
* @name DataTable#stateLoadParams.dt
* @event
* @param {event} e jQuery event object
* @param {object} oSettings DataTables settings object
* @param {object} json The saved state information
*/
/**
* State loaded event, fired when state has been loaded from stored data and
* the settings object has been modified by the loaded data.
* @name DataTable#stateLoaded.dt
* @event
* @param {event} e jQuery event object
* @param {object} oSettings DataTables settings object
* @param {object} json The saved state information
*/
/**
* Processing event, fired when DataTables is doing some kind of processing
* (be it ordering, searching or anything else). It can be used to indicate to
* the end user that there is something happening, or that something has
* finished.
* @name DataTable#processing.dt
* @event
* @param {event} e jQuery event object
* @param {object} oSettings DataTables settings object
* @param {boolean} bShow Flag for if DataTables is doing processing or not
*/
/**
* Ajax (XHR) event, fired whenever an Ajax request is completed from a
* request made to the server for new data. This event is called before
* DataTables processes the returned data, so it can also be used to pre-
* process the data returned from the server, if needed.
*
* Note that this trigger is called in `fnServerData`, if you override
* `fnServerData` and wish to use this event, you need to trigger it in your
* success function.
* @name DataTable#xhr.dt
* @event
* @param {event} e jQuery event object
* @param {object} o DataTables settings object {@link DataTable.models.oSettings}
* @param {object} json JSON returned from the server
*
* @example
* // Use a custom property returned from the server in another DOM element
* $('#table').dataTable().on('xhr.dt', function (e, settings, json) {
* $('#status').html( json.status );
* } );
*
* @example
* // Pre-process the data returned from the server
* $('#table').dataTable().on('xhr.dt', function (e, settings, json) {
* for ( var i=0, ien=json.aaData.length ; i<ien ; i++ ) {
* json.aaData[i].sum = json.aaData[i].one + json.aaData[i].two;
* }
* // Note no return - manipulate the data directly in the JSON object.
* } );
*/
/**
* Destroy event, fired when the DataTable is destroyed by calling fnDestroy
* or passing the bDestroy:true parameter in the initialisation object. This
* can be used to remove bound events, added DOM nodes, etc.
* @name DataTable#destroy.dt
* @event
* @param {event} e jQuery event object
* @param {object} o DataTables settings object {@link DataTable.models.oSettings}
*/
/**
* Page length change event, fired when number of records to show on each
* page (the length) is changed.
* @name DataTable#length.dt
* @event
* @param {event} e jQuery event object
* @param {object} o DataTables settings object {@link DataTable.models.oSettings}
* @param {integer} len New length
*/
/**
* Column sizing has changed.
* @name DataTable#column-sizing.dt
* @event
* @param {event} e jQuery event object
* @param {object} o DataTables settings object {@link DataTable.models.oSettings}
*/
/**
* Column visibility has changed.
* @name DataTable#column-visibility.dt
* @event
* @param {event} e jQuery event object
* @param {object} o DataTables settings object {@link DataTable.models.oSettings}
* @param {int} column Column index
* @param {bool} vis `false` if column now hidden, or `true` if visible
*/
return $.fn.dataTable;
}));
command.rs
use std::rc::Rc;
use prettytable::{cell, row, table};
use crate::{storage::DataStore, Error, Result};
use super::Cmd;
/// `CmdRecord` is used to get gaming data from storage.
pub struct CmdRecord {
store: Rc<DataStore>,
}
impl CmdRecord {
pub fn new(store: Rc<DataStore>) -> Self {
CmdRecord { store }
}
}
impl Cmd for CmdRecord {
fn name(&self) -> &str {
"record"
}
fn usage(&self) -> &str {
"record <music-id | music-name>"
}
fn description(&self) -> &str {
"get music record by the music id or name."
}
fn do_cmd(&self, args: &[String]) -> Result<()> {
if args.is_empty() {
return Err(Error::DoCmdError(String::from("args unmatched.")));
}
let records = if let Ok(music_id) = args[0].as_str().parse::<u16>() {
// the first arg is u16, to get record by id
self.store.get_record_by_id(vec![music_id])
} else {
// otherwise, join all remaining args into the music name
// and get the record by that name.
let name = args.join(" ");
self.store.get_record_by_name(name)
};
if !records.is_empty() {
let mut tab = table!([
"music id",
"music name",
"difficulty",
"level",
"score",
"grade",
"clear type",
"volforce"
]);
for rec in &records {
tab.add_row(row![
rec.get_music_id(),
rec.get_music_name_str(),
rec.get_difficulty(),
rec.get_level(),
rec.get_score(),
rec.get_grade(),
rec.get_clear_type(),
rec.get_volforce(),
]);
}
tab.printstd();
println!("{} record(s) founded.", records.len());
} else {
return Err(Error::DoCmdError(String::from(
"The music record not found.",
)));
}
Ok(())
}
}
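// Usage sketch (illustrative; `DataStore::new` and the argument value are
// assumptions, not defined in this file; runs inside a function returning Result):
//
//     let store = Rc::new(DataStore::new());
//     let cmd = CmdRecord::new(store.clone());
//     cmd.do_cmd(&[String::from("1234")])?; // look a record up by music id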
pub struct CmdBest50 {
store: Rc<DataStore>,
}
impl CmdBest50 {
pub fn new(store: Rc<DataStore>) -> Self {
CmdBest50 { store }
}
}
impl Cmd for CmdBest50 {
fn name(&self) -> &str {
"best50"
}
fn usage(&self) -> &str {
"best50"
}
fn description(&self) -> &str {
"get the best 50 records in volforce order."
}
fn do_cmd(&self, _: &[String]) -> Result<()> {
let records = self.store.get_best50_records();
if !records.is_empty() {
let mut tab = table!([
"rank",
"music id",
"music name",
"difficulty",
"level",
"score",
"grade",
"clear type",
"volforce"
]);
for (i, rec) in records.iter().enumerate() {
tab.add_row(row![
format!("#{}", i + 1),
rec.get_music_id(),
rec.get_music_name_str(),
rec.get_difficulty(),
rec.get_level(),
rec.get_score(),
rec.get_grade(),
rec.get_clear_type(),
rec.get_volforce(),
]);
}
tab.printstd();
println!("{} record(s) founded.", records.len());
} else {
return Err(Error::DoCmdError(String::from(
"The music record not found.",
)));
}
Ok(())
}
}
pub struct CmdVolforce {
store: Rc<DataStore>,
}
impl CmdVolforce {
pub fn new(store: Rc<DataStore>) -> Self {
CmdVolforce { store }
}
}
impl Cmd for CmdVolforce {
fn name(&self) -> &str {
"vf"
}
fn usage(&self) -> &str {
"vf"
}
fn description(&self) -> &str {
"compute and print your volforce."
}
fn do_cmd(&self, _: &[String]) -> Result<()> {
let vf = self.store.get_volforce();
println!("Your Volforce: {}", vf);
Ok(())
}
}
pub struct CmdCount {
store: Rc<DataStore>,
}
impl CmdCount {
pub fn new(store: Rc<DataStore>) -> Self {
CmdCount { store }
}
}
impl Cmd for CmdCount {
fn name(&self) -> &str {
"count"
}
fn usage(&self) -> &str {
"count <all | level>"
}
fn description(&self) -> &str {
"count the grades of one level(or all)"
}
fn do_cmd(&self, args: &[String]) -> Result<()> {
if args.len() != 1 {
return Err(Error::DoCmdError(String::from("args unmatched.")));
}
let stats = if let Ok(level) = args[0].as_str().parse::<u8>() {
if level < 1 || level > 20 {
return Err(Error::DoCmdError(String::from("args unmatched.")));
}
self.store.get_level_stat(Some(level))
} else if args[0].as_str() == "all" {
self.store.get_level_stat(None)
} else {
return Err(Error::DoCmdError(String::from("args unmatched.")));
};
let mut tab = table!([
"level",
"S",
"AAA+",
"AAA",
"PUC",
"UC",
"HC",
"NC",
"played/total"
]);
for s in stats.iter() {
tab.add_row(row![
s.level(),
s.s_num(),
s.tap_num(),
s.ta_num(),
s.puc_num(),
s.uc_num(),
s.hc_num(),
s.nc_num(),
format!("{}/{}", s.played(), self.store.get_level_count(*s.level())),
]);
}
tab.printstd();
Ok(())
}
}
utils.py
import base64
import math
import platform
import tkinter as tk
from tkinter import ttk
import PIL
from PIL import Image, ImageTk, ImageColor, ImageDraw, ImageFilter
import hashlib
import io
def identify_operating_system():
"""identify current operating system
Returns:
(str): 'Windows', 'Linux', or 'Darwin' for mac
"""
return platform.system()
def calc_md5(binary_data):
return hashlib.md5(binary_data).hexdigest()
def generate_unique_name(*args):
"""get md5 encoding for any arguments that have a string representation
Returns:
md5 string
"""
name = ''.join([str(x) for x in args])
try:
name = calc_md5(name.encode())
except:
pass
return name
def invert_color(color):
"""return inverted hex color
"""
color = color_to_rgba(color)
r, g, b, a = color
inverted_color = rgb2hex(255 - r, 255 - g, 255 - b)
return inverted_color
def rgb2hex(r, g, b):
return '#{:02x}{:02x}{:02x}'.format(r, g, b)
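# e.g. rgb2hex(255, 0, 0) -> '#ff0000', rgb2hex(51, 51, 51) -> '#333333'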
def change_img_color(img, new_color, old_color=None):
"""Change image color
Args:
img: pillow image
new_color (str): new image color, ex: 'red', '#ff00ff', (255, 0, 0), (255, 0, 0, 255)
old_color (str): color to be replaced, if omitted, all colors will be replaced with new color keeping
alpha channel.
Returns:
pillow image
"""
# convert image to RGBA color scheme
img = img.convert('RGBA')
# load pixels data
pixdata = img.load()
# handle color
new_color = color_to_rgba(new_color)
old_color = color_to_rgba(old_color)
for y in range(img.size[1]):
for x in range(img.size[0]):
alpha = pixdata[x, y][-1]
if old_color:
if pixdata[x, y] == old_color:
r, g, b, _ = new_color
pixdata[x, y] = (r, g, b, alpha)
else:
r, g, b, _ = new_color
pixdata[x, y] = (r, g, b, alpha)
return img
def resize_img(img, size, keep_aspect_ratio=True):
"""resize image using pillow
Args:
img (PIL.Image): pillow image object
size(int or tuple(int, int)): width of image or tuple of (width, height)
keep_aspect_ratio(bool): maintain aspect ratio relative to width
Returns:
(PIL.Image): pillow image
"""
if isinstance(size, int):
size = (size, size)
# get ratio
width, height = img.size
requested_width = size[0]
if keep_aspect_ratio:
ratio = width / requested_width
requested_height = height / ratio
else:
requested_height = size[1]
size = (int(requested_width), int(requested_height))
img = img.resize(size, resample=PIL.Image.LANCZOS)
return img
def mix_images(background_img, foreground_img):
"""paste an image on top of another image
Args:
background_img: pillow image in background
foreground_img: pillow image in foreground
Returns:
pillow image
"""
background_img = background_img.convert('RGBA')
foreground_img = foreground_img.convert('RGBA')
img_w, img_h = foreground_img.size
bg_w, bg_h = background_img.size
offset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2)
background_img.paste(foreground_img, offset, mask=foreground_img)
return background_img
def color_to_rgba(color):
"""Convert color names or hex notation to RGBA,
Args:
color (str): color e.g. 'white' or '#333' or formats like #rgb or #rrggbb
Returns:
(4-tuple): tuple of format (r, g, b, a) e.g. it will return (255, 0, 0, 255) for solid red
"""
if color is None:
return None
if isinstance(color, (tuple, list)):
if len(color) == 3:
r, g, b = color
color = (r, g, b, 255)
return color
else:
return ImageColor.getcolor(color, 'RGBA')
def is_dark(color):
"""rough check if color is dark or light
Returns:
(bool): True if color is dark, False if light
"""
r, g, b, a = color_to_rgba(color)
# calculate lumina, reference https://stackoverflow.com/a/1855903
lumina = (0.299 * r + 0.587 * g + 0.114 * b) / 255
return True if lumina < 0.6 else False
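# Examples: is_dark('black') -> True, is_dark('white') -> False;
# '#808080' gives lumina ~0.5, so mid greys count as dark under the 0.6 cut-off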
def calc_font_color(bg):
"""calculate font color based on given background
Args:
bg (str): background color
Returns:
(str): color name, e.g. "white" for dark background and "black" for light background
"""
return 'white' if is_dark(bg) else 'black'
def calc_contrast_color(color, offset):
"""calculate a contrast color
darker colors get a slightly lighter color depending on "offset", and lighter colors get a darker color
Args:
color (str): color
offset (int): 1 to 254
Returns:
(str): color
"""
r, g, b, a = color_to_rgba(color)
if is_dark(color):
new_color = [x + offset if x + offset <= 255 else 255 for x in (r, g, b)]
else:
new_color = [x - offset if x - offset >= 0 else 0 for x in (r, g, b)]
return rgb2hex(*new_color)
def text_to_image(text, text_color, bg_color, size):
"""Not implemented"""
pass
# img = Image.new('RGBA', size, color_to_rgba(text_color))
# draw = ImageDraw.Draw(img)
# font = ImageFont.truetype(current_path + "s.ttf", size - int(0.15 * width))
# draw.text((pad, -pad), str(num), font=font, fill=color_to_rgba(bg_color))
def create_pil_image(fp=None, color=None, size=None, b64=None):
"""create pillow Image object
Args:
fp: A filename (string), pathlib.Path object or a file object. The file object must implement read(), seek(),
and tell() methods, and be opened in binary mode.
color (str): color in tkinter format, e.g. 'red', '#3300ff', also color can be a tuple or a list of RGB,
e.g. (255, 0, 255)
size (int or 2-tuple(int, int)): an image required size in a (width, height) tuple
b64 (str): base64 encoded representation of an image, if "fp" is given this parameter will be ignored
Returns:
pillow image object
"""
if not fp and b64:
fp = io.BytesIO(base64.b64decode(b64))
img = Image.open(fp)
# change color
if color:
img = change_img_color(img, color)
# resize
if size:
if isinstance(size, int):
size = (size, size)
img = resize_img(img, size)
return img
def create_image(fp=None, img=None, color=None, size=None, b64=None):
"""create tkinter PhotoImage object
it can modify size and color of original image
Args:
fp: A filename (string), pathlib.Path object or a file object. The file object must implement read(), seek(),
and tell() methods, and be opened in binary mode.
img (pillow image): if exist fp or b64 arguments will be ignored
color (str): color in tkinter format, e.g. 'red', '#3300ff', also color can be a tuple or a list of RGB,
e.g. (255, 0, 255)
size (int or 2-tuple(int, int)): an image required size in a (width, height) tuple
b64 (str): base64 encoded representation of an image, if "fp" is given this parameter will be ignored
Returns:
tkinter PhotoImage object
"""
# create pillow image
if not img:
img = create_pil_image(fp, color, size, b64)
# create tkinter images using pillow ImageTk
img = ImageTk.PhotoImage(img)
return img
def create_circle(size=100, thickness=None, color='black', fill=None, antialias=4, offset=0):
"""create high quality circle
the idea to smooth the circle outline is to draw a larger circle and then downscale it to the requested size
inspired from https://stackoverflow.com/a/34926008
Args:
size (tuple or list, or int): outer diameter of the circle or width of bounding box
thickness (int): outer line thickness in pixels
color (str): outer line color
fill (str): fill color, default is a transparent fill
antialias (int): used to enhance outer line quality and make it smoother
offset (int): correct cut edges of circle outline
Returns:
PIL image: a circle on a transparent image
"""
if isinstance(size, int):
size = (size, size)
fill_color = color_to_rgba(fill) or '#0000'
requested_size = size
# calculate thickness to be 2% of circle diameter
thickness = thickness or max(size[0] * 2 // 100, 2)
offset = offset or thickness // 2
# make things bigger
size = [x * antialias for x in requested_size]
thickness *= antialias
# create a transparent image with a big size
img = Image.new(size=size, mode='RGBA', color='#0000')
draw = ImageDraw.Draw(img)
# draw circle with a required color
draw.ellipse([offset, offset, size[0] - offset, size[1] - offset], outline=color, fill=fill_color, width=thickness)
img = img.filter(ImageFilter.BLUR)
# resize image back to the requested size
img = img.resize(requested_size, Image.LANCZOS)
# changing the color again enhances quality (weird)
if fill:
img = change_img_color(img, color, old_color=color)
img = change_img_color(img, fill, old_color=fill)
else:
img = change_img_color(img, color)
return img
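# Usage sketch (illustrative values): a 64 px red ring with a 4 px outline,
# converted to a tkinter-compatible image with create_image() defined earlier.
#   ring = create_circle(size=64, thickness=4, color='red')
#   photo = create_image(img=ring)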
def apply_gradient(img, gradient='vertical', colors=None, keep_transparency=True):
"""apply gradient color for pillow image
Args:
img: pillow image
gradient (str): vertical, horizontal, diagonal, radial
colors (iterable): 2-colors for the gradient
keep_transparency (bool): keep original transparency
"""
size = img.size
colors = colors or ['black', 'white']
color1 = color_to_rgba(colors[0])
color2 = color_to_rgba(colors[1])
# load pixels data
pixdata = img.load()
if gradient in ('horizontal', 'vertical', 'diagonal'):
for x in range(0, size[0]):
for y in range(0, size[1]):
if gradient == 'horizontal':
ratio1 = x / size[0]  # fraction of the width
elif gradient == 'vertical':
ratio1 = y / size[1]
elif gradient == 'diagonal':
ratio1 = (y + x) / size[1]
ratio2 = 1 - ratio1
r = ratio1 * color2[0] + ratio2 * color1[0]
g = ratio1 * color2[1] + ratio2 * color1[1]
b = ratio1 * color2[2] + ratio2 * color1[2]
if keep_transparency:
a = pixdata[x, y][-1]
else:
a = ratio1 * color2[3] + ratio2 * color1[3]
r, g, b, a = (int(x) for x in (r, g, b, a))
# Place the pixel
img.putpixel((x, y), (r, g, b, a))
elif gradient == 'radial': # inspired by https://stackoverflow.com/a/30669765
d = min(size)
radius = d // 2
for x in range(0, size[0]):
for y in range(0, size[1]):
# Find the distance to the center
distance_to_center = math.sqrt((x - size[0] / 2) ** 2 + (y - size[1] / 2) ** 2)
ratio1 = distance_to_center / radius
ratio2 = 1 - ratio1
r = ratio1 * color2[0] + ratio2 * color1[0]
g = ratio1 * color2[1] + ratio2 * color1[1]
b = ratio1 * color2[2] + ratio2 * color1[2]
if keep_transparency:
a = pixdata[x, y][-1]
else:
a = ratio1 * color2[3] + ratio2 * color1[3]
r, g, b, a = (int(x) for x in (r, g, b, a))
# Place the pixel
img.putpixel((x, y), (r, g, b, a))
return img
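# Usage sketch ('logo.png' is a hypothetical file): tint an image with a
# radial black-to-white gradient while keeping its alpha channel.
#   img = create_pil_image('logo.png')
#   img = apply_gradient(img, gradient='radial', colors=['black', 'white'])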
def scroll_with_mousewheel(widget, target=None, modifier='Shift', apply_to_children=False):
"""scroll a widget with mouse wheel
Args:
widget: tkinter widget
target: scrollable tkinter widget, in case you need "widget" to catch mousewheel event and make another widget
to scroll, useful for child widget in a scrollable frame
modifier (str): Modifier to use with mousewheel to scroll horizontally, default is shift key
apply_to_children (bool): bind all children
Examples:
scroll_with_mousewheel(my_text_widget, target='my_scrollable_frame')
to make a scrollable canvas:
for w in my_canvas.winfo_children():
scroll_with_mousewheel(w, target=my_canvas)
"""
def _scroll_with_mousewheel(widget):
target_widget = target if target else widget
def scroll_vertically(event):
# scroll vertically ----------------------------------
if event.num == 4 or event.delta > 0:
target_widget.yview_scroll(-1, "unit")
elif event.num == 5 or event.delta < 0:
target_widget.yview_scroll(1, "unit")
return 'break'
# bind events for vertical scroll ----------------------------------------------
if hasattr(target_widget, 'yview_scroll'):
# linux
widget.bind("<Button-4>", scroll_vertically, add='+')
widget.bind("<Button-5>", scroll_vertically, add='+')
# windows and mac
widget.bind("<MouseWheel>", scroll_vertically, add='+')
# scroll horizontally ---------------------------------------
def scroll_horizontally(event):
# scroll horizontally
if event.num == 4 or event.delta > 0:
target_widget.xview_scroll(-10, "unit")
elif event.num == 5 or event.delta < 0:
target_widget.xview_scroll(10, "unit")
return 'break'
# bind events for horizontal scroll ----------------------------------------------
if hasattr(target_widget, 'xview_scroll'):
# linux
widget.bind(f"<{modifier}-Button-4>", scroll_horizontally, add='+')
widget.bind(f"<{modifier}-Button-5>", scroll_horizontally, add='+')
# windows and mac
widget.bind(f"<{modifier}-MouseWheel>", scroll_horizontally, add='+')
_scroll_with_mousewheel(widget)
def handle_children(w):
for child in w.winfo_children():
_scroll_with_mousewheel(child)
# recursive call
if child.winfo_children():
handle_children(child)
if apply_to_children:
handle_children(widget)
def unbind_mousewheel(widget):
"""unbind mousewheel for a specific widget, e.g. combobox which have mouswheel scroll by default"""
# linux
widget.unbind("<Button-4>")
widget.unbind("<Button-5>")
# windows and mac
widget.unbind("<MouseWheel>")
def get_widget_attribute(widget, attr):
"""get an attribute of a widget
Args:
widget: tkinter widget "tk or ttk"
attr (str): attribute or property e.g. 'background'
Returns:
attribute value, e.g. '#ffffff' for a background color
"""
# if it is ttk based, look the value up from the applied style; this raises an error if the widget is not a ttk widget
try:
style_name = widget.cget('style') or widget.winfo_class()
s = ttk.Style()
value = s.lookup(style_name, attr)
return value
except:
pass
try:
# if it's a tk widget will use cget
return widget.cget(attr)
except:
pass
return None
def configure_widget(widget, **kwargs):
"""configure widget's attributes"""
for k, v in kwargs.items():
# set widget attribute
try:
# treat as a "tk" widget, it will raise if widget is a "ttk"
widget.config(**{k: v})
continue
except:
pass
try:
# in case above failed, it might be a ttk widget
style_name = widget.cget('style') or widget.winfo_class()
s = ttk.Style()
s.configure(style_name, **{k: v})
except:
pass
def set_default_theme():
# select tkinter theme required for things to be right on windows,
# only 'alt', 'default', or 'classic' can work fine on windows 10
s = ttk.Style()
s.theme_use('default')
def theme_compatibility_check(print_warning=False):
"""check if current theme is compatible
Return:
bool: True or False
"""
compatible_themes = ['alt', 'default', 'classic']
s = ttk.Style()
current_theme = s.theme_use()
if current_theme not in compatible_themes:
if print_warning:
print(f'AwesomeTkinter Warning: Widgets might not work properly under current theme ({current_theme})\n'
f"compatible_themes are ['alt', 'default', 'classic']\n"
f"you can set default theme using atk.set_default_theme() or style.theme_use('default')")
return False
return True
def center_window(window, width=None, height=None, set_geometry_wh=True, reference=None):
"""center a tkinter window on screen's center and set its geometry if width and height given
Args:
window (tk.root or tk.Toplevel): a window to be centered
width (int): window's width
height (int): window's height
set_geometry_wh (bool): include width and height in geometry
reference: tk window, e.g. a parent window, to center relative to
"""
# update_idletasks would cause the window to show early at the top left corner
# and then jump to the center, which looks unprofessional
# window.update_idletasks()
if width and height:
if reference:
refx = reference.winfo_x() + reference.winfo_width() // 2
refy = reference.winfo_y() + reference.winfo_height() // 2
else:
refx = window.winfo_screenwidth() // 2
refy = window.winfo_screenheight() // 2
x = refx - width // 2
y = refy - height // 2
if set_geometry_wh:
window.geometry(f'{width}x{height}+{x}+{y}')
else:
window.geometry(f'+{x}+{y}')
else:
window.eval('tk::PlaceWindow . center')
__all__ = ['identify_operating_system', 'calc_md5', 'generate_unique_name', 'invert_color', 'rgb2hex',
'change_img_color', 'resize_img', 'mix_images', 'color_to_rgba', 'is_dark', 'calc_font_color',
'calc_contrast_color', 'text_to_image', 'create_pil_image', 'create_image', 'create_circle',
'scroll_with_mousewheel', 'unbind_mousewheel', 'get_widget_attribute', 'ImageTk', 'set_default_theme',
'theme_compatibility_check', 'configure_widget', 'center_window']
day_18.rs
use regex::Regex;
use std::cell::RefCell;
use std::rc::Rc;
#[derive(Clone, Eq, PartialEq)]
enum Number {
Literal(Rc<RefCell<i64>>),
Pair {
left: Rc<RefCell<Number>>,
right: Rc<RefCell<Number>>,
},
}
impl std::fmt::Display for Number {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
match self {
Self::Literal(num) => write!(f, "{}", *num.borrow()),
Self::Pair { left, right } => write!(f, "[{},{}]", &*left.borrow(), &*right.borrow()),
}
}
}
impl std::fmt::Debug for Number {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
std::fmt::Display::fmt(self, f)
}
}
#[derive(Clone, Debug)]
struct ReduceContext {
last_left_num: Option<Rc<RefCell<i64>>>,
depth: usize,
stack: Vec<Rc<RefCell<Number>>>,
}
impl ReduceContext {
fn new() -> ReduceContext {
ReduceContext {
last_left_num: None,
depth: 0,
stack: vec![],
}
}
}
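/// Depth-first search for the nearest literal to the right of an exploding
/// pair: `curr` is the subtree to search first, and `ctx.stack` holds the
/// right-hand siblings collected on the way down (searched next, most
/// recent first).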
fn find_rhs_num(curr: Option<Rc<RefCell<Number>>>, ctx: ReduceContext) -> Option<Rc<RefCell<i64>>> {
match curr {
None => {
if ctx.stack.is_empty() {
None
} else {
find_rhs_num(
ctx.stack.last().cloned(),
ReduceContext {
// pop the most recent sibling off the stack; keep the rest for later
stack: ctx.stack.split_at(ctx.stack.len() - 1).0.to_vec(),
..ctx
},
)
}
}
Some(num) => match &*num.borrow() {
Number::Literal(n) => Some(n.clone()),
Number::Pair { left, right } => {
let mut stack = ctx.stack.clone();
stack.push(right.clone());
let lhs_result = find_rhs_num(
Some(left.clone()),
ReduceContext {
stack,
..ctx.clone()
},
);
if lhs_result.is_some() {
lhs_result
} else {
find_rhs_num(Some(right.clone()), ctx)
}
}
},
}
}
impl std::ops::Add<Number> for Number {
type Output = Number;
fn add(self, rhs: Number) -> Number {
let mut sum = Number::Pair {
left: Rc::new(RefCell::new(self)),
right: Rc::new(RefCell::new(rhs)),
};
sum.reduce();
sum
}
}
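// Per the puzzle rules, addition pairs up the two operands and reduces, e.g.
// Number::from_str("[1,2]") + Number::from_str("[[3,4],5]")
// gives the number [[1,2],[[3,4],5]].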
impl Number {
fn deep_clone(&self) -> Number {
match self {
Number::Literal(num) => Number::of_num(*num.borrow()),
Number::Pair { left, right } => Number::Pair {
left: Rc::new(RefCell::new(left.borrow().deep_clone())),
right: Rc::new(RefCell::new(right.borrow().deep_clone())),
},
}
}
fn from_str(s: &str) -> Number {
parse(&tokenize(s)).0
}
fn of_num(num: i64) -> Number {
Number::Literal(Rc::new(RefCell::new(num)))
}
fn magnitude(&self) -> i64 {
match self {
Number::Literal(num) => *num.borrow(),
Number::Pair { left, right } => {
3 * left.borrow().magnitude() + 2 * right.borrow().magnitude()
}
}
}
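// e.g. the magnitude of [9,1] is 3*9 + 2*1 = 29, and the magnitude of
// [[9,1],[1,9]] is 3*29 + 2*21 = 129 (puzzle examples).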
fn reduce(&mut self) {
while self.reduce_once() {}
}
fn reduce_once(&mut self) -> bool {
if !self._reduce_explode(&mut ReduceContext::new()) {
self._reduce_split()
} else {
true
}
}
fn _reduce_split(&mut self) -> bool {
match self {
Self::Literal(num) => {
assert!(*num.borrow() < 10);
return false;
}
Self::Pair { left, right } => {
let mut left_mut = left.borrow_mut();
match &*left_mut {
Self::Literal(num) => {
let val = *num.borrow();
if val >= 10 {
*left_mut = Number::Pair {
left: Rc::new(RefCell::new(Number::of_num(val / 2))),
right: Rc::new(RefCell::new(Number::of_num(val / 2 + (val % 2)))),
};
return true;
}
}
Self::Pair { .. } => {
let l_result = left_mut._reduce_split();
if l_result {
return l_result;
}
}
};
let mut right_mut = right.borrow_mut();
match &*right_mut {
Self::Literal(num) => {
let val = *num.borrow();
if val >= 10 {
*right_mut = Number::Pair {
left: Rc::new(RefCell::new(Number::of_num(val / 2))),
right: Rc::new(RefCell::new(Number::of_num(val / 2 + (val % 2)))),
};
return true;
}
}
Self::Pair { .. } => {
let r_result = right_mut._reduce_split();
if r_result {
return r_result;
}
}
};
}
}
false
}
fn _reduce_explode(&mut self, ctx: &mut ReduceContext) -> bool {
match self {
Self::Literal(num) => {
ctx.last_left_num = Some(num.clone());
return false;
}
Self::Pair { left, right } => {
if ctx.depth == 3 {
let mut borrowed_left = left.borrow_mut();
match &*borrowed_left {
Self::Pair {
left: ll,
right: lr,
} => {
// Explode!
if let Some(r) = find_rhs_num(Some(right.clone()), ctx.clone()) {
match &*lr.borrow() {
Number::Literal(num) => {
*r.borrow_mut() += *num.borrow();
}
_ => {
panic!("Found pair at depth 4");
}
}
}
if let Some(l) = &ctx.last_left_num {
match &*ll.borrow() {
Number::Literal(num) => {
*l.borrow_mut() += *num.borrow();
}
_ => {
panic!("Found pair at depth 4");
}
}
}
*borrowed_left = Number::of_num(0);
return true;
}
Self::Literal(num) => ctx.last_left_num = Some(num.clone()),
};
let mut borrowed_right = right.borrow_mut();
match &*borrowed_right {
Self::Pair {
left: rl,
right: rr,
} => {
// Explode!
if let Some(r) = find_rhs_num(None, ctx.clone()) {
match &*rr.borrow() {
Number::Literal(num) => {
*r.borrow_mut() += *num.borrow();
}
_ => {
panic!("Found pair at depth 4");
}
}
}
if let Some(l) = &ctx.last_left_num {
match &*rl.borrow() {
Number::Literal(num) => {
*l.borrow_mut() += *num.borrow();
}
_ => {
panic!("Found pair at depth 4");
}
}
}
*borrowed_right = Number::of_num(0);
return true;
}
Self::Literal(num) => ctx.last_left_num = Some(num.clone()),
}
} else {
let old_stack = ctx.stack.clone();
let mut stack = old_stack.clone();
stack.push(right.clone());
ctx.depth += 1;
ctx.stack = stack;
let left_result = left.borrow_mut()._reduce_explode(ctx);
ctx.stack = old_stack;
if left_result {
ctx.depth -= 1;
return left_result;
}
let right_result = right.borrow_mut()._reduce_explode(ctx);
ctx.depth -= 1;
return right_result;
}
}
};
false
}
}
#[derive(Clone, Copy, Debug)]
enum Token {
Num(i64),
Open,
Close,
}
fn next_token(s: &str) -> (Token, &str) {
if s.is_empty() {
panic!("Malformed string");
}
if s.starts_with('[') {
(Token::Open, s.split_at(1).1)
} else if s.starts_with(']') {
(Token::Close, s.split_at(1).1)
} else if let Some(num) = Regex::new(r"^(\d+)")
.unwrap()
.captures(s)
.and_then(|caps| caps.get(1))
.map(|it| it.as_str())
{
(
Token::Num(num.parse().unwrap()),
s.trim_start_matches(char::is_numeric),
)
} else {
next_token(s.split_at(1).1)
}
}
fn tokenize(s: &str) -> Vec<Token> {
let mut curr = s;
let mut output: Vec<Token> = vec![];
while !curr.is_empty() {
let (t, rest) = next_token(curr);
curr = rest;
output.push(t)
}
output
}
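// e.g. tokenize("[1,2]") yields [Open, Num(1), Num(2), Close]; the comma is
// skipped by next_token's fall-through branch.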
fn parse(tokens: &[Token]) -> (Number, &[Token]) {
match tokens[0] {
Token::Num(n) => (Number::of_num(n), &tokens[1..]),
Token::Open => {
let (left, rest) = parse(&tokens[1..]);
let (right, rest) = parse(rest);
if !matches!(rest[0], Token::Close) {
panic!("Expected closing delimiter, got: {:?}", rest[0]);
}
(
Number::Pair {
left: Rc::new(RefCell::new(left)),
right: Rc::new(RefCell::new(right)),
},
&rest[1..],
)
}
Token::Close => {
panic!("Mismatched closing delimiter.")
}
}
}
fn main() {
let summed = adventofcode2021::input_lines(18)
.iter()
.map(|s| Number::from_str(s))
.map(|mut it| {
it.reduce();
it
})
.reduce(|acc, el| acc + el);
let mag = summed.unwrap().magnitude();
println!("Part 1: {}", mag);
let parsed: Vec<Number> = adventofcode2021::input_lines(18)
.iter()
.map(|s| Number::from_str(s))
.collect();
let mut max_mag = 0;
for (i_idx, i) in parsed.iter().enumerate() {
for (j_idx, j) in parsed.iter().enumerate() {
if i_idx == j_idx {
continue;
}
max_mag = std::cmp::max((i.deep_clone() + j.deep_clone()).magnitude(), max_mag);
}
}
println!("Part 2: {}", max_mag);
}
#[cfg(test)]
mod tests {
use super::*;
fn explode_assert(s: &str, expected: &str) {
let mut num = Number::from_str(s);
num._reduce_explode(&mut ReduceContext::new());
let expected = Number::from_str(expected);
assert_eq!(num, expected);
}
fn split_assert(s: &str, expected: &str) {
let mut num = Number::from_str(s);
num._reduce_split();
let expected = Number::from_str(expected);
assert_eq!(num, expected);
}
#[test]
fn test_explode() {
explode_assert("[[[[[9,8],1],2],3],4]", "[[[[0,9],2],3],4]");
explode_assert("[7,[6,[5,[4,[3,2]]]]]", "[7,[6,[5,[7,0]]]]");
explode_assert("[[6,[5,[4,[3,2]]]],1]", "[[6,[5,[7,0]]],3]");
explode_assert(
"[[3,[2,[1,[7,3]]]],[6,[5,[4,[3,2]]]]]",
"[[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]",
);
explode_assert(
"[[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]",
"[[3,[2,[8,0]]],[9,[5,[7,0]]]]",
);
}
#[test]
fn test_split() {
split_assert("[10,7]", "[[5,5],7]");
split_assert("[11,7]", "[[5,6],7]");
}
#[test]
fn test_reduce() {
let mut num = Number::from_str("[[[[[4,3],4],4],[7,[[8,4],9]]],[1,1]]");
num.reduce_once();
let expected = Number::from_str("[[[[0,7],4],[7,[[8,4],9]]],[1,1]]");
assert_eq!(num, expected);
num.reduce_once();
let expected = Number::from_str("[[[[0,7],4],[15,[0,13]]],[1,1]]");
assert_eq!(num, expected);
num.reduce_once();
let expected = Number::from_str("[[[[0,7],4],[[7,8],[0,13]]],[1,1]]");
assert_eq!(num, expected);
num.reduce_once();
let expected = Number::from_str("[[[[0,7],4],[[7,8],[0,[6,7]]]],[1,1]]");
assert_eq!(num, expected);
num.reduce_once();
let expected = Number::from_str("[[[[0,7],4],[[7,8],[6,0]]],[8,1]]");
assert_eq!(num, expected);
let mut num = Number::from_str("[[[[[4,3],4],4],[7,[[8,4],9]]],[1,1]]");
num.reduce();
let expected = Number::from_str("[[[[0,7],4],[[7,8],[6,0]]],[8,1]]");
assert_eq!(num, expected);
}
#[test]
fn test_sum_magnitude() {
let n0 = Number::from_str("[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]");
let n1 = Number::from_str("[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]");
let sum = n0 + n1;
let expected =
Number::from_str("[[[[7,8],[6,6]],[[6,0],[7,7]]],[[[7,8],[8,8]],[[7,9],[0,6]]]]");
assert_eq!(sum, expected);
let expected_mag: i64 = 3993;
assert_eq!(sum.magnitude(), expected_mag);
}
}
server_test.go
// +build !go1.9
/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package comm_test
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"path/filepath"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/transport"
"github.com/hyperledger/fabric/core/comm"
testpb "github.com/hyperledger/fabric/core/comm/testdata/grpc"
)
//Embedded certificates for testing
//These are the prime256v1-openssl-*.pem in testdata
//The self-signed cert expires in 2026
var selfSignedKeyPEM = `-----BEGIN EC PARAMETERS-----
BggqhkjOPQMBBw==
-----END EC PARAMETERS-----
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIM2rUTflEQ11m5g5yEm2Cer2yI+ziccl1NbSRVh3GUR0oAoGCCqGSM49
AwEHoUQDQgAEu2FEZVSr30Afey6dwcypeg5P+BuYx5JSYdG0/KJIBjWKnzYo7FEm
gMir7GbNh4pqA8KFrJZkPuxMgnEJBZTv+w==
-----END EC PRIVATE KEY-----
`
var selfSignedCertPEM = `-----BEGIN CERTIFICATE-----
MIICRDCCAemgAwIBAgIJALwW//dz2ZBvMAoGCCqGSM49BAMCMH4xCzAJBgNVBAYT
AlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2Nv
MRgwFgYDVQQKDA9MaW51eEZvdW5kYXRpb24xFDASBgNVBAsMC0h5cGVybGVkZ2Vy
MRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTYxMjA0MjIzMDE4WhcNMjYxMjAyMjIz
MDE4WjB+MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UE
BwwNU2FuIEZyYW5jaXNjbzEYMBYGA1UECgwPTGludXhGb3VuZGF0aW9uMRQwEgYD
VQQLDAtIeXBlcmxlZGdlcjESMBAGA1UEAwwJbG9jYWxob3N0MFkwEwYHKoZIzj0C
AQYIKoZIzj0DAQcDQgAEu2FEZVSr30Afey6dwcypeg5P+BuYx5JSYdG0/KJIBjWK
nzYo7FEmgMir7GbNh4pqA8KFrJZkPuxMgnEJBZTv+6NQME4wHQYDVR0OBBYEFAWO
4bfTEr2R6VYzQYrGk/2VWmtYMB8GA1UdIwQYMBaAFAWO4bfTEr2R6VYzQYrGk/2V
WmtYMAwGA1UdEwQFMAMBAf8wCgYIKoZIzj0EAwIDSQAwRgIhAIelqGdxPMHmQqRF
zA85vv7JhfMkvZYGPELC7I2K8V7ZAiEA9KcthV3HtDXKNDsA6ULT+qUkyoHRzCzr
A4QaL2VU6i4=
-----END CERTIFICATE-----
`
var badPEM = `-----BEGIN CERTIFICATE-----
MIICRDCCAemgAwIBAgIJALwW//dz2ZBvMAoGCCqGSM49BAMCMH4xCzAJBgNVBAYT
AlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2Nv
MRgwFgYDVQQKDA9MaW51eEZvdW5kYXRpb24xFDASBgNVBAsMC0h5cGVybGVkZ2Vy
MRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTYxMjA0MjIzMDE4WhcNMjYxMjAyMjIz
MDE4WjB+MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UE
BwwNU2FuIEZyYW5jaXNjbzEYMBYGA1UECgwPTGludXhGb3VuZGF0aW9uMRQwEgYD
VQQLDAtIeXBlcmxlZGdlcjESMBAGA1UEAwwJbG9jYWxob3N0MFkwEwYHKoZIzj0C
-----END CERTIFICATE-----
`
var pemNoCertificateHeader = `-----BEGIN NOCERT-----
MIICRDCCAemgAwIBAgIJALwW//dz2ZBvMAoGCCqGSM49BAMCMH4xCzAJBgNVBAYT
AlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2Nv
MRgwFgYDVQQKDA9MaW51eEZvdW5kYXRpb24xFDASBgNVBAsMC0h5cGVybGVkZ2Vy
MRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTYxMjA0MjIzMDE4WhcNMjYxMjAyMjIz
MDE4WjB+MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UE
BwwNU2FuIEZyYW5jaXNjbzEYMBYGA1UECgwPTGludXhGb3VuZGF0aW9uMRQwEgYD
VQQLDAtIeXBlcmxlZGdlcjESMBAGA1UEAwwJbG9jYWxob3N0MFkwEwYHKoZIzj0C
AQYIKoZIzj0DAQcDQgAEu2FEZVSr30Afey6dwcypeg5P+BuYx5JSYdG0/KJIBjWK
nzYo7FEmgMir7GbNh4pqA8KFrJZkPuxMgnEJBZTv+6NQME4wHQYDVR0OBBYEFAWO
4bfTEr2R6VYzQYrGk/2VWmtYMB8GA1UdIwQYMBaAFAWO4bfTEr2R6VYzQYrGk/2V
WmtYMAwGA1UdEwQFMAMBAf8wCgYIKoZIzj0EAwIDSQAwRgIhAIelqGdxPMHmQqRF
zA85vv7JhfMkvZYGPELC7I2K8V7ZAiEA9KcthV3HtDXKNDsA6ULT+qUkyoHRzCzr
A4QaL2VU6i4=
-----END NOCERT-----
`
var timeout = time.Second * 1
var testOrgs = []testOrg{}
func init() {
//load up crypto material for test orgs
for i := 1; i <= numOrgs; i++ {
testOrg, err := loadOrg(i)
if err != nil {
log.Fatalf("Failed to load test organizations due to error: %s", err.Error())
}
testOrgs = append(testOrgs, testOrg)
}
}
//test server to be registered with the GRPCServer
type testServiceServer struct{}
func (tss *testServiceServer) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) {
return new(testpb.Empty), nil
}
//invoke the EmptyCall RPC
func invokeEmptyCall(address string, dialOptions []grpc.DialOption) (*testpb.Empty, error) {
//add DialOptions
dialOptions = append(dialOptions, grpc.WithBlock())
dialOptions = append(dialOptions, grpc.WithTimeout(timeout))
//create GRPC client conn
clientConn, err := grpc.Dial(address, dialOptions...)
if err != nil {
return nil, err
}
defer clientConn.Close()
//create GRPC client
client := testpb.NewTestServiceClient(clientConn)
ctx := context.Background()
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
//invoke service
empty, err := client.EmptyCall(ctx, new(testpb.Empty))
if err != nil {
return nil, err
}
return empty, nil
}
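// Usage sketch (illustrative address and org index): invoke the RPC over TLS
// with client credentials built from one of the test orgs.
//
//	tlsConfig := testOrgs[0].trustedClients(nil)[0]
//	creds := credentials.NewTLS(tlsConfig)
//	_, err := invokeEmptyCall("localhost:9050",
//		[]grpc.DialOption{grpc.WithTransportCredentials(creds)})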
const (
numOrgs = 2
numChildOrgs = 2
numClientCerts = 2
numServerCerts = 2
)
//strings for cert filenames
var (
orgCAKey = filepath.Join("testdata", "certs", "Org%d-key.pem")
orgCACert = filepath.Join("testdata", "certs", "Org%d-cert.pem")
orgServerKey = filepath.Join("testdata", "certs", "Org%d-server%d-key.pem")
orgServerCert = filepath.Join("testdata", "certs", "Org%d-server%d-cert.pem")
orgClientKey = filepath.Join("testdata", "certs", "Org%d-client%d-key.pem")
orgClientCert = filepath.Join("testdata", "certs", "Org%d-client%d-cert.pem")
childCAKey = filepath.Join("testdata", "certs", "Org%d-child%d-key.pem")
childCACert = filepath.Join("testdata", "certs", "Org%d-child%d-cert.pem")
childServerKey = filepath.Join("testdata", "certs", "Org%d-child%d-server%d-key.pem")
childServerCert = filepath.Join("testdata", "certs", "Org%d-child%d-server%d-cert.pem")
childClientKey = filepath.Join("testdata", "certs", "Org%d-child%d-client%d-key.pem")
childClientCert = filepath.Join("testdata", "certs", "Org%d-child%d-client%d-cert.pem")
)
type testServer struct {
address string
config comm.SecureServerConfig
}
type serverCert struct {
keyPEM []byte
certPEM []byte
}
type testOrg struct {
rootCA []byte
serverCerts []serverCert
clientCerts []tls.Certificate
childOrgs []testOrg
}
//return *X509.CertPool for the rootCA of the org
func (org *testOrg) rootCertPool() *x509.CertPool {
certPool := x509.NewCertPool()
certPool.AppendCertsFromPEM(org.rootCA)
return certPool
}
//return testServers for the org
func (org *testOrg) testServers(port int, clientRootCAs [][]byte) []testServer {
var testServers = []testServer{}
clientRootCAs = append(clientRootCAs, org.rootCA)
//loop through the serverCerts and create testServers
for i, serverCert := range org.serverCerts {
testServer := testServer{
fmt.Sprintf("localhost:%d", port+i),
comm.SecureServerConfig{
UseTLS: true,
ServerCertificate: serverCert.certPEM,
ServerKey: serverCert.keyPEM,
RequireClientCert: true,
ClientRootCAs: clientRootCAs,
},
}
testServers = append(testServers, testServer)
}
return testServers
}
//return trusted clients for the org
func (org *testOrg) trustedClients(serverRootCAs [][]byte) []*tls.Config {
var trustedClients = []*tls.Config{}
//if we have any additional server root CAs add them to the certPool
certPool := org.rootCertPool()
for _, serverRootCA := range serverRootCAs {
certPool.AppendCertsFromPEM(serverRootCA)
}
//loop through the clientCerts and create tls.Configs
for _, clientCert := range org.clientCerts {
trustedClient := &tls.Config{
Certificates: []tls.Certificate{clientCert},
RootCAs: certPool,
}
trustedClients = append(trustedClients, trustedClient)
}
return trustedClients
}
//createCertPool creates an x509.CertPool from an array of PEM-encoded certificates
func createCertPool(rootCAs [][]byte) (*x509.CertPool, error) {
certPool := x509.NewCertPool()
for _, rootCA := range rootCAs {
if !certPool.AppendCertsFromPEM(rootCA) {
return nil, errors.New("Failed to load root certificates")
}
}
return certPool, nil
}
//utility function to load crypto material for organizations
func loadOrg(parent int) (testOrg, error) {
var org = testOrg{}
//load the CA
caPEM, err := ioutil.ReadFile(fmt.Sprintf(orgCACert, parent))
if err != nil {
return org, err
}
//loop through and load servers
var serverCerts = []serverCert{}
for i := 1; i <= numServerCerts; i++ {
keyPEM, err := ioutil.ReadFile(fmt.Sprintf(orgServerKey, parent, i))
if err != nil {
return org, err
}
certPEM, err := ioutil.ReadFile(fmt.Sprintf(orgServerCert, parent, i))
if err != nil {
return org, err
}
serverCerts = append(serverCerts, serverCert{keyPEM, certPEM})
}
//loop through and load clients
var clientCerts = []tls.Certificate{}
for j := 1; j <= numClientCerts; j++ {
clientCert, err := loadTLSKeyPairFromFile(fmt.Sprintf(orgClientKey, parent, j),
fmt.Sprintf(orgClientCert, parent, j))
if err != nil {
return org, err
}
clientCerts = append(clientCerts, clientCert)
}
//loop through and load child orgs
var childOrgs = []testOrg{}
for k := 1; k <= numChildOrgs; k++ {
childOrg, err := loadChildOrg(parent, k)
if err != nil {
return org, err
}
childOrgs = append(childOrgs, childOrg)
}
return testOrg{caPEM, serverCerts, clientCerts, childOrgs}, nil
}
//utility function to load crypto material for child organizations
func loadChildOrg(parent, child int) (testOrg, error) {
var org = testOrg{}
//load the CA
caPEM, err := ioutil.ReadFile(fmt.Sprintf(childCACert, parent, child))
if err != nil {
return org, err
}
//loop through and load servers
var serverCerts = []serverCert{}
for i := 1; i <= numServerCerts; i++ {
keyPEM, err := ioutil.ReadFile(fmt.Sprintf(childServerKey, parent, child, i))
if err != nil {
return org, err
}
certPEM, err := ioutil.ReadFile(fmt.Sprintf(childServerCert, parent, child, i))
if err != nil {
return org, err
}
serverCerts = append(serverCerts, serverCert{keyPEM, certPEM})
}
//loop through and load clients
var clientCerts = []tls.Certificate{}
for j := 1; j <= numClientCerts; j++ {
clientCert, err := loadTLSKeyPairFromFile(fmt.Sprintf(childClientKey, parent, child, j),
fmt.Sprintf(childClientCert, parent, child, j))
if err != nil {
return org, err
}
clientCerts = append(clientCerts, clientCert)
}
return testOrg{caPEM, serverCerts, clientCerts, []testOrg{}}, nil
}
//loadTLSKeyPairFromFile creates a tls.Certificate from PEM-encoded key and cert files
func loadTLSKeyPairFromFile(keyFile, certFile string) (tls.Certificate, error) {
certPEMBlock, err := ioutil.ReadFile(certFile)
if err != nil {
return tls.Certificate{}, err
}
keyPEMBlock, err := ioutil.ReadFile(keyFile)
if err != nil {
return tls.Certificate{}, err
}
cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
if err != nil {
return tls.Certificate{}, err
}
return cert, nil
}
func TestNewGRPCServerInvalidParameters(t *testing.T) {
t.Parallel()
//missing address
_, err := comm.NewGRPCServer("", comm.SecureServerConfig{UseTLS: false})
//check for error
msg := "Missing address parameter"
assert.EqualError(t, err, msg)
if err != nil {
t.Log(err.Error())
}
//missing port
_, err = comm.NewGRPCServer("abcdef", comm.SecureServerConfig{UseTLS: false})
//check for error
msg = "listen tcp: missing port in address abcdef"
assert.EqualError(t, err, msg)
if err != nil {
t.Log(err.Error())
}
//bad port
_, err = comm.NewGRPCServer("localhost:1BBB", comm.SecureServerConfig{UseTLS: false})
//check for error
msgs := [2]string{"listen tcp: lookup tcp/1BBB: nodename nor servname provided, or not known",
"listen tcp: unknown port tcp/1BBB"} //different error on MacOS and in Docker
if assert.Error(t, err, "%s or %s expected", msgs[0], msgs[1]) {
assert.Contains(t, msgs, err.Error())
}
if err != nil {
t.Log(err.Error())
}
//bad hostname
_, err = comm.NewGRPCServer("hostdoesnotexist.localdomain:9050",
comm.SecureServerConfig{UseTLS: false})
/*
We cannot check for a specific error message due to the fact that some
systems will automatically resolve unknown host names to a "search"
address so we just check to make sure that an error was returned
*/
assert.Error(t, err, "error expected for invalid hostname")
if err != nil {
t.Log(err.Error())
}
//address in use
_, err = comm.NewGRPCServer(":9040", comm.SecureServerConfig{UseTLS: false})
_, err = comm.NewGRPCServer(":9040", comm.SecureServerConfig{UseTLS: false})
//check for error
msg = "listen tcp :9040: bind: address already in use"
assert.EqualError(t, err, msg)
if err != nil {
t.Log(err.Error())
}
//missing serverCertificate
_, err = comm.NewGRPCServer(":9041",
comm.SecureServerConfig{UseTLS: true, ServerCertificate: []byte{}})
//check for error
msg = "secureConfig must contain both ServerKey and " +
"ServerCertificate when UseTLS is true"
assert.EqualError(t, err, msg)
if err != nil {
t.Log(err.Error())
}
//missing serverKey
_, err = comm.NewGRPCServer(":9042",
comm.SecureServerConfig{UseTLS: true, ServerKey: []byte{}})
//check for error
assert.EqualError(t, err, msg)
if err != nil {
t.Log(err.Error())
}
//bad serverKey
_, err = comm.NewGRPCServer(":9043",
comm.SecureServerConfig{
UseTLS: true,
ServerCertificate: []byte(selfSignedCertPEM),
ServerKey: []byte{}})
//check for error
msg = "tls: failed to find any PEM data in key input"
assert.EqualError(t, err, msg)
if err != nil {
t.Log(err.Error())
}
//bad serverCertificate
_, err = comm.NewGRPCServer(":9044",
comm.SecureServerConfig{
UseTLS: true,
ServerCertificate: []byte{},
ServerKey: []byte(selfSignedKeyPEM)})
//check for error
msg = "tls: failed to find any PEM data in certificate input"
assert.EqualError(t, err, msg)
if err != nil {
t.Log(err.Error())
}
//bad clientRootCAs
/** TODO: revisit after figuring out why MSP does not serialize PEMs with type
_, err = comm.NewGRPCServer(":9045",
comm.SecureServerConfig{
UseTLS: true,
ServerCertificate: []byte(selfSignedCertPEM),
ServerKey: []byte(selfSignedKeyPEM),
RequireClientCert: true,
ClientRootCAs: [][]byte{[]byte(pemNoCertificateHeader)}})
//check for error
msg = "Failed to append client root certificate(s): " +
"No client root certificates found"
assert.EqualError(t, err, msg)
if err != nil {
t.Log(err.Error())
}
*/
srv, err := comm.NewGRPCServer(":9046",
comm.SecureServerConfig{
UseTLS: true,
ServerCertificate: []byte(selfSignedCertPEM),
ServerKey: []byte(selfSignedKeyPEM),
RequireClientCert: true})
badRootCAs := [][]byte{[]byte(badPEM)}
err = srv.SetClientRootCAs(badRootCAs)
//check for error
msg = "Failed to set client root certificate(s): " +
"asn1: syntax error: data truncated"
assert.EqualError(t, err, msg)
if err != nil {
t.Log(err.Error())
}
}
func TestNewGRPCServer(t *testing.T) {
t.Parallel()
testAddress := "localhost:9053"
srv, err := comm.NewGRPCServer(testAddress,
comm.SecureServerConfig{UseTLS: false})
//check for error
if err != nil {
t.Fatalf("Failed to return new GRPC server: %v", err)
}
//make sure our properties are as expected
//resolve the address
addr, err := net.ResolveTCPAddr("tcp", testAddress)
assert.Equal(t, srv.Address(), addr.String())
assert.Equal(t, srv.Listener().Addr().String(), addr.String())
//TlSEnabled should be false
assert.Equal(t, srv.TLSEnabled(), false)
//register the GRPC test server
testpb.RegisterTestServiceServer(srv.Server(), &testServiceServer{})
//start the server
go srv.Start()
defer srv.Stop()
//should not be needed
time.Sleep(10 * time.Millisecond)
//GRPC client options
var dialOptions []grpc.DialOption
dialOptions = append(dialOptions, grpc.WithInsecure())
//invoke the EmptyCall service
_, err = invokeEmptyCall(testAddress, dialOptions)
if err != nil {
t.Fatalf("GRPC client failed to invoke the EmptyCall service on %s: %v",
testAddress, err)
} else {
t.Log("GRPC client successfully invoked the EmptyCall service: " + testAddress)
}
}
func TestNewGRPCServerFromListener(t *testing.T) {
t.Parallel()
testAddress := "localhost:9054"
//create our listener
lis, err := net.Listen("tcp", testAddress)
if err != nil {
t.Fatalf("Failed to create listener: %v", err)
}
srv, err := comm.NewGRPCServerFromListener(lis,
comm.SecureServerConfig{UseTLS: false})
//check for error
if err != nil {
t.Fatalf("Failed to return new GRPC server: %v", err)
}
//make sure our properties are as expected
//resolve the address
addr, err := net.ResolveTCPAddr("tcp", testAddress)
assert.Equal(t, srv.Address(), addr.String())
assert.Equal(t, srv.Listener().Addr().String(), addr.String())
//TlSEnabled should be false
assert.Equal(t, srv.TLSEnabled(), false)
//register the GRPC test server
testpb.RegisterTestServiceServer(srv.Server(), &testServiceServer{})
//start the server
go srv.Start()
defer srv.Stop()
//should not be needed
time.Sleep(10 * time.Millisecond)
//GRPC client options
var dialOptions []grpc.DialOption
dialOptions = append(dialOptions, grpc.WithInsecure())
//invoke the EmptyCall service
_, err = invokeEmptyCall(testAddress, dialOptions)
if err != nil {
t.Fatalf("GRPC client failed to invoke the EmptyCall service on %s: %v",
testAddress, err)
} else {
t.Log("GRPC client successfully invoked the EmptyCall service: " + testAddress)
}
}
func TestNewSecureGRPCServer(t *testing.T) {
t.Parallel()
testAddress := "localhost:9055"
srv, err := comm.NewGRPCServer(testAddress, comm.SecureServerConfig{
UseTLS: true,
ServerCertificate: []byte(selfSignedCertPEM),
ServerKey: []byte(selfSignedKeyPEM),
})
//check for error
if err != nil {
t.Fatalf("Failed to return new GRPC server: %v", err)
}
//make sure our properties are as expected
//resolve the address
addr, err := net.ResolveTCPAddr("tcp", testAddress)
assert.Equal(t, srv.Address(), addr.String())
assert.Equal(t, srv.Listener().Addr().String(), addr.String())
//check the server certificate
cert, _ := tls.X509KeyPair([]byte(selfSignedCertPEM), []byte(selfSignedKeyPEM))
assert.Equal(t, srv.ServerCertificate(), cert)
//TlSEnabled should be true
assert.Equal(t, srv.TLSEnabled(), true)
//register the GRPC test server
testpb.RegisterTestServiceServer(srv.Server(), &testServiceServer{})
//start the server
go srv.Start()
defer srv.Stop()
//should not be needed
time.Sleep(10 * time.Millisecond)
//create the client credentials
certPool := x509.NewCertPool()
if !certPool.AppendCertsFromPEM([]byte(selfSignedCertPEM)) {
t.Fatal("Failed to append certificate to client credentials")
}
creds := credentials.NewClientTLSFromCert(certPool, "")
//GRPC client options
var dialOptions []grpc.DialOption
dialOptions = append(dialOptions, grpc.WithTransportCredentials(creds))
//invoke the EmptyCall service
_, err = invokeEmptyCall(testAddress, dialOptions)
if err != nil {
t.Fatalf("GRPC client failed to invoke the EmptyCall service on %s: %v",
testAddress, err)
} else {
t.Log("GRPC client successfully invoked the EmptyCall service: " + testAddress)
}
// ensure that TLS 1.2 is required / enforced
for _, tlsVersion := range []uint16{tls.VersionSSL30, tls.VersionTLS10, tls.VersionTLS11} {
_, err = invokeEmptyCall(testAddress,
[]grpc.DialOption{grpc.WithTransportCredentials(
credentials.NewTLS(&tls.Config{
RootCAs: certPool,
MinVersion: tlsVersion,
MaxVersion: tlsVersion,
}))})
t.Logf("TLSVersion [%d] failed with [%s]", tlsVersion, err)
assert.Error(t, err, "Should not have been able to connect with TLS version < 1.2")
assert.Contains(t, err.Error(), "protocol version not supported")
}
}
func TestNewSecureGRPCServerFromListener(t *testing.T) {
t.Parallel()
testAddress := "localhost:9056"
//create our listener
lis, err := net.Listen("tcp", testAddress)
if err != nil {
t.Fatalf("Failed to create listener: %v", err)
}
srv, err := comm.NewGRPCServerFromListener(lis, comm.SecureServerConfig{
UseTLS: true,
ServerCertificate: []byte(selfSignedCertPEM),
ServerKey: []byte(selfSignedKeyPEM),
})
//check for error
if err != nil {
t.Fatalf("Failed to return new GRPC server: %v", err)
}
//make sure our properties are as expected
//resolve the address
addr, err := net.ResolveTCPAddr("tcp", testAddress)
assert.Equal(t, srv.Address(), addr.String())
assert.Equal(t, srv.Listener().Addr().String(), addr.String())
//check the server certificate
cert, _ := tls.X509KeyPair([]byte(selfSignedCertPEM), []byte(selfSignedKeyPEM))
assert.Equal(t, srv.ServerCertificate(), cert)
//TlSEnabled should be true
assert.Equal(t, srv.TLSEnabled(), true)
//register the GRPC test server
testpb.RegisterTestServiceServer(srv.Server(), &testServiceServer{})
//start the server
go srv.Start()
defer srv.Stop()
//should not be needed
time.Sleep(10 * time.Millisecond)
//create the client credentials
certPool := x509.NewCertPool()
if !certPool.AppendCertsFromPEM([]byte(selfSignedCertPEM)) {
t.Fatal("Failed to append certificate to client credentials")
}
creds := credentials.NewClientTLSFromCert(certPool, "")
//GRPC client options
var dialOptions []grpc.DialOption
dialOptions = append(dialOptions, grpc.WithTransportCredentials(creds))
//invoke the EmptyCall service
_, err = invokeEmptyCall(testAddress, dialOptions)
if err != nil {
t.Fatalf("GRPC client failed to invoke the EmptyCall service on %s: %v",
testAddress, err)
} else {
t.Log("GRPC client successfully invoked the EmptyCall service: " + testAddress)
}
}
//prior tests used self-signed certificates loaded by the GRPCServer and the test client
//here we'll use certificates signed by certificate authorities
func TestWithSignedRootCertificates(t *testing.T) {
t.Parallel()
//use Org1 testdata
fileBase := "Org1"
certPEMBlock, err := ioutil.ReadFile(filepath.Join("testdata", "certs", fileBase+"-server1-cert.pem"))
if err != nil {
t.Fatalf("Failed to load test certificates: %v", err)
}
keyPEMBlock, err := ioutil.ReadFile(filepath.Join("testdata", "certs", fileBase+"-server1-key.pem"))
if err != nil {
t.Fatalf("Failed to load test certificates: %v", err)
}
caPEMBlock, err := ioutil.ReadFile(filepath.Join("testdata", "certs", fileBase+"-cert.pem"))
if err != nil {
t.Fatalf("Failed to load test certificates: %v", err)
}
testAddress := "localhost:9057"
//create our listener
lis, err := net.Listen("tcp", testAddress)
if err != nil {
t.Fatalf("Failed to create listener: %v", err)
}
srv, err := comm.NewGRPCServerFromListener(lis, comm.SecureServerConfig{
UseTLS: true,
ServerCertificate: certPEMBlock,
ServerKey: keyPEMBlock,
})
//check for error
if err != nil {
t.Fatalf("Failed to return new GRPC server: %v", err)
}
//register the GRPC test server
testpb.RegisterTestServiceServer(srv.Server(), &testServiceServer{})
//start the server
go srv.Start()
defer srv.Stop()
//should not be needed
time.Sleep(10 * time.Millisecond)
//create a CertPool for use by the client with the server cert only
certPoolServer, err := createCertPool([][]byte{certPEMBlock})
if err != nil {
t.Fatalf("Failed to load root certificates into pool: %v", err)
}
//create the client credentials
creds := credentials.NewClientTLSFromCert(certPoolServer, "")
//GRPC client options
var dialOptions []grpc.DialOption
dialOptions = append(dialOptions, grpc.WithTransportCredentials(creds))
//invoke the EmptyCall service
_, err = invokeEmptyCall(testAddress, dialOptions)
//client should not be able to connect
assert.EqualError(t, err, x509.UnknownAuthorityError{}.Error())
t.Logf("assert.EqualError: %s", err.Error())
//now use the CA certificate
certPoolCA := x509.NewCertPool()
if !certPoolCA.AppendCertsFromPEM(caPEMBlock) {
t.Fatal("Failed to append certificate to client credentials")
}
creds = credentials.NewClientTLSFromCert(certPoolCA, "")
var dialOptionsCA []grpc.DialOption
dialOptionsCA = append(dialOptionsCA, grpc.WithTransportCredentials(creds))
//invoke the EmptyCall service
_, err2 := invokeEmptyCall(testAddress, dialOptionsCA)
if err2 != nil {
t.Fatalf("GRPC client failed to invoke the EmptyCall service on %s: %v",
testAddress, err2)
} else {
t.Log("GRPC client successfully invoked the EmptyCall service: " + testAddress)
}
}
//here we'll use certificates signed by intermediate certificate authorities
func TestWithSignedIntermediateCertificates(t *testing.T) {
t.Parallel()
//use Org1 testdata
fileBase := "Org1"
certPEMBlock, err := ioutil.ReadFile(filepath.Join("testdata", "certs", fileBase+"-child1-server1-cert.pem"))
if err != nil {
t.Fatalf("Failed to load test certificates: %v", err)
}
keyPEMBlock, err := ioutil.ReadFile(filepath.Join("testdata", "certs", fileBase+"-child1-server1-key.pem"))
if err != nil {
t.Fatalf("Failed to load test certificates: %v", err)
}
intermediatePEMBlock, err := ioutil.ReadFile(filepath.Join("testdata", "certs", fileBase+"-child1-cert.pem"))
if err != nil {
t.Fatalf("Failed to load test certificates: %v", err)
}
testAddress := "localhost:9058"
//create our listener
lis, err := net.Listen("tcp", testAddress)
if err != nil {
t.Fatalf("Failed to create listener: %v", err)
}
srv, err := comm.NewGRPCServerFromListener(lis, comm.SecureServerConfig{
UseTLS: true,
ServerCertificate: certPEMBlock,
ServerKey: keyPEMBlock,
})
//check for error
if err != nil {
t.Fatalf("Failed to return new GRPC server: %v", err)
}
//register the GRPC test server
testpb.RegisterTestServiceServer(srv.Server(), &testServiceServer{})
//start the server
go srv.Start()
defer srv.Stop()
//should not be needed
time.Sleep(10 * time.Millisecond)
//create a CertPool for use by the client with the server cert only
certPoolServer, err := createCertPool([][]byte{certPEMBlock})
if err != nil {
t.Fatalf("Failed to load root certificates into pool: %v", err)
}
//create the client credentials
creds := credentials.NewClientTLSFromCert(certPoolServer, "")
//GRPC client options
var dialOptions []grpc.DialOption
dialOptions = append(dialOptions, grpc.WithTransportCredentials(creds))
//invoke the EmptyCall service
_, err = invokeEmptyCall(testAddress, dialOptions)
//client should not be able to connect
assert.EqualError(t, err, x509.UnknownAuthorityError{}.Error())
t.Logf("assert.EqualError: %s", err.Error())
//now use the CA certificate
//create a CertPool for use by the client with the intermediate root CA
certPoolCA, err := createCertPool([][]byte{intermediatePEMBlock})
if err != nil {
t.Fatalf("Failed to load root certificates into pool: %v", err)
}
creds = credentials.NewClientTLSFromCert(certPoolCA, "")
var dialOptionsCA []grpc.DialOption
dialOptionsCA = append(dialOptionsCA, grpc.WithTransportCredentials(creds))
//invoke the EmptyCall service
_, err2 := invokeEmptyCall(testAddress, dialOptionsCA)
if err2 != nil {
t.Fatalf("GRPC client failed to invoke the EmptyCall service on %s: %v",
testAddress, err2)
} else {
t.Log("GRPC client successfully invoked the EmptyCall service: " + testAddress)
}
}
//utility function for testing client / server communication using TLS
func runMutualAuth(t *testing.T, servers []testServer, trustedClients, unTrustedClients []*tls.Config) error {
//loop through all the test servers
for i := 0; i < len(servers); i++ {
//create listener
lis, err := net.Listen("tcp", servers[i].address)
if err != nil {
return err
}
//create GRPCServer
srv, err := comm.NewGRPCServerFromListener(lis, servers[i].config)
if err != nil {
return err
}
//register the GRPC test server and start the GRPCServer
testpb.RegisterTestServiceServer(srv.Server(), &testServiceServer{})
go srv.Start()
defer srv.Stop()
//should not be needed but just in case
time.Sleep(10 * time.Millisecond)
//loop through all the trusted clients
for j := 0; j < len(trustedClients); j++ {
//invoke the EmptyCall service
_, err = invokeEmptyCall(servers[i].address,
[]grpc.DialOption{grpc.WithTransportCredentials(credentials.NewTLS(trustedClients[j]))})
//we expect success from trusted clients
if err != nil {
return err
} else {
t.Logf("Trusted client%d successfully connected to %s", j, servers[i].address)
}
}
//loop through all the untrusted clients
for k := 0; k < len(unTrustedClients); k++ {
//invoke the EmptyCall service
_, err = invokeEmptyCall(servers[i].address, []grpc.DialOption{grpc.WithTransportCredentials(credentials.NewTLS(unTrustedClients[k]))})
//we expect failure from untrusted clients
if err != nil {
t.Logf("Untrusted client%d was correctly rejected by %s", k, servers[i].address)
} else {
return fmt.Errorf("Untrusted client %d should not have been able to connect to %s", k,
servers[i].address)
}
}
}
return nil
}
func TestMutualAuth(t *testing.T) {
t.Parallel()
var tests = []struct {
name string
servers []testServer
trustedClients []*tls.Config
unTrustedClients []*tls.Config
}{
{
name: "ClientAuthRequiredWithSingleOrg",
servers: testOrgs[0].testServers(9060, [][]byte{}),
trustedClients: testOrgs[0].trustedClients([][]byte{}),
unTrustedClients: testOrgs[1].trustedClients([][]byte{testOrgs[0].rootCA}),
},
{
name: "ClientAuthRequiredWithChildClientOrg",
servers: testOrgs[0].testServers(9070, [][]byte{testOrgs[0].childOrgs[0].rootCA}),
trustedClients: testOrgs[0].childOrgs[0].trustedClients([][]byte{testOrgs[0].rootCA}),
unTrustedClients: testOrgs[0].childOrgs[1].trustedClients([][]byte{testOrgs[0].rootCA}),
},
{
name: "ClientAuthRequiredWithMultipleChildClientOrgs",
servers: testOrgs[0].testServers(9080, append([][]byte{},
testOrgs[0].childOrgs[0].rootCA, testOrgs[0].childOrgs[1].rootCA)),
trustedClients: append(append([]*tls.Config{},
testOrgs[0].childOrgs[0].trustedClients([][]byte{testOrgs[0].rootCA})...),
testOrgs[0].childOrgs[1].trustedClients([][]byte{testOrgs[0].rootCA})...),
unTrustedClients: testOrgs[1].trustedClients([][]byte{testOrgs[0].rootCA}),
},
{
name: "ClientAuthRequiredWithDifferentServerAndClientOrgs",
servers: testOrgs[0].testServers(9090, [][]byte{testOrgs[1].rootCA}),
trustedClients: testOrgs[1].trustedClients([][]byte{testOrgs[0].rootCA}),
unTrustedClients: testOrgs[0].childOrgs[1].trustedClients([][]byte{testOrgs[0].rootCA}),
},
{
name: "ClientAuthRequiredWithDifferentServerAndChildClientOrgs",
servers: testOrgs[1].testServers(9100, [][]byte{testOrgs[0].childOrgs[0].rootCA}),
trustedClients: testOrgs[0].childOrgs[0].trustedClients([][]byte{testOrgs[1].rootCA}),
unTrustedClients: testOrgs[1].childOrgs[0].trustedClients([][]byte{testOrgs[1].rootCA}),
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()
t.Logf("Running test %s ...", test.name)
testErr := runMutualAuth(t, test.servers, test.trustedClients, test.unTrustedClients)
if testErr != nil {
t.Fatalf("%s failed with error: %s", test.name, testErr.Error())
}
})
}
}
func TestAppendRemoveWithInvalidBytes(t *testing.T) {
// TODO: revisit when msp serialization without PEM type is resolved
t.Skip()
t.Parallel()
noPEMData := [][]byte{[]byte("badcert1"), []byte("badCert2")}
//get the config for one of our Org1 test servers
serverConfig := testOrgs[0].testServers(9200, [][]byte{})[0].config
address := testOrgs[0].testServers(9200, [][]byte{})[0].address
//create a GRPCServer
srv, err := comm.NewGRPCServer(address, serverConfig)
if err != nil {
t.Fatalf("Failed to create GRPCServer due to: %s", err.Error())
}
//append/remove nonPEMData
noCertsFound := "No client root certificates found"
err = srv.AppendClientRootCAs(noPEMData)
if err == nil {
t.Fatalf("Expected error: %s", noCertsFound)
}
err = srv.RemoveClientRootCAs(noPEMData)
if err == nil {
t.Fatalf("Expected error: %s", noCertsFound)
}
//append/remove PEM without CERTIFICATE header
err = srv.AppendClientRootCAs([][]byte{[]byte(pemNoCertificateHeader)})
if err == nil {
t.Fatalf("Expected error: %s", noCertsFound)
}
err = srv.RemoveClientRootCAs([][]byte{[]byte(pemNoCertificateHeader)})
if err == nil {
t.Fatalf("Expected error: %s", noCertsFound)
}
//append/remove bad PEM data
err = srv.AppendClientRootCAs([][]byte{[]byte(badPEM)})
if err == nil {
t.Fatalf("Expected error parsing bad PEM data")
}
err = srv.RemoveClientRootCAs([][]byte{[]byte(badPEM)})
if err == nil {
t.Fatalf("Expected error parsing bad PEM data")
}
}
func TestAppendClientRootCAs(t *testing.T) {
t.Parallel()
//get the config for one of our Org1 test servers
serverConfig := testOrgs[0].testServers(9300, [][]byte{})[0].config
address := testOrgs[0].testServers(9300, [][]byte{})[0].address
//create a GRPCServer
srv, err := comm.NewGRPCServer(address, serverConfig)
if err != nil {
t.Fatalf("Failed to create GRPCServer due to: %s", err.Error())
}
//register the GRPC test server and start the GRPCServer
testpb.RegisterTestServiceServer(srv.Server(), &testServiceServer{})
go srv.Start()
defer srv.Stop()
//should not be needed but just in case
time.Sleep(10 * time.Millisecond)
//try to connect with untrusted clients from Org2 children
clientConfig1 := testOrgs[1].childOrgs[0].trustedClients([][]byte{testOrgs[0].rootCA})[0]
clientConfig2 := testOrgs[1].childOrgs[1].trustedClients([][]byte{testOrgs[0].rootCA})[0]
clientConfigs := []*tls.Config{clientConfig1, clientConfig2}
for i, clientConfig := range clientConfigs {
//invoke the EmptyCall service
_, err = invokeEmptyCall(address, []grpc.DialOption{
grpc.WithTransportCredentials(credentials.NewTLS(clientConfig))})
//we expect failure as these are currently not trusted clients
if err != nil {
t.Logf("Untrusted client%d was correctly rejected by %s", i, address)
} else {
t.Fatalf("Untrusted client %d should not have been able to connect to %s", i,
address)
}
}
//now append the root CAs for the untrusted clients
err = srv.AppendClientRootCAs([][]byte{testOrgs[1].childOrgs[0].rootCA,
testOrgs[1].childOrgs[1].rootCA})
if err != nil {
t.Fatal("Failed to append client root CAs")
}
//now try to connect again
for j, clientConfig := range clientConfigs {
//invoke the EmptyCall service
_, err = invokeEmptyCall(address, []grpc.DialOption{
grpc.WithTransportCredentials(credentials.NewTLS(clientConfig))})
//we expect success as these are now trusted clients
if err != nil {
t.Fatalf("Now trusted client%d failed to connect to %s with error: %s",
j, address, err.Error())
} else {
t.Logf("Now trusted client%d successfully connected to %s", j, address)
}
}
}
func TestRemoveClientRootCAs(t *testing.T) {
t.Parallel()
//get the config for one of our Org1 test servers and include client CAs from
//Org2 child orgs
serverConfig := testOrgs[0].testServers(9301,
[][]byte{testOrgs[1].childOrgs[0].rootCA,
testOrgs[1].childOrgs[1].rootCA})[0].config
address := testOrgs[0].testServers(9301, [][]byte{})[0].address
//create a GRPCServer
srv, err := comm.NewGRPCServer(address, serverConfig)
if err != nil {
t.Fatalf("Failed to create GRPCServer due to: %s", err.Error())
}
//register the GRPC test server and start the GRPCServer
testpb.RegisterTestServiceServer(srv.Server(), &testServiceServer{})
go srv.Start()
defer srv.Stop()
//should not be needed but just in case
time.Sleep(10 * time.Millisecond)
//try to connect with trusted clients from Org2 children
clientConfig1 := testOrgs[1].childOrgs[0].trustedClients([][]byte{testOrgs[0].rootCA})[0]
clientConfig2 := testOrgs[1].childOrgs[1].trustedClients([][]byte{testOrgs[0].rootCA})[0]
clientConfigs := []*tls.Config{clientConfig1, clientConfig2}
for i, clientConfig := range clientConfigs {
//invoke the EmptyCall service
_, err = invokeEmptyCall(address, []grpc.DialOption{
grpc.WithTransportCredentials(credentials.NewTLS(clientConfig))})
//we expect success as these are trusted clients
if err != nil {
t.Fatalf("Trusted client%d failed to connect to %s with error: %s",
i, address, err.Error())
} else {
t.Logf("Trusted client%d successfully connected to %s", i, address)
}
}
//now remove the root CAs for the untrusted clients
err = srv.RemoveClientRootCAs([][]byte{testOrgs[1].childOrgs[0].rootCA,
testOrgs[1].childOrgs[1].rootCA})
if err != nil {
t.Fatal("Failed to remove client root CAs")
}
//now try to connect again
for j, clientConfig := range clientConfigs {
//invoke the EmptyCall service
_, err = invokeEmptyCall(address, []grpc.DialOption{
grpc.WithTransportCredentials(credentials.NewTLS(clientConfig))})
//we expect failure as these are now untrusted clients
if err != nil {
t.Logf("Now untrusted client%d was correctly rejected by %s", j, address)
} else {
t.Fatalf("Now untrusted client %d should not have been able to connect to %s", j,
address)
}
}
}
//test for race conditions - test locally using "go test -race -run TestConcurrentAppendRemoveSet"
func TestConcurrentAppendRemoveSet(t *testing.T) {
t.Parallel()
//get the config for one of our Org1 test servers and include client CAs from
//Org2 child orgs
serverConfig := testOrgs[0].testServers(9302,
[][]byte{testOrgs[1].childOrgs[0].rootCA,
testOrgs[1].childOrgs[1].rootCA})[0].config
address := testOrgs[0].testServers(9302, [][]byte{})[0].address
//create a GRPCServer
srv, err := comm.NewGRPCServer(address, serverConfig)
if err != nil {
t.Fatalf("Failed to create GRPCServer due to: %s", err.Error())
}
//register the GRPC test server and start the GRPCServer
testpb.RegisterTestServiceServer(srv.Server(), &testServiceServer{})
go srv.Start()
defer srv.Stop()
//need to wait for the following go routines to finish
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
//now remove the root CAs for the untrusted clients
err := srv.RemoveClientRootCAs([][]byte{testOrgs[1].childOrgs[0].rootCA,
testOrgs[1].childOrgs[1].rootCA})
if err != nil {
t.Fatal("Failed to remove client root CAs")
}
}()
wg.Add(1)
go func() {
defer wg.Done()
//set client root CAs
err := srv.SetClientRootCAs([][]byte{testOrgs[1].childOrgs[0].rootCA,
testOrgs[1].childOrgs[1].rootCA})
if err != nil {
t.Fatal("Failed to set client root CAs")
}
}()
//TODO: enable this after creating a custom type for grpc.TransportCredentials
/*
clientConfig := testOrgs[1].childOrgs[0].trustedClients([][]byte{testOrgs[0].rootCA})[0]
wg.Add(1)
go func() {
defer wg.Done()
_, _ = invokeEmptyCall(address, []grpc.DialOption{
grpc.WithTransportCredentials(credentials.NewTLS(clientConfig))})
}()
*/
wg.Add(1)
go func() {
defer wg.Done()
//now append the root CAs for the untrusted clients
err := srv.AppendClientRootCAs([][]byte{testOrgs[1].childOrgs[0].rootCA,
testOrgs[1].childOrgs[1].rootCA})
if err != nil {
t.Fatal("Failed to append client root CAs")
}
}()
wg.Add(1)
go func() {
defer wg.Done()
//set client root CAs
err := srv.SetClientRootCAs([][]byte{testOrgs[1].childOrgs[0].rootCA,
testOrgs[1].childOrgs[1].rootCA})
if err != nil {
t.Fatal("Failed to set client root CAs")
}
}()
wg.Wait()
}
func TestSetClientRootCAs(t *testing.T) {
t.Parallel()
//get the config for one of our Org1 test servers
serverConfig := testOrgs[0].testServers(9303, [][]byte{})[0].config
address := testOrgs[0].testServers(9303, [][]byte{})[0].address
//create a GRPCServer
srv, err := comm.NewGRPCServer(address, serverConfig)
if err != nil {
t.Fatalf("Failed to create GRPCServer due to: %s", err.Error())
}
//register the GRPC test server and start the GRPCServer
testpb.RegisterTestServiceServer(srv.Server(), &testServiceServer{})
go srv.Start()
defer srv.Stop()
//should not be needed but just in case
time.Sleep(10 * time.Millisecond)
//set up our test clients
//Org1
clientConfigOrg1Child1 := testOrgs[0].childOrgs[0].trustedClients([][]byte{testOrgs[0].rootCA})[0]
clientConfigOrg1Child2 := testOrgs[0].childOrgs[1].trustedClients([][]byte{testOrgs[0].rootCA})[0]
clientConfigsOrg1Children := []*tls.Config{clientConfigOrg1Child1, clientConfigOrg1Child2}
org1ChildRootCAs := [][]byte{testOrgs[0].childOrgs[0].rootCA,
testOrgs[0].childOrgs[1].rootCA}
//Org2
clientConfigOrg2Child1 := testOrgs[1].childOrgs[0].trustedClients([][]byte{testOrgs[0].rootCA})[0]
clientConfigOrg2Child2 := testOrgs[1].childOrgs[1].trustedClients([][]byte{testOrgs[0].rootCA})[0]
clientConfigsOrg2Children := []*tls.Config{clientConfigOrg2Child1, clientConfigOrg2Child2}
org2ChildRootCAs := [][]byte{testOrgs[1].childOrgs[0].rootCA,
testOrgs[1].childOrgs[1].rootCA}
//initially set client CAs to Org1 children
err = srv.SetClientRootCAs(org1ChildRootCAs)
if err != nil {
t.Fatalf("SetClientRootCAs failed due to: %s", err.Error())
}
//clientConfigsOrg1Children are currently trusted
for i, clientConfig := range clientConfigsOrg1Children {
//invoke the EmptyCall service
_, err = invokeEmptyCall(address, []grpc.DialOption{
grpc.WithTransportCredentials(credentials.NewTLS(clientConfig))})
//we expect success as these are trusted clients
if err != nil {
t.Fatalf("Trusted client%d failed to connect to %s with error: %s",
i, address, err.Error())
} else {
t.Logf("Trusted client%d successfully connected to %s", i, address)
}
}
//clientConfigsOrg2Children are currently not trusted
for j, clientConfig := range clientConfigsOrg2Children {
//invoke the EmptyCall service
_, err = invokeEmptyCall(address, []grpc.DialOption{
grpc.WithTransportCredentials(credentials.NewTLS(clientConfig))})
//we expect failure as these are now untrusted clients
if err != nil {
t.Logf("Untrusted client%d was correctly rejected by %s", j, address)
} else {
t.Fatalf("Untrusted client %d should not have been able to connect to %s", j,
address)
}
}
//now set client CAs to Org2 children
err = srv.SetClientRootCAs(org2ChildRootCAs)
if err != nil {
t.Fatalf("SetClientRootCAs failed due to: %s", err.Error())
}
//now reverse trusted and not trusted
//clientConfigsOrg2Children are now trusted
for i, clientConfig := range clientConfigsOrg2Children {
//invoke the EmptyCall service
_, err = invokeEmptyCall(address, []grpc.DialOption{
grpc.WithTransportCredentials(credentials.NewTLS(clientConfig))})
//we expect success as these are trusted clients
if err != nil {
t.Fatalf("Trusted client%d failed to connect to %s with error: %s",
i, address, err.Error())
} else {
t.Logf("Trusted client%d successfully connected to %s", i, address)
}
}
//clientConfigsOrg1Children are now untrusted
for j, clientConfig := range clientConfigsOrg1Children {
//invoke the EmptyCall service
_, err = invokeEmptyCall(address, []grpc.DialOption{
grpc.WithTransportCredentials(credentials.NewTLS(clientConfig))})
//we expect failure as these are now untrusted clients
if err != nil {
t.Logf("Untrusted client%d was correctly rejected by %s", j, address)
} else {
t.Fatalf("Untrusted client %d should not have been able to connect to %s", j,
address)
}
}
}
func TestKeepaliveNoClientResponse(t *testing.T) {
t.Parallel()
// set up GRPCServer instance
kap := comm.KeepaliveOptions{
ServerKeepaliveTime: 2,
ServerKeepaliveTimeout: 1,
}
comm.SetKeepaliveOptions(kap)
testAddress := "localhost:9400"
srv, err := comm.NewGRPCServer(testAddress, comm.SecureServerConfig{})
assert.NoError(t, err, "Unexpected error starting GRPCServer")
go srv.Start()
defer srv.Stop()
// test connection close if client does not respond to ping
// net client will not respond to keepalive
client, err := net.Dial("tcp", testAddress)
assert.NoError(t, err, "Unexpected error dialing GRPCServer")
defer client.Close()
// sleep past keepalive timeout
time.Sleep(4 * time.Second)
data := make([]byte, 24)
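// read until the server closes the connection; io.EOF indicates the
// keepalive enforcement closed the unresponsive connection as expected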
for {
_, err = client.Read(data)
if err == nil {
continue
}
assert.EqualError(t, err, io.EOF.Error(), "Expected io.EOF")
break
}
}
func TestKeepaliveClientResponse(t *testing.T) {
t.Parallel()
// set up GRPCServer instance
kap := comm.KeepaliveOptions{
ServerKeepaliveTime: 2,
ServerKeepaliveTimeout: 1,
}
comm.SetKeepaliveOptions(kap)
testAddress := "localhost:9401"
srv, err := comm.NewGRPCServer(testAddress, comm.SecureServerConfig{})
assert.NoError(t, err, "Unexpected error starting GRPCServer")
go srv.Start()
defer srv.Stop()
// test that connection does not close with response to ping
clientTransport, err := transport.NewClientTransport(context.Background(),
transport.TargetInfo{Addr: testAddress}, transport.ConnectOptions{})
assert.NoError(t, err, "Unexpected error creating client transport")
defer clientTransport.Close()
// sleep past keepalive timeout
time.Sleep(4 * time.Second)
// try to create a stream
_, err = clientTransport.NewStream(context.Background(), &transport.CallHdr{})
assert.NoError(t, err, "Unexpected error creating stream")
}
setup.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-

from setuptools import setup

with open("README.rst", "rt") as inf:
    readme = inf.read()

ver_dic = {}
with open("cgen/version.py") as version_file:
    version_file_contents = version_file.read()

exec(compile(version_file_contents, "cgen/version.py", 'exec'), ver_dic)

setup(
    name="cgen",
    version=ver_dic["VERSION_TEXT"],
    description="C/C++ source generation from an AST",
    long_description=readme,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Other Audience',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Topic :: Scientific/Engineering',
        'Topic :: Software Development :: Libraries',
        'Topic :: Utilities',
    ],
    author="Andreas Kloeckner",
    author_email="[email protected]",
    license="MIT",
    url="http://documen.tician.de/cgen/",
    packages=["cgen"],
    python_requires="~=3.6",
    install_requires=[
        "pytools>=2015.1.2",
        "numpy>=1.6",
    ])
similarity.rs

//! This submodule provides some basic similarity measures
//!
//! It supports sparse vectors from `sprs` which seems to be the most popular
//! library for sparse algebra.

use sprs::CsVec;

/// Type for a similarity function
pub type Similarity = fn(&CsVec<f64>, &CsVec<f64>) -> f64;

/// Cosine similarity between two vectors.
///
/// Returns zero if one of the vectors is zero.
pub fn cosine(a: &CsVec<f64>, b: &CsVec<f64>) -> f64 {
    let norms = a.dot(a) * b.dot(b);
    if norms > 0.0 {
        a.dot(b) / norms.sqrt()
    } else {
        0.0
    }
}
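
// A minimal usage sketch (hypothetical values; assumes `CsVec::new(dim, indices, data)`
// from the `sprs` crate):
//
//     let a = CsVec::new(3, vec![0, 2], vec![1.0, 2.0]);
//     let b = CsVec::new(3, vec![0, 1], vec![3.0, 4.0]);
//     let sim: Similarity = cosine;
//     assert!(sim(&a, &b) > 0.0 && sim(&a, &b) <= 1.0);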
pgp_encrypt.go

// Copyright 2015 Keybase, Inc. All rights reserved. Use of
// this source code is governed by the included BSD license.
package engine
import (
"errors"
"io"
"github.com/keybase/client/go/libkb"
keybase1 "github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-crypto/openpgp/armor"
)
type PGPEncryptArg struct {
Recips []string // user assertions
Source io.Reader
Sink io.WriteCloser
NoSign bool
NoSelf bool
BinaryOutput bool
KeyQuery string
}
// PGPEncrypt encrypts data read from a source into a sink
// for a set of users. It will track them if necessary.
type PGPEncrypt struct {
arg *PGPEncryptArg
me *libkb.User
libkb.Contextified
}
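// A minimal usage sketch (hypothetical src, snk and g values; error handling elided):
//
//	eng := NewPGPEncrypt(&PGPEncryptArg{Recips: []string{"alice"}, Source: src, Sink: snk}, g)
//	err := RunEngine(eng, ctx)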
// NewPGPEncrypt creates a PGPEncrypt engine.
func NewPGPEncrypt(arg *PGPEncryptArg, g *libkb.GlobalContext) *PGPEncrypt {
return &PGPEncrypt{
arg: arg,
Contextified: libkb.NewContextified(g),
}
} | return "PGPEncrypt"
}
// GetPrereqs returns the engine prereqs.
func (e *PGPEncrypt) Prereqs() Prereqs {
return Prereqs{}
}
// RequiredUIs returns the required UIs.
func (e *PGPEncrypt) RequiredUIs() []libkb.UIKind {
// context.SecretKeyPromptArg requires SecretUI
return []libkb.UIKind{libkb.SecretUIKind}
}
// SubConsumers returns the other UI consumers for this engine.
func (e *PGPEncrypt) SubConsumers() []libkb.UIConsumer {
return []libkb.UIConsumer{
&PGPKeyfinder{},
&ResolveThenIdentify2{},
}
}
// Run starts the engine.
func (e *PGPEncrypt) Run(ctx *Context) error {
// verify valid options based on logged in state:
ok, uid, err := IsLoggedIn(e, ctx)
if err != nil {
return err
}
if !ok {
// not logged in. this is fine, unless they requested signing the message.
if !e.arg.NoSign {
return libkb.LoginRequiredError{Context: "you must be logged in to sign"}
}
// or trying to encrypt for self
if !e.arg.NoSelf {
return libkb.LoginRequiredError{Context: "you must be logged in to encrypt for yourself"}
}
} else {
me, err := libkb.LoadMeByUID(ctx.GetNetContext(), e.G(), uid)
if err != nil {
return err
}
e.me = me
}
var mykey *libkb.PGPKeyBundle
var signer *libkb.PGPKeyBundle
if !e.arg.NoSign {
ska := libkb.SecretKeyArg{
Me: e.me,
KeyType: libkb.PGPKeyType,
KeyQuery: e.arg.KeyQuery,
}
key, err := e.G().Keyrings.GetSecretKeyWithPrompt(ctx.SecretKeyPromptArg(ska, "command-line signature"))
if err != nil {
return err
}
var ok bool
mykey, ok = key.(*libkb.PGPKeyBundle)
if !ok {
return errors.New("Can only sign with PGP keys")
}
signer = mykey
}
usernames, err := e.verifyUsers(ctx, e.arg.Recips, ok)
if err != nil {
return err
}
kfarg := &PGPKeyfinderArg{
Usernames: usernames,
}
kf := NewPGPKeyfinder(kfarg, e.G())
if err := RunEngine(kf, ctx); err != nil {
return err
}
uplus := kf.UsersPlusKeys()
var writer io.WriteCloser
if e.arg.BinaryOutput {
writer = e.arg.Sink
} else {
aw, err := armor.Encode(e.arg.Sink, "PGP MESSAGE", libkb.PGPArmorHeaders)
if err != nil {
return err
}
writer = aw
}
ks := newKeyset()
if !e.arg.NoSelf {
if mykey == nil {
// need to load the public key for the logged in user
mykey, err = e.loadSelfKey()
if err != nil {
return err
}
}
// mykey could still be nil
if mykey != nil {
ks.Add(mykey)
}
}
for _, up := range uplus {
for _, k := range up.Keys {
ks.Add(k)
}
}
recipients := ks.Sorted()
if err := libkb.PGPEncrypt(e.arg.Source, writer, signer, recipients); err != nil {
return err
}
if !e.arg.BinaryOutput {
return e.arg.Sink.Close()
}
return nil
}
func (e *PGPEncrypt) loadSelfKey() (*libkb.PGPKeyBundle, error) {
me, err := libkb.LoadMe(libkb.NewLoadUserArg(e.G()))
if err != nil {
return nil, err
}
keys := me.FilterActivePGPKeys(true, e.arg.KeyQuery)
if len(keys) == 0 {
return nil, libkb.NoKeyError{Msg: "No PGP key found for encrypting for self"}
}
return keys[0], nil
}
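// verifyUsers resolves and identifies each recipient assertion, returning the
// matching usernames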
func (e *PGPEncrypt) verifyUsers(ctx *Context, assertions []string, loggedIn bool) ([]string, error) {
var names []string
for _, userAssert := range assertions {
arg := keybase1.Identify2Arg{
UserAssertion: userAssert,
Reason: keybase1.IdentifyReason{
Type: keybase1.IdentifyReasonType_ENCRYPT,
},
AlwaysBlock: true,
}
eng := NewResolveThenIdentify2(e.G(), &arg)
if err := RunEngine(eng, ctx); err != nil {
return nil, libkb.IdentifyFailedError{Assertion: userAssert, Reason: err.Error()}
}
res := eng.Result()
names = append(names, res.Upk.Username)
}
return names, nil
}
// keyset maintains a set of pgp keys, preserving insertion order.
type keyset struct {
index []keybase1.KID
keys map[keybase1.KID]*libkb.PGPKeyBundle
}
// newKeyset creates an empty keyset.
func newKeyset() *keyset {
return &keyset{keys: make(map[keybase1.KID]*libkb.PGPKeyBundle)}
}
// Add adds bundle to the keyset. If a key already exists, it
// will be ignored.
func (k *keyset) Add(bundle *libkb.PGPKeyBundle) {
kid := bundle.GetKID()
if _, ok := k.keys[kid]; ok {
return
}
k.keys[kid] = bundle
k.index = append(k.index, kid)
}
// Sorted returns the unique keys in insertion order.
func (k *keyset) Sorted() []*libkb.PGPKeyBundle {
var sorted []*libkb.PGPKeyBundle
for _, kid := range k.index {
sorted = append(sorted, k.keys[kid])
}
return sorted
}
test.py

import grpc

from feast.protos.feast.serving.ServingService_pb2 import (
    FeatureList,
    GetOnlineFeaturesRequest,
)
from feast.protos.feast.serving.ServingService_pb2_grpc import ServingServiceStub
from feast.protos.feast.types.Value_pb2 import RepeatedValue, Value


# Sample logic to fetch from a local gRPC java server deployed at 6566
def fetch_java():
    channel = grpc.insecure_channel("localhost:6566")
    stub = ServingServiceStub(channel)
    feature_refs = FeatureList(val=["driver_hourly_stats:conv_rate"])
    entity_rows = {
        "driver_id": RepeatedValue(
            val=[Value(int64_val=driver_id) for driver_id in range(1001, 1003)]
        )
    }
    print(
        stub.GetOnlineFeatures(
            GetOnlineFeaturesRequest(features=feature_refs, entities=entity_rows)
        )
    )


if __name__ == "__main__":
    fetch_java()
des_010_install_plugins.js

#!/usr/bin/env node
//this hook installs all your plugins
// add your plugins to this list--either
// the identifier, the filesystem location
// or the URL
var pluginlist = [
"cordova-plugin-camera",
"cordova-plugin-console",
"cordova-plugin-device",
"cordova-plugin-file",
"cordova-plugin-file-transfer",
"https://github.com/jcesarmobile/FilePicker-Phonegap-iOS-Plugin.git",
"https://github.com/jessisena/MFileChooser.git",
"https://github.com/apache/cordova-plugin-geolocation.git",
"cordova-plugin-inappbrowser",
"cordova-plugin-network-information",
"cordova-plugin-x-socialsharing",
"cordova-plugin-x-toast",
"org.pbernasconi.progressindicator",
"cordova-plugin-splashscreen",
"cordova-plugin-statusbar",
"cordova-plugin-whitelist",
"ionic-plugin-keyboard",
//ionic plugin add https://github.com/danwilson/google-analytics-plugin.git
//Removed because it caused a conflict with IDFA
"[email protected]",
"cordova-custom-config",
"cordova.plugins.diagnostic",
//iOS
// "cordova-sqlite-ext",
//android
"https://github.com/jessisena/my-cordova-sqlite-storage.git"
];
// no need to configure below
var fs = require('fs');
var path = require('path');
var exec = require('child_process').exec;
function puts(error, stdout, stderr) {
console.log(stdout);
}
pluginlist.forEach(function(plug) {
exec("ionic plugin add " + plug, puts);
});
lower.rs

//! AST -> `ItemTree` lowering code.
use super::*;
use crate::{
attr::Attrs,
generics::{GenericParams, TypeParamData, TypeParamProvenance},
};
use hir_expand::{ast_id_map::AstIdMap, hygiene::Hygiene, HirFileId};
use ra_arena::map::ArenaMap;
use ra_syntax::{
ast::{self, ModuleItemOwner},
SyntaxNode,
};
use smallvec::SmallVec;
use std::{collections::hash_map::Entry, mem, sync::Arc};
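/// Wraps an arena index in the typed id used for items in this file's tree.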
fn id<N: ItemTreeNode>(index: Idx<N>) -> FileItemTreeId<N> {
FileItemTreeId { index, _p: PhantomData }
}
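/// The item(s) produced by lowering a single syntax item; most items lower to
/// exactly one `ModItem`, which the inline `SmallVec` stores without allocating.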
struct ModItems(SmallVec<[ModItem; 1]>);
impl<T> From<T> for ModItems
where
T: Into<ModItem>,
{
fn from(t: T) -> Self {
ModItems(SmallVec::from_buf([t.into(); 1]))
}
}
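/// Context for lowering the items of one file into an `ItemTree`.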
pub(super) struct Ctx {
tree: ItemTree,
hygiene: Hygiene,
file: HirFileId,
source_ast_id_map: Arc<AstIdMap>,
body_ctx: crate::body::LowerCtx,
inner_items: Vec<ModItem>,
forced_visibility: Option<RawVisibilityId>,
}
impl Ctx {
pub(super) fn new(db: &dyn DefDatabase, hygiene: Hygiene, file: HirFileId) -> Self {
Self {
tree: ItemTree::empty(),
hygiene,
file,
source_ast_id_map: db.ast_id_map(file),
body_ctx: crate::body::LowerCtx::new(db, file),
inner_items: Vec::new(),
forced_visibility: None,
}
}
pub(super) fn lower_module_items(mut self, item_owner: &dyn ModuleItemOwner) -> ItemTree {
self.tree.top_level = item_owner
.items()
.flat_map(|item| self.lower_mod_item(&item, false))
.flat_map(|items| items.0)
.collect();
self.tree
}
pub(super) fn lower_inner_items(mut self, within: &SyntaxNode) -> ItemTree {
self.collect_inner_items(within);
self.tree
}
fn data(&mut self) -> &mut ItemTreeData {
self.tree.data_mut()
}
fn lower_mod_item(&mut self, item: &ast::ModuleItem, inner: bool) -> Option<ModItems> {
assert!(inner || self.inner_items.is_empty());
// Collect inner items for 1-to-1-lowered items.
match item {
ast::ModuleItem::StructDef(_)
| ast::ModuleItem::UnionDef(_)
| ast::ModuleItem::EnumDef(_)
| ast::ModuleItem::FnDef(_)
| ast::ModuleItem::TypeAliasDef(_)
| ast::ModuleItem::ConstDef(_)
| ast::ModuleItem::StaticDef(_)
| ast::ModuleItem::MacroCall(_) => {
// Skip this if we're already collecting inner items. We'll descend into all nodes
// already.
if !inner {
self.collect_inner_items(item.syntax());
}
}
// These are handled in their respective `lower_X` method (since we can't just blindly
// walk them).
ast::ModuleItem::TraitDef(_)
| ast::ModuleItem::ImplDef(_)
| ast::ModuleItem::ExternBlock(_) => {}
// These don't have inner items.
ast::ModuleItem::Module(_)
| ast::ModuleItem::ExternCrateItem(_)
| ast::ModuleItem::UseItem(_) => {}
};
let attrs = Attrs::new(item, &self.hygiene);
let items = match item {
ast::ModuleItem::StructDef(ast) => self.lower_struct(ast).map(Into::into),
ast::ModuleItem::UnionDef(ast) => self.lower_union(ast).map(Into::into),
ast::ModuleItem::EnumDef(ast) => self.lower_enum(ast).map(Into::into),
ast::ModuleItem::FnDef(ast) => self.lower_function(ast).map(Into::into),
ast::ModuleItem::TypeAliasDef(ast) => self.lower_type_alias(ast).map(Into::into),
ast::ModuleItem::StaticDef(ast) => self.lower_static(ast).map(Into::into),
ast::ModuleItem::ConstDef(ast) => Some(self.lower_const(ast).into()),
ast::ModuleItem::Module(ast) => self.lower_module(ast).map(Into::into),
ast::ModuleItem::TraitDef(ast) => self.lower_trait(ast).map(Into::into),
ast::ModuleItem::ImplDef(ast) => self.lower_impl(ast).map(Into::into),
ast::ModuleItem::UseItem(ast) => Some(ModItems(
self.lower_use(ast).into_iter().map(Into::into).collect::<SmallVec<_>>(),
)),
ast::ModuleItem::ExternCrateItem(ast) => self.lower_extern_crate(ast).map(Into::into),
ast::ModuleItem::MacroCall(ast) => self.lower_macro_call(ast).map(Into::into),
ast::ModuleItem::ExternBlock(ast) => {
Some(ModItems(self.lower_extern_block(ast).into_iter().collect::<SmallVec<_>>()))
}
};
if !attrs.is_empty() {
for item in items.iter().flat_map(|items| &items.0) {
self.add_attrs((*item).into(), attrs.clone());
}
}
items
}
fn add_attrs(&mut self, item: AttrOwner, attrs: Attrs) {
match self.tree.attrs.entry(item) {
Entry::Occupied(mut entry) => {
*entry.get_mut() = entry.get().merge(attrs);
}
Entry::Vacant(entry) => {
entry.insert(attrs);
}
}
}
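/// Walks all descendants of `container` and lowers any module items nested
/// inside it (e.g. items defined in function bodies), recording them as inner
/// items keyed by their AST id.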
fn collect_inner_items(&mut self, container: &SyntaxNode) {
let forced_vis = self.forced_visibility.take();
let mut inner_items = mem::take(&mut self.tree.inner_items);
inner_items.extend(
container.descendants().skip(1).filter_map(ast::ModuleItem::cast).filter_map(|item| {
let ast_id = self.source_ast_id_map.ast_id(&item);
Some((ast_id, self.lower_mod_item(&item, true)?.0))
}),
);
self.tree.inner_items = inner_items;
self.forced_visibility = forced_vis;
}
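/// Lowers an item that can appear inside a trait or impl, i.e. an associated item.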
fn lower_assoc_item(&mut self, item: &ast::ModuleItem) -> Option<AssocItem> {
match item {
ast::ModuleItem::FnDef(ast) => self.lower_function(ast).map(Into::into),
ast::ModuleItem::TypeAliasDef(ast) => self.lower_type_alias(ast).map(Into::into),
ast::ModuleItem::ConstDef(ast) => Some(self.lower_const(ast).into()),
ast::ModuleItem::MacroCall(ast) => self.lower_macro_call(ast).map(Into::into),
_ => None,
}
}
fn lower_struct(&mut self, strukt: &ast::StructDef) -> Option<FileItemTreeId<Struct>> {
let visibility = self.lower_visibility(strukt);
let name = strukt.name()?.as_name();
let generic_params = self.lower_generic_params(GenericsOwner::Struct, strukt);
let fields = self.lower_fields(&strukt.kind());
let ast_id = self.source_ast_id_map.ast_id(strukt);
let kind = match strukt.kind() {
ast::StructKind::Record(_) => StructDefKind::Record,
ast::StructKind::Tuple(_) => StructDefKind::Tuple,
ast::StructKind::Unit => StructDefKind::Unit,
};
let res = Struct { name, visibility, generic_params, fields, ast_id, kind };
Some(id(self.data().structs.alloc(res)))
}
fn lower_fields(&mut self, strukt_kind: &ast::StructKind) -> Fields {
match strukt_kind {
ast::StructKind::Record(it) => {
let range = self.lower_record_fields(it);
Fields::Record(range)
}
ast::StructKind::Tuple(it) => {
let range = self.lower_tuple_fields(it);
Fields::Tuple(range)
}
ast::StructKind::Unit => Fields::Unit,
}
}
fn lower_record_fields(&mut self, fields: &ast::RecordFieldDefList) -> IdRange<Field> {
let start = self.next_field_idx();
for field in fields.fields() {
if let Some(data) = self.lower_record_field(&field) {
let idx = self.data().fields.alloc(data);
self.add_attrs(idx.into(), Attrs::new(&field, &self.hygiene));
}
}
let end = self.next_field_idx();
IdRange::new(start..end)
}
fn lower_record_field(&mut self, field: &ast::RecordFieldDef) -> Option<Field> {
let name = field.name()?.as_name();
let visibility = self.lower_visibility(field);
let type_ref = self.lower_type_ref_opt(field.ascribed_type());
let res = Field { name, type_ref, visibility };
Some(res)
}
fn lower_tuple_fields(&mut self, fields: &ast::TupleFieldDefList) -> IdRange<Field> {
let start = self.next_field_idx();
for (i, field) in fields.fields().enumerate() {
let data = self.lower_tuple_field(i, &field);
let idx = self.data().fields.alloc(data);
self.add_attrs(idx.into(), Attrs::new(&field, &self.hygiene));
}
let end = self.next_field_idx();
IdRange::new(start..end)
}
fn lower_tuple_field(&mut self, idx: usize, field: &ast::TupleFieldDef) -> Field {
let name = Name::new_tuple_field(idx);
let visibility = self.lower_visibility(field);
let type_ref = self.lower_type_ref_opt(field.type_ref());
let res = Field { name, type_ref, visibility };
res
}
fn lower_union(&mut self, union: &ast::UnionDef) -> Option<FileItemTreeId<Union>> {
let visibility = self.lower_visibility(union);
let name = union.name()?.as_name();
let generic_params = self.lower_generic_params(GenericsOwner::Union, union);
let fields = match union.record_field_def_list() {
Some(record_field_def_list) => {
self.lower_fields(&StructKind::Record(record_field_def_list))
}
None => Fields::Record(IdRange::new(self.next_field_idx()..self.next_field_idx())),
};
let ast_id = self.source_ast_id_map.ast_id(union);
let res = Union { name, visibility, generic_params, fields, ast_id };
Some(id(self.data().unions.alloc(res)))
}
fn lower_enum(&mut self, enum_: &ast::EnumDef) -> Option<FileItemTreeId<Enum>> {
let visibility = self.lower_visibility(enum_);
let name = enum_.name()?.as_name();
let generic_params = self.lower_generic_params(GenericsOwner::Enum, enum_);
let variants = match &enum_.variant_list() {
Some(variant_list) => self.lower_variants(variant_list),
None => IdRange::new(self.next_variant_idx()..self.next_variant_idx()),
};
let ast_id = self.source_ast_id_map.ast_id(enum_);
let res = Enum { name, visibility, generic_params, variants, ast_id };
Some(id(self.data().enums.alloc(res)))
}
fn lower_variants(&mut self, variants: &ast::EnumVariantList) -> IdRange<Variant> {
let start = self.next_variant_idx();
for variant in variants.variants() {
if let Some(data) = self.lower_variant(&variant) {
let idx = self.data().variants.alloc(data);
self.add_attrs(idx.into(), Attrs::new(&variant, &self.hygiene));
}
}
let end = self.next_variant_idx();
IdRange::new(start..end)
}
fn lower_variant(&mut self, variant: &ast::EnumVariant) -> Option<Variant> {
let name = variant.name()?.as_name();
let fields = self.lower_fields(&variant.kind());
let res = Variant { name, fields };
Some(res)
}
fn lower_function(&mut self, func: &ast::FnDef) -> Option<FileItemTreeId<Function>> {
let visibility = self.lower_visibility(func);
let name = func.name()?.as_name();
let mut params = Vec::new();
let mut has_self_param = false;
if let Some(param_list) = func.param_list() {
if let Some(self_param) = param_list.self_param() {
let self_type = match self_param.ascribed_type() {
Some(type_ref) => TypeRef::from_ast(&self.body_ctx, type_ref),
None => {
let self_type = TypeRef::Path(name![Self].into());
match self_param.kind() {
ast::SelfParamKind::Owned => self_type,
ast::SelfParamKind::Ref => {
TypeRef::Reference(Box::new(self_type), Mutability::Shared)
}
ast::SelfParamKind::MutRef => {
TypeRef::Reference(Box::new(self_type), Mutability::Mut)
}
}
}
};
params.push(self_type);
has_self_param = true;
}
for param in param_list.params() {
let type_ref = TypeRef::from_ast_opt(&self.body_ctx, param.ascribed_type());
params.push(type_ref);
}
}
let mut is_varargs = false;
if let Some(params) = func.param_list() {
if let Some(last) = params.params().last() {
is_varargs = last.dotdotdot_token().is_some();
}
}
let ret_type = match func.ret_type().and_then(|rt| rt.type_ref()) {
Some(type_ref) => TypeRef::from_ast(&self.body_ctx, type_ref),
_ => TypeRef::unit(),
};
let ret_type = if func.async_token().is_some() {
let future_impl = desugar_future_path(ret_type);
let ty_bound = TypeBound::Path(future_impl);
TypeRef::ImplTrait(vec![ty_bound])
} else {
ret_type
};
let ast_id = self.source_ast_id_map.ast_id(func);
let mut res = Function {
name,
visibility,
generic_params: GenericParamsId::EMPTY,
has_self_param,
is_unsafe: func.unsafe_token().is_some(),
params: params.into_boxed_slice(),
is_varargs,
ret_type,
ast_id,
};
res.generic_params = self.lower_generic_params(GenericsOwner::Function(&res), func);
Some(id(self.data().functions.alloc(res)))
}
fn lower_type_alias(
&mut self,
type_alias: &ast::TypeAliasDef,
) -> Option<FileItemTreeId<TypeAlias>> {
let name = type_alias.name()?.as_name();
let type_ref = type_alias.type_ref().map(|it| self.lower_type_ref(&it));
let visibility = self.lower_visibility(type_alias);
let bounds = self.lower_type_bounds(type_alias);
let generic_params = self.lower_generic_params(GenericsOwner::TypeAlias, type_alias);
let ast_id = self.source_ast_id_map.ast_id(type_alias);
let res = TypeAlias {
name,
visibility,
bounds: bounds.into_boxed_slice(),
generic_params,
type_ref,
ast_id,
};
Some(id(self.data().type_aliases.alloc(res)))
}
fn lower_static(&mut self, static_: &ast::StaticDef) -> Option<FileItemTreeId<Static>> {
let name = static_.name()?.as_name();
let type_ref = self.lower_type_ref_opt(static_.ascribed_type());
let visibility = self.lower_visibility(static_);
let mutable = static_.mut_token().is_some();
let ast_id = self.source_ast_id_map.ast_id(static_);
let res = Static { name, visibility, mutable, type_ref, ast_id };
Some(id(self.data().statics.alloc(res)))
}
fn lower_const(&mut self, konst: &ast::ConstDef) -> FileItemTreeId<Const> {
let name = konst.name().map(|it| it.as_name());
let type_ref = self.lower_type_ref_opt(konst.ascribed_type());
let visibility = self.lower_visibility(konst);
let ast_id = self.source_ast_id_map.ast_id(konst);
let res = Const { name, visibility, type_ref, ast_id };
id(self.data().consts.alloc(res))
}
fn lower_module(&mut self, module: &ast::Module) -> Option<FileItemTreeId<Mod>> {
let name = module.name()?.as_name();
let visibility = self.lower_visibility(module);
let kind = if module.semicolon_token().is_some() {
ModKind::Outline {}
} else {
ModKind::Inline {
items: module
.item_list()
.map(|list| {
list.items()
.flat_map(|item| self.lower_mod_item(&item, false))
.flat_map(|items| items.0)
.collect()
})
.unwrap_or_else(|| {
mark::hit!(name_res_works_for_broken_modules);
Box::new([]) as Box<[_]>
}),
}
};
let ast_id = self.source_ast_id_map.ast_id(module);
let res = Mod { name, visibility, kind, ast_id };
Some(id(self.data().mods.alloc(res)))
}
fn lower_trait(&mut self, trait_def: &ast::TraitDef) -> Option<FileItemTreeId<Trait>> {
let name = trait_def.name()?.as_name();
let visibility = self.lower_visibility(trait_def);
let generic_params =
self.lower_generic_params_and_inner_items(GenericsOwner::Trait(trait_def), trait_def);
let auto = trait_def.auto_token().is_some();
let items = trait_def.item_list().map(|list| {
self.with_inherited_visibility(visibility, |this| {
list.items()
.filter_map(|item| {
let attrs = Attrs::new(&item, &this.hygiene);
this.collect_inner_items(item.syntax());
this.lower_assoc_item(&item).map(|item| {
this.add_attrs(ModItem::from(item).into(), attrs);
item
})
})
.collect()
})
});
let ast_id = self.source_ast_id_map.ast_id(trait_def);
let res = Trait {
name,
visibility,
generic_params,
auto,
items: items.unwrap_or_default(),
ast_id,
};
Some(id(self.data().traits.alloc(res)))
}
fn lower_impl(&mut self, impl_def: &ast::ImplDef) -> Option<FileItemTreeId<Impl>> {
let generic_params =
self.lower_generic_params_and_inner_items(GenericsOwner::Impl, impl_def);
let target_trait = impl_def.target_trait().map(|tr| self.lower_type_ref(&tr));
let target_type = self.lower_type_ref(&impl_def.target_type()?);
let is_negative = impl_def.excl_token().is_some();
// We cannot use `assoc_items()` here as that does not include macro calls.
let items = impl_def
.item_list()
.into_iter()
.flat_map(|it| it.items())
.filter_map(|item| {
self.collect_inner_items(item.syntax());
let assoc = self.lower_assoc_item(&item)?;
let attrs = Attrs::new(&item, &self.hygiene);
self.add_attrs(ModItem::from(assoc).into(), attrs);
Some(assoc)
})
.collect();
let ast_id = self.source_ast_id_map.ast_id(impl_def);
let res = Impl { generic_params, target_trait, target_type, is_negative, items, ast_id };
Some(id(self.data().impls.alloc(res)))
}
fn lower_use(&mut self, use_item: &ast::UseItem) -> Vec<FileItemTreeId<Import>> {
// FIXME: cfg_attr
let is_prelude = use_item.has_atom_attr("prelude_import");
let visibility = self.lower_visibility(use_item);
let ast_id = self.source_ast_id_map.ast_id(use_item);
// Every use item can expand to many `Import`s.
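        // For illustration (hypothetical input, not from this file): `use a::{b, c as d};`
        // expands to one `Import` for the path `a::b` and one for `a::c` carrying the alias `d`.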
let mut imports = Vec::new();
let tree = self.tree.data_mut();
ModPath::expand_use_item(
InFile::new(self.file, use_item.clone()),
&self.hygiene,
|path, _tree, is_glob, alias| {
imports.push(id(tree.imports.alloc(Import {
path,
alias,
visibility,
is_glob,
is_prelude,
ast_id,
})));
},
);
imports
}
fn lower_extern_crate(
&mut self,
extern_crate: &ast::ExternCrateItem,
) -> Option<FileItemTreeId<ExternCrate>> {
let path = ModPath::from_name_ref(&extern_crate.name_ref()?);
let alias = extern_crate.alias().map(|a| {
a.name().map(|it| it.as_name()).map_or(ImportAlias::Underscore, ImportAlias::Alias)
});
let visibility = self.lower_visibility(extern_crate);
let ast_id = self.source_ast_id_map.ast_id(extern_crate);
// FIXME: cfg_attr
let is_macro_use = extern_crate.has_atom_attr("macro_use");
let res = ExternCrate { path, alias, visibility, is_macro_use, ast_id };
Some(id(self.data().extern_crates.alloc(res)))
}
fn lower_macro_call(&mut self, m: &ast::MacroCall) -> Option<FileItemTreeId<MacroCall>> {
let name = m.name().map(|it| it.as_name());
let attrs = Attrs::new(m, &self.hygiene);
let path = ModPath::from_src(m.path()?, &self.hygiene)?;
let ast_id = self.source_ast_id_map.ast_id(m);
// FIXME: cfg_attr
let export_attr = attrs.by_key("macro_export");
let is_export = export_attr.exists();
let is_local_inner = if is_export {
export_attr.tt_values().map(|it| &it.token_trees).flatten().any(|it| match it {
tt::TokenTree::Leaf(tt::Leaf::Ident(ident)) => {
ident.text.contains("local_inner_macros")
}
_ => false,
})
} else {
false
};
let is_builtin = attrs.by_key("rustc_builtin_macro").exists();
let res = MacroCall { name, path, is_export, is_builtin, is_local_inner, ast_id };
Some(id(self.data().macro_calls.alloc(res)))
}
fn lower_extern_block(&mut self, block: &ast::ExternBlock) -> Vec<ModItem> {
block.extern_item_list().map_or(Vec::new(), |list| {
list.extern_items()
.filter_map(|item| {
self.collect_inner_items(item.syntax());
let attrs = Attrs::new(&item, &self.hygiene);
let id: ModItem = match item {
ast::ExternItem::FnDef(ast) => {
let func = self.lower_function(&ast)?;
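                            // Functions declared in `extern` blocks are unsafe to call,
                            // so they are force-marked as unsafe here.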
self.data().functions[func.index].is_unsafe = true;
func.into()
}
ast::ExternItem::StaticDef(ast) => {
let statik = self.lower_static(&ast)?;
statik.into()
}
};
self.add_attrs(id.into(), attrs);
Some(id)
})
.collect()
})
}
/// Lowers generics defined on `node` and collects inner items defined within.
fn lower_generic_params_and_inner_items(
&mut self,
owner: GenericsOwner<'_>,
node: &impl ast::TypeParamsOwner,
) -> GenericParamsId {
// Generics are part of item headers and may contain inner items we need to collect.
if let Some(params) = node.type_param_list() {
self.collect_inner_items(params.syntax());
}
if let Some(clause) = node.where_clause() {
self.collect_inner_items(clause.syntax());
}
self.lower_generic_params(owner, node)
}
fn lower_generic_params(
&mut self,
owner: GenericsOwner<'_>,
node: &impl ast::TypeParamsOwner,
) -> GenericParamsId {
let mut sm = &mut ArenaMap::default();
let mut generics = GenericParams::default();
match owner {
GenericsOwner::Function(func) => {
generics.fill(&self.body_ctx, sm, node);
// lower `impl Trait` in arguments
for param in &*func.params {
generics.fill_implicit_impl_trait_args(param);
}
}
GenericsOwner::Struct
| GenericsOwner::Enum
| GenericsOwner::Union
| GenericsOwner::TypeAlias => {
generics.fill(&self.body_ctx, sm, node);
}
GenericsOwner::Trait(trait_def) => {
// traits get the Self type as an implicit first type parameter
let self_param_id = generics.types.alloc(TypeParamData {
name: Some(name![Self]),
default: None,
provenance: TypeParamProvenance::TraitSelf,
});
sm.insert(self_param_id, Either::Left(trait_def.clone()));
// add super traits as bounds on Self
// i.e., trait Foo: Bar is equivalent to trait Foo where Self: Bar
let self_param = TypeRef::Path(name![Self].into());
generics.fill_bounds(&self.body_ctx, trait_def, self_param);
generics.fill(&self.body_ctx, &mut sm, node);
}
GenericsOwner::Impl => {
// Note that we don't add `Self` here: in `impl`s, `Self` is not a
// type-parameter, but rather is a type-alias for impl's target
// type, so this is handled by the resolver.
generics.fill(&self.body_ctx, &mut sm, node);
}
}
self.data().generics.alloc(generics)
}
fn lower_type_bounds(&mut self, node: &impl ast::TypeBoundsOwner) -> Vec<TypeBound> {
match node.type_bound_list() {
Some(bound_list) => {
bound_list.bounds().map(|it| TypeBound::from_ast(&self.body_ctx, it)).collect()
}
None => Vec::new(),
}
}
fn lower_visibility(&mut self, item: &impl ast::VisibilityOwner) -> RawVisibilityId {
let vis = match self.forced_visibility {
Some(vis) => return vis,
None => RawVisibility::from_ast_with_hygiene(item.visibility(), &self.hygiene),
};
self.data().vis.alloc(vis)
}
fn lower_type_ref(&self, type_ref: &ast::TypeRef) -> TypeRef {
TypeRef::from_ast(&self.body_ctx, type_ref.clone())
}
fn lower_type_ref_opt(&self, type_ref: Option<ast::TypeRef>) -> TypeRef {
type_ref.map(|ty| self.lower_type_ref(&ty)).unwrap_or(TypeRef::Error)
}
/// Forces the visibility `vis` to be used for all items lowered during execution of `f`.
fn with_inherited_visibility<R>(
&mut self,
vis: RawVisibilityId,
f: impl FnOnce(&mut Self) -> R,
) -> R {
let old = mem::replace(&mut self.forced_visibility, Some(vis));
let res = f(self);
self.forced_visibility = old;
res
}
fn next_field_idx(&self) -> Idx<Field> {
Idx::from_raw(RawId::from(
self.tree.data.as_ref().map_or(0, |data| data.fields.len() as u32),
))
}
fn | (&self) -> Idx<Variant> {
Idx::from_raw(RawId::from(
self.tree.data.as_ref().map_or(0, |data| data.variants.len() as u32),
))
}
}
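// Illustration of the desugaring below (a sketch, not code from this file):
// `async fn f() -> u32` gets the lowered return type
// `impl core::future::Future<Output = u32>`, i.e. the original return type becomes
// the `Output` associated-type binding on the final segment of the `Future` path.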
fn desugar_future_path(orig: TypeRef) -> Path {
let path = path![core::future::Future];
let mut generic_args: Vec<_> = std::iter::repeat(None).take(path.segments.len() - 1).collect();
let mut last = GenericArgs::empty();
let binding =
AssociatedTypeBinding { name: name![Output], type_ref: Some(orig), bounds: Vec::new() };
last.bindings.push(binding);
generic_args.push(Some(Arc::new(last)));
Path::from_known_path(path, generic_args)
}
enum GenericsOwner<'a> {
/// We need access to the partially-lowered `Function` for lowering `impl Trait` in argument
/// position.
Function(&'a Function),
Struct,
Enum,
Union,
/// The `TraitDef` is needed to fill the source map for the implicit `Self` parameter.
Trait(&'a ast::TraitDef),
TypeAlias,
Impl,
}
| next_variant_idx |
ring_test.go | package ring_test
import (
"sync"
"testing"
"github.com/ecnepsnai/ring"
)
func TestParallelAdd(t *testing.T) {
t.Parallel()
maximum := 15
slice := ring.New(maximum)
wg := sync.WaitGroup{}
wg.Add(3)
go func() {
defer wg.Done()
i := 0
for i < maximum+1 {
slice.Add("1")
i++
}
}()
go func() {
defer wg.Done()
i := 0
for i < maximum+1 {
slice.Add("2")
i++
}
}()
go func() {
defer wg.Done()
i := 0
for i < maximum+1 {
slice.Add("3")
i++
}
}()
wg.Wait()
values := slice.All()
length := len(values)
if length != maximum {
t.Errorf("Ring length is not correct. Expected %d got %d", maximum, length)
}
last := slice.Last()
if last != values[0] {
t.Error("Incorrect sort order for all ring entries")
}
}
func TestAll(t *testing.T) {
t.Parallel()
ring := ring.New(10)
ring.Add(1)
objects := ring.All()
count := len(objects)
if count != 1 {
t.Errorf("Unexpected number of objects returned. Expected %d got %d", 1, count)
}
}
func TestEmptyAll(t *testing.T) {
t.Parallel()
slice := ring.New(15)
all := slice.All()
length := len(all)
if length != 0 {
t.Error("Non-empty array returned for empty ring")
}
}
func TestEmptyLast(t *testing.T) |
func TestTruncate(t *testing.T) {
t.Parallel()
slice := ring.New(5)
i := 0
for i < 10 {
slice.Add(i)
i++
}
length := len(slice.All())
if length != 5 {
t.Errorf("Unexpected length returned. Expected 5 got %d", length)
}
slice.Truncate()
length = len(slice.All())
if length != 0 {
t.Errorf("Unexpected length returned. Expected 0 got %d", length)
}
last := slice.Last()
if last != nil {
t.Error("Non-nil last value returned for empty ring")
}
}
| {
t.Parallel()
slice := ring.New(15)
last := slice.Last()
if last != nil {
t.Error("Non-nil last value returned for empty ring")
}
} |
ecdsa_test.py | import copy
from lto.accounts.ecdsa.account_factory_ecdsa import AccountFactoryECDSA
import base58
import pytest
from lto.transactions.anchor import Anchor
class TestAccountECDSA():
factory = AccountFactoryECDSA('L')
seed = 'divert manage prefer child kind maximum october hand manual connect fitness small symptom range sleep'
account = factory.create()
def test_make_key(self):
assert self.factory._MakeKey(self.seed).to_string() == (b'\xa7\x90:j\x80\xdb\x00}|~\x9e\x8cq]S\x97\x92\x97W\xfe\x17h>\xd5\xc1b\xa8\x1c|\x80\xc6%')
#@pytest.mark.skip(reason="Secp256k1 under construction")
def test_create_address(self):
assert self.factory.create_address(self.account.public_key) == self.account.address
@pytest.mark.skip(reason="Secp256k1 under construction")
def test_create_sign_keys(self):
private_key, public_key, key_type = self.factory.create_sign_keys(self.seed)
assert self.account.public_key == public_key
assert self.account.private_key == private_key
assert key_type == 'secp256k1'
@pytest.mark.skip(reason="Secp256k1 under construction")
def test_create_from_public(self):
seed = 'divert manage prefer child kind maximum october hand manual connect fitness small symptom range sleep'
account = AccountFactoryECDSA('T').create_from_seed(seed)
account2 = AccountFactoryECDSA('T').create_from_public_key(account.public_key)
# object
assert account.address == account2.address
assert account.public_key == account2.public_key
# bytes
public_key = b"5\xcf4\xeb\xe0\xd5,s\x00t\xc6to\x8b\xd0\x0e\xf8N\xe6\xa1\x1d\x13\x18s+\x11\x82\x7fR\x8d='\x03!a\x13H\xca=]\x8aV\xf71\x16C\x0c\x9ad{\x14z\x8e1\x9dg\x8b\xb2\xf2\x9e\x0fo\xa7\x9d"
account3 = AccountFactoryECDSA('T').create_from_public_key(public_key)
assert account.address == account3.address
assert account.public_key == account3.public_key
# b58 str
account4 = AccountFactoryECDSA('T').create_from_public_key(base58.b58encode(public_key))
assert account.address == account4.address
assert account.public_key == account4.public_key
@pytest.mark.skip(reason="Secp256k1 under construction")
def test_create_from_private_key(self):
seed = 'divert manage prefer child kind maximum october hand manual connect fitness small symptom range sleep'
account = AccountFactoryECDSA('T').create_from_seed(seed)
account2 = AccountFactoryECDSA('T').create_from_private_key(account.private_key)
# object
assert account.address == account2.address
assert account.private_key == account2.private_key
assert account.public_key == account2.public_key
# bytes
private_key = b'\xa7\x90:j\x80\xdb\x00}|~\x9e\x8cq]S\x97\x92\x97W\xfe\x17h>\xd5\xc1b\xa8\x1c|\x80\xc6%'
account3 = AccountFactoryECDSA('T').create_from_private_key(private_key) | assert account.address == account3.address
assert account.private_key == account3.private_key
assert account.public_key == account3.public_key
# b58 str
account4 = AccountFactoryECDSA('T').create_from_private_key(base58.b58encode(private_key))
assert account.address == account4.address
assert account.private_key == account4.private_key
assert account.public_key == account4.public_key
def test_verify_random_account_signed_transaction(self):
account = self.factory.create()
transaction = Anchor('rtrtrtr')
transaction.sign_with(account)
cloned_tx = copy.copy(transaction)
cloned_tx.proofs = []
message = cloned_tx.to_binary()
assert account.verify_signature(message, transaction.proofs[0]) is True | |
main.go | package main
/*
$ curl -i http://localhost:8080/index
<h1>Index Page</h1>
$ curl -i http://localhost:8080/v1/
<h1>Hello V1 Page</h1>
$ curl http://localhost:8080/v1/hello?name=lovecucu
hello lovecucu, you're at /v1/hello
$ curl http://localhost:8080/v2/hello/lovecucu
hello lovecucu, you're at /v2/hello/lovecucu
$ curl "http://localhost:8080/v2/login" -X POST -d 'username=lovecucu&password=1234'
{"password":"1234","username":"lovecucu"}
$ curl http://localhost:8080/assets/a.cs
{"filepath":"a.cs"}
$ curl http://localhost:8080/hello
404 NOT FOUND: /hello
*/
import (
"gen"
"net/http"
)
func | () {
engine := gen.New()
engine.GET("/index", func(c *gen.Context) {
c.HTML(http.StatusOK, "<h1>Index Page</h1>\n")
})
engine.GET("/assets/*filepath", func(c *gen.Context) {
// expect /assets/css/a.css
c.JSON(http.StatusOK, gen.H{"filepath": c.Param("filepath")})
})
v1 := engine.Group("/v1")
{
v1.GET("/", func(c *gen.Context) {
c.HTML(http.StatusOK, "<h1>Hello V1 Page</h1>\n")
})
v1.GET("/hello", func(c *gen.Context) {
// expect /hello?name=lovecucu
c.String(http.StatusOK, "hello %s, you're at %s\n", c.Query("name"), c.Path)
})
}
v2 := engine.Group("/v2")
{
v2.GET("/hello/:name", func(c *gen.Context) {
// expect /hello/lovecucu
c.String(http.StatusOK, "hello %s, you're at %s\n", c.Param("name"), c.Path)
})
v2.POST("/login", func(c *gen.Context) {
c.JSON(http.StatusOK, gen.H{
"username": c.PostForm("username"),
"password": c.PostForm("password"),
})
})
}
engine.Run(":8080")
}
| main |
mod.rs | use arrayvec::ArrayString;
use wio_terminal::LCD;
use core::fmt::Write;
use embedded_graphics::{egtext, fonts::{Font24x32, Font12x16, Text}, geometry::Point, pixelcolor::Rgb565, prelude::*, style::TextStyle, text_style};
pub fn | (mut display: LCD) -> LCD {
Text::new("Air Quality", Point::new(20, 30))
.into_styled(TextStyle::new(Font24x32, Rgb565::BLUE))
.draw(&mut display)
.unwrap();
Text::new("Carbon Dioxide:", Point::new(5, 90))
.into_styled(TextStyle::new(Font12x16, Rgb565::GREEN))
.draw(&mut display)
.unwrap();
Text::new("Temperature:", Point::new(5, 130))
.into_styled(TextStyle::new(Font12x16, Rgb565::GREEN))
.draw(&mut display)
.unwrap();
Text::new("Humidity:", Point::new(5, 170))
.into_styled(TextStyle::new(Font12x16, Rgb565::GREEN))
.draw(&mut display)
.unwrap();
display
}
pub fn draw_numbers(
value: f32,
unit: &str,
position: (i32, i32),
mut display: LCD,
) -> LCD {
let mut buf = ArrayString::<[_; 12]>::new();
write!(&mut buf, "{:.2} {}", value, unit).expect("Failed to write to buffer");
egtext!(
text = &buf,
top_left = position,
style = text_style!(font = Font12x16, text_color = Rgb565::GREEN,)
)
.draw(&mut display)
.unwrap();
display
}
| draw_text |
company_routes.py | # web_app/routes/company_routes.py
import pandas as pd
from flask import Blueprint, jsonify, request, render_template #, flash, redirect
from web_app.models import *
company_routes = Blueprint("company_routes", __name__)
@company_routes.route("/div_yield")
def | ():
return render_template("highest_DivYield_charts.html")
@company_routes.route("/highest_increasing_divs")
def seecompanies_w_highest_dividend_increases():
return render_template("companies_w_highest_dividend_increases.html")
@company_routes.route("/most_affordable_div_payers")
def seemost_affordable_div_payers():
return render_template("most_affordable.html")
@company_routes.route("/companies")
def list_companies_for_humans():
return render_template("All_SP500.html", message="Here's all the companies on the S&P 500",
companies=get_AllCompanies())
@company_routes.route("/test")
def seeTEST():
return render_template("test.html", message="Here's all the companies on the S&P 500")
def get_AllCompanies():
all = Company_Info.query.all()
names = [record.Company_Name for record in all]
return names
def createCompanyInfoTable(): # ran once
SandP500 = pd.read_csv('../DIYInvestmentPrimer/SandP_500_companies.csv')
    db.create_all()  # create the tables once, outside the loop
    for x in range(0, len(SandP500)):
        company_entry = Company_Info(Company_Name=SandP500['Security'][x],
                                     Company_Ticker=SandP500['Symbol'][x],
                                     Sector=SandP500['GICS Sector'][x],
                                     SubIndustry=SandP500['GICS Sub-Industry'][x],
                                     HQ_Location=SandP500['Headquarters Location'][x],
                                     Date_first_added_to_SP500=SandP500['Date first added'][x],
                                     Founded=SandP500['Founded'][x])
        db.session.add(company_entry)
    db.session.commit()
| seeDivYield |
particles.rs |
use crate::math::*;
use crate::core::*;
///
/// Shader program used for rendering [Particles](Particles).
/// The fragment shader code can use position (`in vec3 pos;`) normal (`in vec3 nor;`) and uv coordinates (`in vec2 uvs;`).
///
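// A minimal fragment shader accepted by `new` might look like the following sketch
// (hypothetical source; `pos` is wired up only because the text `in vec3 pos;` occurs):
//
//     in vec3 pos;
//     layout (location = 0) out vec4 outColor;
//     void main() { outColor = vec4(normalize(pos) * 0.5 + 0.5, 1.0); }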
pub struct ParticlesProgram {
program: Program,
use_normals: bool,
use_uvs: bool,
}
impl ParticlesProgram {
pub fn new(context: &Context, fragment_shader_source: &str) -> Result<Self, Error> {
let use_positions = fragment_shader_source.find("in vec3 pos;").is_some();
let use_normals = fragment_shader_source.find("in vec3 nor;").is_some();
let use_uvs = fragment_shader_source.find("in vec2 uvs;").is_some();
let vertex_shader_source = &format!("
layout (std140) uniform Camera
{{
mat4 viewProjection;
mat4 view;
mat4 projection;
vec3 position;
float padding;
}} camera;
uniform float time;
uniform vec3 acceleration;
in vec3 start_position;
in vec3 start_velocity;
uniform mat4 modelMatrix;
in vec3 position;
{} // Positions out
{} // Normals in/out
{} // UV coordinates in/out
void main()
{{
                vec3 p = start_position + start_velocity * time + 0.5 * acceleration * time * time;
                vec4 worldPosition = modelMatrix * vec4(p, 1.0); // world-space particle centre, consumed by the optional `pos` output
                gl_Position = camera.projection * (camera.view * worldPosition + vec4(position, 0.0));
{} // Position
{} // Normal
{} // UV coordinates
}}
",
if use_positions {"out vec3 pos;"} else {""},
if use_normals {
"uniform mat4 normalMatrix;
in vec3 normal;
out vec3 nor;"
} else {""},
if use_uvs {
"in vec2 uv_coordinates;
out vec2 uvs;"
} else {""},
if use_positions {"pos = worldPosition.xyz;"} else {""},
if use_normals { "nor = mat3(normalMatrix) * normal;" } else {""},
if use_uvs { "uvs = uv_coordinates;" } else {""}
);
let program = Program::from_source(context, vertex_shader_source, fragment_shader_source)?;
Ok(Self {program, use_normals, use_uvs})
}
}
impl std::ops::Deref for ParticlesProgram {
type Target = Program;
fn deref(&self) -> &Program |
}
///
/// Used to define the initial position and velocity of a particle in [Particles](Particles).
///
pub struct ParticleData {
pub start_position: Vec3,
pub start_velocity: Vec3
}
///
/// Particle effect with fixed vertex shader and customizable fragment shader (see also [ParticlesProgram](ParticlesProgram)).
///
/// Each particle is initialised with a position and velocity using the [update](Particles::update) function and a global acceleration.
/// Then when time passes, their position is updated based on
/// `new_position = start_position + start_velocity * time + 0.5 * acceleration * time * time`
///
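// Usage sketch (hypothetical `context` and `cpu_mesh` values, error handling elided):
//
//     let mut particles = Particles::new(&context, &cpu_mesh, &vec3(0.0, -9.81, 0.0))?;
//     particles.update(&[ParticleData {
//         start_position: vec3(0.0, 0.0, 0.0),
//         start_velocity: vec3(1.0, 4.0, 0.0),
//     }]);
//     // at time = 1.0 the formula above places this particle at (1.0, -0.905, 0.0)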
pub struct Particles {
start_position_buffer: VertexBuffer,
start_velocity_buffer: VertexBuffer,
position_buffer: VertexBuffer,
normal_buffer: Option<VertexBuffer>,
uv_buffer: Option<VertexBuffer>,
index_buffer: Option<ElementBuffer>,
pub acceleration: Vec3,
instance_count: u32
}
impl Particles {
pub fn new(context: &Context, cpu_mesh: &CPUMesh, acceleration: &Vec3) -> Result<Self, Error>
{
let position_buffer = VertexBuffer::new_with_static_f32(context, &cpu_mesh.positions)?;
let normal_buffer = if let Some(ref normals) = cpu_mesh.normals { Some(VertexBuffer::new_with_static_f32(context, normals)?) } else {None};
let index_buffer = if let Some(ref ind) = cpu_mesh.indices { Some(ElementBuffer::new_with_u32(context, ind)?) } else {None};
let uv_buffer = if let Some(ref uvs) = cpu_mesh.uvs { Some(VertexBuffer::new_with_static_f32(context, uvs)?) } else {None};
Ok(Self {
position_buffer, index_buffer, normal_buffer, uv_buffer,
start_position_buffer: VertexBuffer::new_with_dynamic_f32(context, &[])?,
start_velocity_buffer: VertexBuffer::new_with_dynamic_f32(context, &[])?,
acceleration: *acceleration,
instance_count: 0
})
}
///
/// Updates the particles with the given initial data.
    /// The list contains one entry for each particle.
///
pub fn update(&mut self, data: &[ParticleData])
{
let mut start_position = Vec::new();
let mut start_velocity = Vec::new();
for particle in data {
start_position.push(particle.start_position.x);
start_position.push(particle.start_position.y);
start_position.push(particle.start_position.z);
start_velocity.push(particle.start_velocity.x);
start_velocity.push(particle.start_velocity.y);
start_velocity.push(particle.start_velocity.z);
}
self.start_position_buffer.fill_with_dynamic_f32(&start_position);
self.start_velocity_buffer.fill_with_dynamic_f32(&start_velocity);
self.instance_count = data.len() as u32;
}
///
/// Render all defined particles with the given [ParticlesProgram](ParticlesProgram).
///
pub fn render(&self, program: &ParticlesProgram, render_states: RenderStates, viewport: Viewport, transformation: &Mat4, camera: &camera::Camera, time: f32) -> Result<(), Error>
{
program.add_uniform_mat4("modelMatrix", &transformation)?;
program.add_uniform_vec3("acceleration", &self.acceleration)?;
program.add_uniform_float("time", &time)?;
program.use_uniform_block(camera.matrix_buffer(), "Camera");
program.use_attribute_vec3_divisor(&self.start_position_buffer, "start_position", 1)?;
program.use_attribute_vec3_divisor(&self.start_velocity_buffer, "start_velocity", 1)?;
program.use_attribute_vec3(&self.position_buffer, "position")?;
if program.use_uvs {
let uv_buffer = self.uv_buffer.as_ref().ok_or(
Error::FailedToCreateMesh {message: "The particles shader program needs uv coordinates, but the mesh does not have any.".to_string()})?;
program.use_attribute_vec2(uv_buffer, "uv_coordinates")?;
}
if program.use_normals {
let normal_buffer = self.normal_buffer.as_ref().ok_or(
Error::FailedToCreateMesh {message: "The particles shader program needs normals, but the mesh does not have any. Consider calculating the normals on the CPUMesh.".to_string()})?;
program.add_uniform_mat4("normalMatrix", &transformation.invert().unwrap().transpose())?;
program.use_attribute_vec3(normal_buffer, "normal")?;
}
if let Some(ref index_buffer) = self.index_buffer {
program.draw_elements_instanced(render_states, viewport,index_buffer, self.instance_count);
} else {
program.draw_arrays_instanced(render_states, viewport,self.position_buffer.count() as u32/3, self.instance_count);
}
Ok(())
}
} | {
&self.program
} |
serviceendpointpolicydefinitions.go | package network
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// ServiceEndpointPolicyDefinitionsClient is the network Client
type ServiceEndpointPolicyDefinitionsClient struct {
BaseClient
}
// NewServiceEndpointPolicyDefinitionsClient creates an instance of the ServiceEndpointPolicyDefinitionsClient client.
func NewServiceEndpointPolicyDefinitionsClient(subscriptionID string) ServiceEndpointPolicyDefinitionsClient {
return NewServiceEndpointPolicyDefinitionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewServiceEndpointPolicyDefinitionsClientWithBaseURI creates an instance of the
// ServiceEndpointPolicyDefinitionsClient client using a custom endpoint. Use this when interacting with an Azure
// cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewServiceEndpointPolicyDefinitionsClientWithBaseURI(baseURI string, subscriptionID string) ServiceEndpointPolicyDefinitionsClient {
return ServiceEndpointPolicyDefinitionsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate creates or updates a service endpoint policy definition in the specified service endpoint policy.
// Parameters:
// resourceGroupName - the name of the resource group.
// serviceEndpointPolicyName - the name of the service endpoint policy.
// serviceEndpointPolicyDefinitionName - the name of the service endpoint policy definition name.
// serviceEndpointPolicyDefinitions - parameters supplied to the create or update service endpoint policy
// operation.
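//
// Call sketch with hypothetical values (error handling elided; `definition` is a
// ServiceEndpointPolicyDefinition built by the caller):
//
//	client := NewServiceEndpointPolicyDefinitionsClient("<subscription id>")
//	future, err := client.CreateOrUpdate(ctx, "my-rg", "my-policy", "my-definition", definition)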
func (client ServiceEndpointPolicyDefinitionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, serviceEndpointPolicyDefinitionName string, serviceEndpointPolicyDefinitions ServiceEndpointPolicyDefinition) (result ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ServiceEndpointPolicyDefinitionsClient.CreateOrUpdate")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, serviceEndpointPolicyName, serviceEndpointPolicyDefinitionName, serviceEndpointPolicyDefinitions)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
result, err = client.CreateOrUpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client ServiceEndpointPolicyDefinitionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, serviceEndpointPolicyDefinitionName string, serviceEndpointPolicyDefinitions ServiceEndpointPolicyDefinition) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"serviceEndpointPolicyDefinitionName": autorest.Encode("path", serviceEndpointPolicyDefinitionName),
"serviceEndpointPolicyName": autorest.Encode("path", serviceEndpointPolicyName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2020-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
serviceEndpointPolicyDefinitions.Etag = nil
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}", pathParameters),
autorest.WithJSON(serviceEndpointPolicyDefinitions),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client ServiceEndpointPolicyDefinitionsClient) CreateOrUpdateSender(req *http.Request) (future ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client ServiceEndpointPolicyDefinitionsClient) CreateOrUpdateResponder(resp *http.Response) (result ServiceEndpointPolicyDefinition, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete deletes the specified ServiceEndpoint policy definitions.
// Parameters:
// resourceGroupName - the name of the resource group.
// serviceEndpointPolicyName - the name of the Service Endpoint Policy.
// serviceEndpointPolicyDefinitionName - the name of the service endpoint policy definition.
func (client ServiceEndpointPolicyDefinitionsClient) Delete(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, serviceEndpointPolicyDefinitionName string) (result ServiceEndpointPolicyDefinitionsDeleteFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ServiceEndpointPolicyDefinitionsClient.Delete")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.DeletePreparer(ctx, resourceGroupName, serviceEndpointPolicyName, serviceEndpointPolicyDefinitionName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "Delete", nil, "Failure preparing request")
return
}
result, err = client.DeleteSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "Delete", nil, "Failure sending request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client ServiceEndpointPolicyDefinitionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, serviceEndpointPolicyDefinitionName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"serviceEndpointPolicyDefinitionName": autorest.Encode("path", serviceEndpointPolicyDefinitionName),
"serviceEndpointPolicyName": autorest.Encode("path", serviceEndpointPolicyName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2020-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client ServiceEndpointPolicyDefinitionsClient) DeleteSender(req *http.Request) (future ServiceEndpointPolicyDefinitionsDeleteFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client ServiceEndpointPolicyDefinitionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// Get get the specified service endpoint policy definitions from service endpoint policy.
// Parameters:
// resourceGroupName - the name of the resource group.
// serviceEndpointPolicyName - the name of the service endpoint policy name.
// serviceEndpointPolicyDefinitionName - the name of the service endpoint policy definition name.
func (client ServiceEndpointPolicyDefinitionsClient) Get(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, serviceEndpointPolicyDefinitionName string) (result ServiceEndpointPolicyDefinition, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ServiceEndpointPolicyDefinitionsClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.GetPreparer(ctx, resourceGroupName, serviceEndpointPolicyName, serviceEndpointPolicyDefinitionName)
if err != nil { |
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "Get", resp, "Failure responding to request")
return
}
return
}
// GetPreparer prepares the Get request.
func (client ServiceEndpointPolicyDefinitionsClient) GetPreparer(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, serviceEndpointPolicyDefinitionName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"serviceEndpointPolicyDefinitionName": autorest.Encode("path", serviceEndpointPolicyDefinitionName),
"serviceEndpointPolicyName": autorest.Encode("path", serviceEndpointPolicyName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2020-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ServiceEndpointPolicyDefinitionsClient) GetSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client ServiceEndpointPolicyDefinitionsClient) GetResponder(resp *http.Response) (result ServiceEndpointPolicyDefinition, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListByResourceGroup gets all service endpoint policy definitions in a service end point policy.
// Parameters:
// resourceGroupName - the name of the resource group.
// serviceEndpointPolicyName - the name of the service endpoint policy name.
func (client ServiceEndpointPolicyDefinitionsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string) (result ServiceEndpointPolicyDefinitionListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ServiceEndpointPolicyDefinitionsClient.ListByResourceGroup")
defer func() {
sc := -1
if result.sepdlr.Response.Response != nil {
sc = result.sepdlr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listByResourceGroupNextResults
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, serviceEndpointPolicyName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "ListByResourceGroup", nil, "Failure preparing request")
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.sepdlr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "ListByResourceGroup", resp, "Failure sending request")
return
}
result.sepdlr, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "ListByResourceGroup", resp, "Failure responding to request")
return
}
if result.sepdlr.hasNextLink() && result.sepdlr.IsEmpty() {
err = result.NextWithContext(ctx)
return
}
return
}
// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
func (client ServiceEndpointPolicyDefinitionsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"serviceEndpointPolicyName": autorest.Encode("path", serviceEndpointPolicyName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2020-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
// http.Response Body if it receives an error.
func (client ServiceEndpointPolicyDefinitionsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
// closes the http.Response Body.
func (client ServiceEndpointPolicyDefinitionsClient) ListByResourceGroupResponder(resp *http.Response) (result ServiceEndpointPolicyDefinitionListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listByResourceGroupNextResults retrieves the next set of results, if any.
func (client ServiceEndpointPolicyDefinitionsClient) listByResourceGroupNextResults(ctx context.Context, lastResults ServiceEndpointPolicyDefinitionListResult) (result ServiceEndpointPolicyDefinitionListResult, err error) {
req, err := lastResults.serviceEndpointPolicyDefinitionListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
}
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
}
return
}
// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
func (client ServiceEndpointPolicyDefinitionsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string) (result ServiceEndpointPolicyDefinitionListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ServiceEndpointPolicyDefinitionsClient.ListByResourceGroup")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.ListByResourceGroup(ctx, resourceGroupName, serviceEndpointPolicyName)
return
} | err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsClient", "Get", nil, "Failure preparing request")
return
} |
v1StatefulSet.ts | /**
* Kubernetes
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* The version of the OpenAPI document: v1.20.2
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
import { RequestFile } from '../api';
import { V1ObjectMeta } from './v1ObjectMeta';
import { V1StatefulSetSpec } from './v1StatefulSetSpec';
import { V1StatefulSetStatus } from './v1StatefulSetStatus';
/**
* StatefulSet represents a set of pods with consistent identities. Identities are defined as: - Network: A single stable DNS and hostname. - Storage: As many VolumeClaims as requested. The StatefulSet guarantees that a given network identity will always map to the same storage identity.
*/
export class | {
/**
* APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
*/
'apiVersion'?: string;
/**
* Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
*/
'kind'?: string;
'metadata'?: V1ObjectMeta;
'spec'?: V1StatefulSetSpec;
'status'?: V1StatefulSetStatus;
static discriminator: string | undefined = undefined;
static attributeTypeMap: Array<{name: string, baseName: string, type: string}> = [
{
"name": "apiVersion",
"baseName": "apiVersion",
"type": "string"
},
{
"name": "kind",
"baseName": "kind",
"type": "string"
},
{
"name": "metadata",
"baseName": "metadata",
"type": "V1ObjectMeta"
},
{
"name": "spec",
"baseName": "spec",
"type": "V1StatefulSetSpec"
},
{
"name": "status",
"baseName": "status",
"type": "V1StatefulSetStatus"
} ];
static getAttributeTypeMap() {
return V1StatefulSet.attributeTypeMap;
}
}
| V1StatefulSet |
0018_matchdata_selected_team.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-03-29 17:29
from __future__ import unicode_literals
from django.db import migrations, models
class | (migrations.Migration):
dependencies = [
('matches', '0017_auto_20190313_1304'),
]
operations = [
migrations.AddField(
model_name='matchdata',
name='selected_team',
field=models.CharField(blank=True, max_length=10000, null=True),
),
]
| Migration |
mod.rs | use core::fmt::{Result, Write};
use core::marker::PhantomData;
use core::ptr;
use crate::hal::prelude::*;
use crate::hal::serial;
use nb::block;
#[cfg(any(feature = "stm32f745", feature = "stm32f746",))]
use crate::device::{RCC, USART1, USART2, USART3, USART6};
#[cfg(any(feature = "stm32f745", feature = "stm32f746",))]
use crate::gpio::{
gpioa::{PA2, PA3, PA9},
gpiob::{PB10, PB11},
gpioc::{PC6, PC7, PC10, PC11},
gpiod::{PD5, PD6, PD8, PD9},
gpiog::{PG14, PG9},
};
#[cfg(any(feature = "stm32f745", feature = "stm32f746",))]
use crate::gpio::gpiob::PB7;
#[cfg(any(feature = "stm32f745", feature = "stm32f746",))]
use crate::gpio::{Alternate, AF7, AF8};
use crate::rcc::Clocks;
use crate::time::Bps; |
/// Interrupt event
#[derive(Debug)]
pub enum Event {
/// New data has been received
Rxne,
/// New data can be sent
Txe,
}
/// Serial error
#[derive(Debug)]
pub enum Error {
/// Framing error
Framing,
/// Noise error
Noise,
/// RX buffer overrun
Overrun,
/// Parity check error
Parity,
#[doc(hidden)]
_Extensible,
}
pub trait Pins<USART> {}
pub trait PinTx<USART> {}
pub trait PinRx<USART> {}
impl<USART, TX, RX> Pins<USART> for (TX, RX)
where
TX: PinTx<USART>,
RX: PinRx<USART>,
{
}
#[cfg(any(feature = "stm32f745", feature = "stm32f746",))]
impl PinTx<USART1> for PA9<Alternate<AF7>> {}
impl PinTx<USART2> for PA2<Alternate<AF7>> {}
impl PinTx<USART2> for PD5<Alternate<AF7>> {}
impl PinTx<USART3> for PB10<Alternate<AF7>> {}
impl PinTx<USART3> for PC10<Alternate<AF7>> {}
impl PinTx<USART3> for PD8<Alternate<AF7>> {}
impl PinTx<USART6> for PC6<Alternate<AF8>> {}
impl PinTx<USART6> for PG14<Alternate<AF8>> {}
#[cfg(any(feature = "stm32f745", feature = "stm32f746",))]
impl PinRx<USART1> for PB7<Alternate<AF7>> {}
impl PinRx<USART2> for PA3<Alternate<AF7>> {}
impl PinRx<USART2> for PD6<Alternate<AF7>> {}
impl PinRx<USART3> for PB11<Alternate<AF7>> {}
impl PinRx<USART3> for PC11<Alternate<AF7>> {}
impl PinRx<USART3> for PD9<Alternate<AF7>> {}
impl PinRx<USART6> for PC7<Alternate<AF8>> {}
impl PinRx<USART6> for PG9<Alternate<AF8>> {}
/// Serial abstraction
pub struct Serial<USART, PINS> {
usart: USART,
pins: PINS,
}
/// Serial receiver
pub struct Rx<USART> {
_usart: PhantomData<USART>,
}
/// Serial transmitter
pub struct Tx<USART> {
_usart: PhantomData<USART>,
}
#[macro_use]
mod macros;
#[cfg(any(feature = "stm32f745", feature = "stm32f746",))]
halUsart! {
USART1: (usart1, apb2enr, usart1en),
USART2: (usart2, apb1enr, usart2en),
USART3: (usart3, apb1enr, usart3en),
USART6: (usart6, apb2enr, usart6en),
}
impl<USART> Write for Tx<USART>
where
Tx<USART>: serial::Write<u8>,
{
fn write_str(&mut self, s: &str) -> Result {
let _ = s
.as_bytes()
.into_iter()
.map(|c| block!(self.write(*c)))
.last();
Ok(())
}
} | |
binaryop_packed_gpu.d.ts | /**
* @license
* Copyright 2018 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import { GPGPUProgram } from './gpgpu_math';
export declare const DIV = "\n // vec4 one = vec4(equal(a, b));\n // return one + (vec4(1.0) - one) * a / b;\n vec4 result = a / b;\n if(a.x == b.x) {\n result.x = 1.;\n }\n if(a.y == b.y) {\n result.y = 1.;\n }\n if(a.z == b.z) {\n result.z = 1.;\n }\n if(a.w == b.w) {\n result.w = 1.;\n }\n\n return result;\n";
export declare const INT_DIV = "\n ivec4 ia = round(a);\n ivec4 ib = round(b);\n bvec4 cond = notEqual(ib, ivec4(0));\n ivec4 result = ivec4(0);\n vec4 s = sign(a) * sign(b);\n\n // Windows (D3D) wants guaranteed non-zero int division at compile-time.\n if (cond[0]) {\n result[0] = idiv(ia[0], ib[0], s[0]);\n }\n if (cond[1]) {\n result[1] = idiv(ia[1], ib[1], s[1]);\n }\n if (cond[2]) {\n result[2] = idiv(ia[2], ib[2], s[2]);\n }\n if (cond[3]) {\n result[3] = idiv(ia[3], ib[3], s[3]);\n }\n return vec4(result);\n";
export declare const POW: string;
export declare const PRELU = "\n vec4 aLessThanZero = vec4(lessThan(a, vec4(0.)));\n return (aLessThanZero * (b * a)) + ((vec4(1.0) - aLessThanZero) * a);\n";
export declare const ELU_DER = "\n vec4 bGTEZero = vec4(greaterThanEqual(b, vec4(0.)));\n return (bGTEZero * a) + ((vec4(1.0) - bGTEZero) * (a * (b + vec4(1.0))));\n";
export declare const ATAN2: string;
export declare const EQUAL = "\n return vec4(equal(a, b));\n";
export declare const NOT_EQUAL = "\n return vec4(notEqual(a, b));\n";
export declare const LESS = "\n return vec4(lessThan(a, b));\n";
export declare const LESS_EQUAL = "\n return vec4(lessThanEqual(a, b));\n";
export declare const GREATER = "\n return vec4(greaterThan(a, b));\n";
export declare const GREATER_EQUAL = "\n return vec4(greaterThanEqual(a, b));\n";
export declare const LOGICAL_AND = "\n return vec4(\n vec4(greaterThanEqual(a, vec4(1.0))) *\n vec4(greaterThanEqual(b, vec4(1.0))));\n";
export declare const LOGICAL_OR = "\n return min(\n vec4(greaterThanEqual(a, vec4(1.0))) +\n vec4(greaterThanEqual(b, vec4(1.0))),\n vec4(1.0));\n";
export declare const MAX: string;
export declare const MIN: string; | outputShape: number[];
userCode: string;
supportsBroadcasting: boolean;
packedInputs: boolean;
packedOutput: boolean;
constructor(op: string, aShape: number[], bShape: number[], checkOutOfBounds?: boolean);
} | export declare const MOD: string;
export declare class BinaryOpPackedProgram implements GPGPUProgram {
variableNames: string[]; |
version.go | // Deprecated: We’re retiring the Azure Video Analyzer preview service; you're advised to transition your applications off of Video Analyzer by 01 December 2022. This SDK is no longer maintained and won’t work after the service is retired. To learn how to transition off, please refer to: https://aka.ms/azsdk/videoanalyzer/transitionoffguidance.
package videoanalyzer
import "github.com/Azure/azure-sdk-for-go/version"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// UserAgent returns the UserAgent string to use when sending http.Requests.
func User | tring {
return "Azure-SDK-For-Go/" + Version() + " videoanalyzer/2021-05-01-preview"
}
// Version returns the semantic version (see http://semver.org) of the client.
func Version() string {
return version.Number
}
| Agent() s |
Message.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#![allow(dead_code)]
#![allow(unused_imports)]
use crate::ipc::gen::Schema::*;
use crate::ipc::gen::SparseTensor::*;
use crate::ipc::gen::Tensor::*;
use flatbuffers::EndianScalar;
use std::{cmp::Ordering, mem};
// automatically generated by the FlatBuffers compiler, do not modify
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
pub const ENUM_MIN_COMPRESSION_TYPE: i8 = 0;
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
pub const ENUM_MAX_COMPRESSION_TYPE: i8 = 1;
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
#[allow(non_camel_case_types)]
pub const ENUM_VALUES_COMPRESSION_TYPE: [CompressionType; 2] =
[CompressionType::LZ4_FRAME, CompressionType::ZSTD];
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct CompressionType(pub i8);
#[allow(non_upper_case_globals)]
impl CompressionType {
pub const LZ4_FRAME: Self = Self(0);
pub const ZSTD: Self = Self(1);
pub const ENUM_MIN: i8 = 0;
pub const ENUM_MAX: i8 = 1;
pub const ENUM_VALUES: &'static [Self] = &[Self::LZ4_FRAME, Self::ZSTD];
    /// Returns the variant's name, or `None` if unknown.
pub fn variant_name(self) -> Option<&'static str> {
match self {
Self::LZ4_FRAME => Some("LZ4_FRAME"),
Self::ZSTD => Some("ZSTD"),
_ => None,
}
}
}
impl std::fmt::Debug for CompressionType {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if let Some(name) = self.variant_name() {
f.write_str(name)
} else {
f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0))
}
}
}
impl<'a> flatbuffers::Follow<'a> for CompressionType {
type Inner = Self;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
let b = flatbuffers::read_scalar_at::<i8>(buf, loc);
Self(b)
}
}
impl flatbuffers::Push for CompressionType {
type Output = CompressionType;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
flatbuffers::emplace_scalar::<i8>(dst, self.0);
}
}
impl flatbuffers::EndianScalar for CompressionType {
#[inline]
fn to_little_endian(self) -> Self {
let b = i8::to_le(self.0);
Self(b)
}
#[inline]
fn from_little_endian(self) -> Self {
let b = i8::from_le(self.0);
Self(b)
}
}
impl<'a> flatbuffers::Verifiable for CompressionType {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier,
pos: usize,
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use flatbuffers::Verifiable;
i8::run_verifier(v, pos)
}
}
impl flatbuffers::SimpleToVerifyInSlice for CompressionType {}
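// Illustrative sketch (not part of the generated code): CompressionType is an
// "open" enum -- a transparent struct over i8 rather than a Rust enum -- so
// consumers must always handle values outside the known range, e.g. when
// deciding which decompressor to dispatch to.
fn codec_label(codec: CompressionType) -> &'static str {
    match codec {
        CompressionType::LZ4_FRAME => "lz4 frame",
        CompressionType::ZSTD => "zstd",
        _ => "unrecognized codec",
    }
}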
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
pub const ENUM_MIN_BODY_COMPRESSION_METHOD: i8 = 0;
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
pub const ENUM_MAX_BODY_COMPRESSION_METHOD: i8 = 0;
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
#[allow(non_camel_case_types)]
pub const ENUM_VALUES_BODY_COMPRESSION_METHOD: [BodyCompressionMethod; 1] =
[BodyCompressionMethod::BUFFER];
/// Provided for forward compatibility in case we need to support different
/// strategies for compressing the IPC message body (like whole-body
/// compression rather than buffer-level) in the future
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct BodyCompressionMethod(pub i8);
#[allow(non_upper_case_globals)]
impl BodyCompressionMethod {
/// Each constituent buffer is first compressed with the indicated
/// compressor, and then written with the uncompressed length in the first 8
/// bytes as a 64-bit little-endian signed integer followed by the compressed
/// buffer bytes (and then padding as required by the protocol). The
/// uncompressed length may be set to -1 to indicate that the data that
/// follows is not compressed, which can be useful for cases where
/// compression does not yield appreciable savings.
pub const BUFFER: Self = Self(0);
pub const ENUM_MIN: i8 = 0;
pub const ENUM_MAX: i8 = 0;
pub const ENUM_VALUES: &'static [Self] = &[Self::BUFFER];
    /// Returns the variant's name, or None if the value is unknown.
pub fn variant_name(self) -> Option<&'static str> {
match self {
Self::BUFFER => Some("BUFFER"),
_ => None,
}
}
}
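// A minimal sketch of the BUFFER framing documented on
// BodyCompressionMethod::BUFFER above; it is not part of the generated code,
// and the trailing padding required by the protocol is omitted. The
// `compress` closure stands in for an LZ4-frame or ZSTD encoder (assumption).
fn frame_buffer(raw: &[u8], compress: impl Fn(&[u8]) -> Vec<u8>) -> Vec<u8> {
    let compressed = compress(raw);
    let mut out = Vec::with_capacity(8 + compressed.len().min(raw.len()));
    if compressed.len() < raw.len() {
        // Prefix with the uncompressed length as a 64-bit little-endian
        // signed integer, then append the compressed bytes.
        out.extend_from_slice(&(raw.len() as i64).to_le_bytes());
        out.extend_from_slice(&compressed);
    } else {
        // No appreciable savings: a length of -1 signals that the bytes
        // that follow are stored uncompressed.
        out.extend_from_slice(&(-1i64).to_le_bytes());
        out.extend_from_slice(raw);
    }
    out
}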
impl std::fmt::Debug for BodyCompressionMethod {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if let Some(name) = self.variant_name() {
f.write_str(name)
} else {
f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0))
}
}
}
impl<'a> flatbuffers::Follow<'a> for BodyCompressionMethod {
type Inner = Self;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
let b = flatbuffers::read_scalar_at::<i8>(buf, loc);
Self(b)
}
}
impl flatbuffers::Push for BodyCompressionMethod {
type Output = BodyCompressionMethod;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
flatbuffers::emplace_scalar::<i8>(dst, self.0);
}
}
impl flatbuffers::EndianScalar for BodyCompressionMethod {
#[inline]
fn to_little_endian(self) -> Self {
let b = i8::to_le(self.0);
Self(b)
}
#[inline]
fn from_little_endian(self) -> Self {
let b = i8::from_le(self.0);
Self(b)
}
}
impl<'a> flatbuffers::Verifiable for BodyCompressionMethod {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier,
pos: usize,
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use flatbuffers::Verifiable;
i8::run_verifier(v, pos)
}
}
impl flatbuffers::SimpleToVerifyInSlice for BodyCompressionMethod {}
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
pub const ENUM_MIN_MESSAGE_HEADER: u8 = 0;
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
pub const ENUM_MAX_MESSAGE_HEADER: u8 = 5;
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
#[allow(non_camel_case_types)]
pub const ENUM_VALUES_MESSAGE_HEADER: [MessageHeader; 6] = [
MessageHeader::NONE,
MessageHeader::Schema,
MessageHeader::DictionaryBatch,
MessageHeader::RecordBatch,
MessageHeader::Tensor,
MessageHeader::SparseTensor,
];
/// ----------------------------------------------------------------------
/// The root Message type
/// This union enables us to easily send different message types without
/// redundant storage, and in the future we can easily add new message types.
///
/// Arrow implementations do not need to implement all of the message types,
/// which may include experimental metadata types. For maximum compatibility,
/// it is best to send data using RecordBatch.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct MessageHeader(pub u8);
#[allow(non_upper_case_globals)]
impl MessageHeader {
pub const NONE: Self = Self(0);
pub const Schema: Self = Self(1);
pub const DictionaryBatch: Self = Self(2);
pub const RecordBatch: Self = Self(3);
pub const Tensor: Self = Self(4);
pub const SparseTensor: Self = Self(5);
pub const ENUM_MIN: u8 = 0;
pub const ENUM_MAX: u8 = 5;
pub const ENUM_VALUES: &'static [Self] = &[
Self::NONE,
Self::Schema,
Self::DictionaryBatch,
Self::RecordBatch,
Self::Tensor,
Self::SparseTensor,
];
    /// Returns the variant's name, or None if the value is unknown.
pub fn variant_name(self) -> Option<&'static str> {
match self {
Self::NONE => Some("NONE"),
Self::Schema => Some("Schema"),
Self::DictionaryBatch => Some("DictionaryBatch"),
Self::RecordBatch => Some("RecordBatch"),
Self::Tensor => Some("Tensor"),
Self::SparseTensor => Some("SparseTensor"),
_ => None,
}
}
}
impl std::fmt::Debug for MessageHeader {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if let Some(name) = self.variant_name() {
f.write_str(name)
} else {
f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0))
}
}
}
pub struct MessageHeaderUnionTableOffset {}
impl<'a> flatbuffers::Follow<'a> for MessageHeader {
type Inner = Self;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
let b = flatbuffers::read_scalar_at::<u8>(buf, loc);
Self(b)
}
}
impl flatbuffers::Push for MessageHeader {
type Output = MessageHeader;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
flatbuffers::emplace_scalar::<u8>(dst, self.0);
}
}
impl flatbuffers::EndianScalar for MessageHeader {
#[inline]
fn to_little_endian(self) -> Self {
let b = u8::to_le(self.0);
Self(b)
}
#[inline]
fn from_little_endian(self) -> Self {
let b = u8::from_le(self.0);
Self(b)
}
}
impl<'a> flatbuffers::Verifiable for MessageHeader {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier,
pos: usize,
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use flatbuffers::Verifiable;
u8::run_verifier(v, pos)
}
}
impl flatbuffers::SimpleToVerifyInSlice for MessageHeader {}
/// ----------------------------------------------------------------------
/// Data structures for describing a table row batch (a collection of
/// equal-length Arrow arrays)
/// Metadata about a field at some level of a nested type tree (but not
/// its children).
///
/// For example, a List<Int16> with values `[[1, 2, 3], null, [4], [5, 6], null]`
/// would have {length: 5, null_count: 2} for its List node, and {length: 6,
/// null_count: 0} for its Int16 node, as separate FieldNode structs
// struct FieldNode, aligned to 8
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq)]
pub struct FieldNode(pub [u8; 16]);
impl std::fmt::Debug for FieldNode {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
f.debug_struct("FieldNode")
.field("length", &self.length())
.field("null_count", &self.null_count())
.finish()
}
}
impl flatbuffers::SimpleToVerifyInSlice for FieldNode {}
impl flatbuffers::SafeSliceAccess for FieldNode {}
impl<'a> flatbuffers::Follow<'a> for FieldNode {
type Inner = &'a FieldNode;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
<&'a FieldNode>::follow(buf, loc)
}
}
impl<'a> flatbuffers::Follow<'a> for &'a FieldNode {
type Inner = &'a FieldNode;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
flatbuffers::follow_cast_ref::<FieldNode>(buf, loc)
}
}
impl<'b> flatbuffers::Push for FieldNode {
type Output = FieldNode;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
let src = unsafe {
::std::slice::from_raw_parts(
self as *const FieldNode as *const u8,
Self::size(),
)
};
dst.copy_from_slice(src);
}
}
impl<'b> flatbuffers::Push for &'b FieldNode {
type Output = FieldNode;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
let src = unsafe {
::std::slice::from_raw_parts(
*self as *const FieldNode as *const u8,
Self::size(),
)
};
dst.copy_from_slice(src);
}
}
impl<'a> flatbuffers::Verifiable for FieldNode {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier,
pos: usize,
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use flatbuffers::Verifiable;
v.in_buffer::<Self>(pos)
}
}
impl FieldNode {
#[allow(clippy::too_many_arguments)]
pub fn new(length: i64, null_count: i64) -> Self {
let mut s = Self([0; 16]);
s.set_length(length);
s.set_null_count(null_count);
s
}
/// The number of value slots in the Arrow array at this level of a nested
/// tree
pub fn length(&self) -> i64 {
let mut mem = core::mem::MaybeUninit::<i64>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[0..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<i64>(),
);
mem.assume_init()
}
.from_little_endian()
}
pub fn set_length(&mut self, x: i64) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const i64 as *const u8,
self.0[0..].as_mut_ptr(),
core::mem::size_of::<i64>(),
);
}
}
/// The number of observed nulls. Fields with null_count == 0 may choose not
/// to write their physical validity bitmap out as a materialized buffer,
/// instead setting the length of the bitmap buffer to 0.
pub fn null_count(&self) -> i64 {
let mut mem = core::mem::MaybeUninit::<i64>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[8..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<i64>(),
);
mem.assume_init()
}
.from_little_endian()
}
pub fn set_null_count(&mut self, x: i64) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const i64 as *const u8,
self.0[8..].as_mut_ptr(),
core::mem::size_of::<i64>(),
);
}
}
}
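// Illustrative sketch: the `List<Int16>` example from the FieldNode docs
// above, `[[1, 2, 3], null, [4], [5, 6], null]`, produces one FieldNode per
// level of the nested type tree.
fn example_field_nodes() -> [FieldNode; 2] {
    let list_node = FieldNode::new(5, 2); // 5 list slots, 2 of them null
    let int16_node = FieldNode::new(6, 0); // 6 Int16 values, none null
    [list_node, int16_node]
}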
pub enum BodyCompressionOffset {}
#[derive(Copy, Clone, PartialEq)]
/// Optional compression for the memory buffers constituting IPC message
/// bodies. Intended for use with RecordBatch but could be used for other
/// message types
pub struct BodyCompression<'a> {
pub _tab: flatbuffers::Table<'a>,
}
impl<'a> flatbuffers::Follow<'a> for BodyCompression<'a> {
type Inner = BodyCompression<'a>;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
Self {
_tab: flatbuffers::Table { buf, loc },
}
}
}
impl<'a> BodyCompression<'a> {
#[inline]
pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
BodyCompression { _tab: table }
}
#[allow(unused_mut)]
pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(
_fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,
args: &'args BodyCompressionArgs,
) -> flatbuffers::WIPOffset<BodyCompression<'bldr>> {
let mut builder = BodyCompressionBuilder::new(_fbb);
builder.add_method(args.method);
builder.add_codec(args.codec);
builder.finish()
}
pub const VT_CODEC: flatbuffers::VOffsetT = 4;
pub const VT_METHOD: flatbuffers::VOffsetT = 6;
/// Compressor library
#[inline]
pub fn codec(&self) -> CompressionType {
self._tab
.get::<CompressionType>(
BodyCompression::VT_CODEC,
Some(CompressionType::LZ4_FRAME),
)
.unwrap()
}
/// Indicates the way the record batch body was compressed
#[inline]
pub fn method(&self) -> BodyCompressionMethod {
self._tab
.get::<BodyCompressionMethod>(
BodyCompression::VT_METHOD,
Some(BodyCompressionMethod::BUFFER),
)
.unwrap()
}
}
impl flatbuffers::Verifiable for BodyCompression<'_> {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier,
pos: usize,
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use flatbuffers::Verifiable;
v.visit_table(pos)?
.visit_field::<CompressionType>(&"codec", Self::VT_CODEC, false)?
.visit_field::<BodyCompressionMethod>(&"method", Self::VT_METHOD, false)?
.finish();
Ok(())
}
}
pub struct BodyCompressionArgs {
pub codec: CompressionType,
pub method: BodyCompressionMethod,
}
impl<'a> Default for BodyCompressionArgs {
#[inline]
fn default() -> Self {
BodyCompressionArgs {
codec: CompressionType::LZ4_FRAME,
method: BodyCompressionMethod::BUFFER,
}
}
}
pub struct BodyCompressionBuilder<'a: 'b, 'b> {
fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>,
start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b> BodyCompressionBuilder<'a, 'b> {
#[inline]
pub fn add_codec(&mut self, codec: CompressionType) {
self.fbb_.push_slot::<CompressionType>(
BodyCompression::VT_CODEC,
codec,
CompressionType::LZ4_FRAME,
);
}
#[inline]
pub fn add_method(&mut self, method: BodyCompressionMethod) {
self.fbb_.push_slot::<BodyCompressionMethod>(
BodyCompression::VT_METHOD,
method,
BodyCompressionMethod::BUFFER,
);
}
#[inline]
pub fn new(
_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
) -> BodyCompressionBuilder<'a, 'b> {
let start = _fbb.start_table();
BodyCompressionBuilder {
fbb_: _fbb,
start_: start,
}
}
#[inline]
pub fn finish(self) -> flatbuffers::WIPOffset<BodyCompression<'a>> {
let o = self.fbb_.end_table(self.start_);
flatbuffers::WIPOffset::new(o.value())
}
}
impl std::fmt::Debug for BodyCompression<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut ds = f.debug_struct("BodyCompression");
ds.field("codec", &self.codec());
ds.field("method", &self.method());
ds.finish()
}
}
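// Hedged usage sketch: assembling a BodyCompression table via the generated
// `create` helper; choosing ZSTD here is arbitrary.
fn example_body_compression<'a>(
    fbb: &mut flatbuffers::FlatBufferBuilder<'a>,
) -> flatbuffers::WIPOffset<BodyCompression<'a>> {
    BodyCompression::create(
        fbb,
        &BodyCompressionArgs {
            codec: CompressionType::ZSTD,
            method: BodyCompressionMethod::BUFFER,
        },
    )
}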
pub enum RecordBatchOffset {}
#[derive(Copy, Clone, PartialEq)]
/// A data header describing the shared memory layout of a "record" or "row"
/// batch. Some systems call this a "row batch" internally and others a "record
/// batch".
pub struct RecordBatch<'a> {
pub _tab: flatbuffers::Table<'a>,
}
impl<'a> flatbuffers::Follow<'a> for RecordBatch<'a> {
type Inner = RecordBatch<'a>;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
Self {
_tab: flatbuffers::Table { buf, loc },
}
}
}
impl<'a> RecordBatch<'a> {
#[inline]
pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
RecordBatch { _tab: table }
}
#[allow(unused_mut)]
pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(
_fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,
args: &'args RecordBatchArgs<'args>,
) -> flatbuffers::WIPOffset<RecordBatch<'bldr>> {
let mut builder = RecordBatchBuilder::new(_fbb);
builder.add_length(args.length);
if let Some(x) = args.compression {
builder.add_compression(x);
}
if let Some(x) = args.buffers {
builder.add_buffers(x);
}
if let Some(x) = args.nodes {
builder.add_nodes(x);
}
builder.finish()
}
pub const VT_LENGTH: flatbuffers::VOffsetT = 4;
pub const VT_NODES: flatbuffers::VOffsetT = 6;
pub const VT_BUFFERS: flatbuffers::VOffsetT = 8;
pub const VT_COMPRESSION: flatbuffers::VOffsetT = 10;
/// number of records / rows. The arrays in the batch should all have this
/// length
#[inline]
pub fn length(&self) -> i64 {
self._tab
.get::<i64>(RecordBatch::VT_LENGTH, Some(0))
.unwrap()
}
/// Nodes correspond to the pre-ordered flattened logical schema
#[inline]
pub fn nodes(&self) -> Option<&'a [FieldNode]> {
self._tab
.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, FieldNode>>>(
RecordBatch::VT_NODES,
None,
)
.map(|v| v.safe_slice())
}
/// Buffers correspond to the pre-ordered flattened buffer tree
///
/// The number of buffers appended to this list depends on the schema. For
/// example, most primitive arrays will have 2 buffers, 1 for the validity
/// bitmap and 1 for the values. For struct arrays, there will only be a
/// single buffer for the validity (nulls) bitmap
#[inline]
pub fn buffers(&self) -> Option<&'a [Buffer]> {
self._tab
.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, Buffer>>>(
RecordBatch::VT_BUFFERS,
None,
)
.map(|v| v.safe_slice())
}
/// Optional compression of the message body
#[inline]
pub fn compression(&self) -> Option<BodyCompression<'a>> {
self._tab
.get::<flatbuffers::ForwardsUOffset<BodyCompression>>(
RecordBatch::VT_COMPRESSION,
None,
)
}
}
impl flatbuffers::Verifiable for RecordBatch<'_> {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier,
pos: usize,
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use flatbuffers::Verifiable;
v.visit_table(pos)?
.visit_field::<i64>(&"length", Self::VT_LENGTH, false)?
.visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, FieldNode>>>(&"nodes", Self::VT_NODES, false)?
.visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, Buffer>>>(&"buffers", Self::VT_BUFFERS, false)?
.visit_field::<flatbuffers::ForwardsUOffset<BodyCompression>>(&"compression", Self::VT_COMPRESSION, false)?
.finish();
Ok(())
}
}
pub struct RecordBatchArgs<'a> {
pub length: i64,
pub nodes: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, FieldNode>>>,
pub buffers: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, Buffer>>>,
pub compression: Option<flatbuffers::WIPOffset<BodyCompression<'a>>>,
}
impl<'a> Default for RecordBatchArgs<'a> {
#[inline]
fn default() -> Self {
RecordBatchArgs {
length: 0,
nodes: None,
buffers: None,
compression: None,
}
}
}
pub struct RecordBatchBuilder<'a: 'b, 'b> {
fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>,
start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b> RecordBatchBuilder<'a, 'b> {
#[inline]
pub fn add_length(&mut self, length: i64) {
self.fbb_
.push_slot::<i64>(RecordBatch::VT_LENGTH, length, 0);
}
#[inline]
pub fn add_nodes(
&mut self,
nodes: flatbuffers::WIPOffset<flatbuffers::Vector<'b, FieldNode>>,
) {
self.fbb_
.push_slot_always::<flatbuffers::WIPOffset<_>>(RecordBatch::VT_NODES, nodes);
}
#[inline]
pub fn add_buffers(
&mut self,
buffers: flatbuffers::WIPOffset<flatbuffers::Vector<'b, Buffer>>,
) {
self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(
RecordBatch::VT_BUFFERS,
buffers,
);
}
#[inline]
pub fn add_compression(
&mut self,
compression: flatbuffers::WIPOffset<BodyCompression<'b>>,
) {
self.fbb_
.push_slot_always::<flatbuffers::WIPOffset<BodyCompression>>(
RecordBatch::VT_COMPRESSION,
compression,
);
}
#[inline]
pub fn new(
_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
) -> RecordBatchBuilder<'a, 'b> {
let start = _fbb.start_table();
RecordBatchBuilder {
fbb_: _fbb,
start_: start,
}
}
#[inline]
pub fn finish(self) -> flatbuffers::WIPOffset<RecordBatch<'a>> {
let o = self.fbb_.end_table(self.start_);
flatbuffers::WIPOffset::new(o.value())
}
}
impl std::fmt::Debug for RecordBatch<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut ds = f.debug_struct("RecordBatch");
ds.field("length", &self.length());
ds.field("nodes", &self.nodes());
ds.field("buffers", &self.buffers());
ds.field("compression", &self.compression());
ds.finish()
}
}
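// Hedged usage sketch: RecordBatch metadata for a 5-row batch; nodes,
// buffers, and compression are left at their defaults (absent) purely for
// illustration.
fn example_record_batch<'a>(
    fbb: &mut flatbuffers::FlatBufferBuilder<'a>,
) -> flatbuffers::WIPOffset<RecordBatch<'a>> {
    RecordBatch::create(
        fbb,
        &RecordBatchArgs {
            length: 5,
            ..Default::default()
        },
    )
}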
pub enum DictionaryBatchOffset {}
#[derive(Copy, Clone, PartialEq)]
/// For sending dictionary encoding information. Any Field can be
/// dictionary-encoded, but in this case none of its children may be
/// dictionary-encoded.
/// There is one vector / column per dictionary, but that vector / column
/// may be spread across multiple dictionary batches by using the isDelta
/// flag
pub struct DictionaryBatch<'a> {
pub _tab: flatbuffers::Table<'a>,
}
impl<'a> flatbuffers::Follow<'a> for DictionaryBatch<'a> {
type Inner = DictionaryBatch<'a>;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
Self {
_tab: flatbuffers::Table { buf, loc },
}
}
}
impl<'a> DictionaryBatch<'a> {
#[inline]
pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
DictionaryBatch { _tab: table }
}
#[allow(unused_mut)]
pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(
_fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,
args: &'args DictionaryBatchArgs<'args>,
) -> flatbuffers::WIPOffset<DictionaryBatch<'bldr>> {
let mut builder = DictionaryBatchBuilder::new(_fbb);
builder.add_id(args.id);
if let Some(x) = args.data {
builder.add_data(x);
}
builder.add_isDelta(args.isDelta);
builder.finish()
}
pub const VT_ID: flatbuffers::VOffsetT = 4;
pub const VT_DATA: flatbuffers::VOffsetT = 6;
pub const VT_ISDELTA: flatbuffers::VOffsetT = 8;
#[inline]
pub fn id(&self) -> i64 {
self._tab
.get::<i64>(DictionaryBatch::VT_ID, Some(0))
.unwrap()
}
#[inline]
pub fn data(&self) -> Option<RecordBatch<'a>> {
self._tab.get::<flatbuffers::ForwardsUOffset<RecordBatch>>(
DictionaryBatch::VT_DATA,
None,
)
}
/// If isDelta is true the values in the dictionary are to be appended to a
/// dictionary with the indicated id. If isDelta is false this dictionary
/// should replace the existing dictionary.
#[inline]
pub fn isDelta(&self) -> bool {
self._tab
.get::<bool>(DictionaryBatch::VT_ISDELTA, Some(false))
.unwrap()
}
}
impl flatbuffers::Verifiable for DictionaryBatch<'_> {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier,
pos: usize,
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use flatbuffers::Verifiable;
v.visit_table(pos)?
.visit_field::<i64>(&"id", Self::VT_ID, false)?
.visit_field::<flatbuffers::ForwardsUOffset<RecordBatch>>(
&"data",
Self::VT_DATA,
false,
)?
.visit_field::<bool>(&"isDelta", Self::VT_ISDELTA, false)?
.finish();
Ok(())
}
}
pub struct DictionaryBatchArgs<'a> {
pub id: i64,
pub data: Option<flatbuffers::WIPOffset<RecordBatch<'a>>>,
pub isDelta: bool,
}
impl<'a> Default for DictionaryBatchArgs<'a> {
#[inline]
fn default() -> Self {
DictionaryBatchArgs {
id: 0,
data: None,
isDelta: false,
}
}
}
pub struct DictionaryBatchBuilder<'a: 'b, 'b> {
fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>,
start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b> DictionaryBatchBuilder<'a, 'b> {
#[inline]
pub fn add_id(&mut self, id: i64) {
self.fbb_.push_slot::<i64>(DictionaryBatch::VT_ID, id, 0);
}
#[inline]
pub fn add_data(&mut self, data: flatbuffers::WIPOffset<RecordBatch<'b>>) {
self.fbb_
.push_slot_always::<flatbuffers::WIPOffset<RecordBatch>>(
DictionaryBatch::VT_DATA,
data,
);
}
#[inline]
pub fn add_isDelta(&mut self, isDelta: bool) {
self.fbb_
.push_slot::<bool>(DictionaryBatch::VT_ISDELTA, isDelta, false);
}
#[inline]
pub fn new(
_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
) -> DictionaryBatchBuilder<'a, 'b> {
let start = _fbb.start_table();
DictionaryBatchBuilder {
fbb_: _fbb,
start_: start,
}
}
#[inline]
pub fn finish(self) -> flatbuffers::WIPOffset<DictionaryBatch<'a>> {
let o = self.fbb_.end_table(self.start_);
flatbuffers::WIPOffset::new(o.value())
}
}
impl std::fmt::Debug for DictionaryBatch<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut ds = f.debug_struct("DictionaryBatch");
ds.field("id", &self.id());
ds.field("data", &self.data());
ds.field("isDelta", &self.isDelta());
ds.finish()
}
}
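// Hedged usage sketch of the isDelta semantics documented above: a delta
// batch appends `data` to the dictionary with id 1 instead of replacing it.
// The `data` offset is assumed to be a previously built RecordBatch.
fn example_delta_dictionary<'a>(
    fbb: &mut flatbuffers::FlatBufferBuilder<'a>,
    data: flatbuffers::WIPOffset<RecordBatch<'a>>,
) -> flatbuffers::WIPOffset<DictionaryBatch<'a>> {
    DictionaryBatch::create(
        fbb,
        &DictionaryBatchArgs {
            id: 1,
            data: Some(data),
            isDelta: true,
        },
    )
}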
pub enum MessageOffset {}
#[derive(Copy, Clone, PartialEq)]
pub struct Message<'a> {
pub _tab: flatbuffers::Table<'a>,
}
impl<'a> flatbuffers::Follow<'a> for Message<'a> {
type Inner = Message<'a>;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
Self {
_tab: flatbuffers::Table { buf, loc },
}
}
}
impl<'a> Message<'a> {
#[inline]
pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
Message { _tab: table }
}
#[allow(unused_mut)]
pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(
_fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,
args: &'args MessageArgs<'args>,
) -> flatbuffers::WIPOffset<Message<'bldr>> {
let mut builder = MessageBuilder::new(_fbb);
builder.add_bodyLength(args.bodyLength);
if let Some(x) = args.custom_metadata {
builder.add_custom_metadata(x);
}
if let Some(x) = args.header {
builder.add_header(x);
}
builder.add_version(args.version);
builder.add_header_type(args.header_type);
builder.finish()
}
pub const VT_VERSION: flatbuffers::VOffsetT = 4;
pub const VT_HEADER_TYPE: flatbuffers::VOffsetT = 6;
pub const VT_HEADER: flatbuffers::VOffsetT = 8;
pub const VT_BODYLENGTH: flatbuffers::VOffsetT = 10;
pub const VT_CUSTOM_METADATA: flatbuffers::VOffsetT = 12;
#[inline]
pub fn version(&self) -> MetadataVersion {
self._tab
.get::<MetadataVersion>(Message::VT_VERSION, Some(MetadataVersion::V1))
.unwrap()
}
#[inline]
pub fn header_type(&self) -> MessageHeader {
self._tab
.get::<MessageHeader>(Message::VT_HEADER_TYPE, Some(MessageHeader::NONE))
.unwrap()
}
#[inline]
pub fn header(&self) -> Option<flatbuffers::Table<'a>> {
self._tab
.get::<flatbuffers::ForwardsUOffset<flatbuffers::Table<'a>>>(
Message::VT_HEADER,
None,
)
}
#[inline]
pub fn bodyLength(&self) -> i64 {
self._tab
.get::<i64>(Message::VT_BODYLENGTH, Some(0))
.unwrap()
}
#[inline]
pub fn custom_metadata(
&self,
) -> Option<flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<KeyValue<'a>>>> {
self._tab.get::<flatbuffers::ForwardsUOffset<
flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<KeyValue>>,
>>(Message::VT_CUSTOM_METADATA, None)
}
#[inline]
#[allow(non_snake_case)]
pub fn header_as_schema(&self) -> Option<Schema<'a>> {
if self.header_type() == MessageHeader::Schema {
self.header().map(Schema::init_from_table)
} else {
None
}
}
#[inline]
#[allow(non_snake_case)]
pub fn header_as_dictionary_batch(&self) -> Option<DictionaryBatch<'a>> {
if self.header_type() == MessageHeader::DictionaryBatch {
self.header().map(DictionaryBatch::init_from_table)
} else {
None
}
}
#[inline]
#[allow(non_snake_case)]
pub fn header_as_record_batch(&self) -> Option<RecordBatch<'a>> {
if self.header_type() == MessageHeader::RecordBatch {
self.header().map(RecordBatch::init_from_table)
} else {
None
}
}
#[inline]
#[allow(non_snake_case)]
pub fn header_as_tensor(&self) -> Option<Tensor<'a>> {
if self.header_type() == MessageHeader::Tensor {
self.header().map(Tensor::init_from_table)
} else {
None
}
}
#[inline]
#[allow(non_snake_case)]
pub fn header_as_sparse_tensor(&self) -> Option<SparseTensor<'a>> {
if self.header_type() == MessageHeader::SparseTensor {
self.header().map(SparseTensor::init_from_table)
} else {
None
}
}
}
impl flatbuffers::Verifiable for Message<'_> {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier,
pos: usize,
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use flatbuffers::Verifiable;
v.visit_table(pos)?
.visit_field::<MetadataVersion>(&"version", Self::VT_VERSION, false)?
.visit_union::<MessageHeader, _>(&"header_type", Self::VT_HEADER_TYPE, &"header", Self::VT_HEADER, false, |key, v, pos| {
match key {
MessageHeader::Schema => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Schema>>("MessageHeader::Schema", pos),
MessageHeader::DictionaryBatch => v.verify_union_variant::<flatbuffers::ForwardsUOffset<DictionaryBatch>>("MessageHeader::DictionaryBatch", pos),
MessageHeader::RecordBatch => v.verify_union_variant::<flatbuffers::ForwardsUOffset<RecordBatch>>("MessageHeader::RecordBatch", pos),
MessageHeader::Tensor => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Tensor>>("MessageHeader::Tensor", pos),
MessageHeader::SparseTensor => v.verify_union_variant::<flatbuffers::ForwardsUOffset<SparseTensor>>("MessageHeader::SparseTensor", pos),
_ => Ok(()),
}
})?
.visit_field::<i64>(&"bodyLength", Self::VT_BODYLENGTH, false)?
.visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, flatbuffers::ForwardsUOffset<KeyValue>>>>(&"custom_metadata", Self::VT_CUSTOM_METADATA, false)?
.finish();
Ok(())
}
}
pub struct MessageArgs<'a> {
pub version: MetadataVersion,
pub header_type: MessageHeader,
pub header: Option<flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>>,
pub bodyLength: i64,
pub custom_metadata: Option<
flatbuffers::WIPOffset<
flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<KeyValue<'a>>>,
>,
>,
}
impl<'a> Default for MessageArgs<'a> {
#[inline]
fn default() -> Self {
MessageArgs {
version: MetadataVersion::V1,
header_type: MessageHeader::NONE,
header: None,
bodyLength: 0,
custom_metadata: None,
}
}
}
pub struct MessageBuilder<'a: 'b, 'b> {
fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>,
start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b> MessageBuilder<'a, 'b> {
#[inline]
pub fn add_version(&mut self, version: MetadataVersion) {
self.fbb_.push_slot::<MetadataVersion>(
Message::VT_VERSION,
version,
MetadataVersion::V1,
);
}
#[inline]
pub fn add_header_type(&mut self, header_type: MessageHeader) {
self.fbb_.push_slot::<MessageHeader>(
Message::VT_HEADER_TYPE,
header_type,
MessageHeader::NONE,
);
}
#[inline]
pub fn add_header(
&mut self,
header: flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>,
) {
self.fbb_
.push_slot_always::<flatbuffers::WIPOffset<_>>(Message::VT_HEADER, header);
}
#[inline]
pub fn add_bodyLength(&mut self, bodyLength: i64) {
self.fbb_
.push_slot::<i64>(Message::VT_BODYLENGTH, bodyLength, 0);
}
#[inline]
pub fn add_custom_metadata(
&mut self,
custom_metadata: flatbuffers::WIPOffset<
flatbuffers::Vector<'b, flatbuffers::ForwardsUOffset<KeyValue<'b>>>,
>,
) {
self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(
Message::VT_CUSTOM_METADATA,
custom_metadata,
);
}
#[inline]
pub fn new(
_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
) -> MessageBuilder<'a, 'b> {
let start = _fbb.start_table();
MessageBuilder {
fbb_: _fbb,
start_: start,
}
}
#[inline]
pub fn finish(self) -> flatbuffers::WIPOffset<Message<'a>> {
let o = self.fbb_.end_table(self.start_);
flatbuffers::WIPOffset::new(o.value())
}
}
impl std::fmt::Debug for Message<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut ds = f.debug_struct("Message");
ds.field("version", &self.version());
ds.field("header_type", &self.header_type());
match self.header_type() {
MessageHeader::Schema => {
if let Some(x) = self.header_as_schema() {
ds.field("header", &x)
} else {
ds.field(
"header",
&"InvalidFlatbuffer: Union discriminant does not match value.",
)
}
}
MessageHeader::DictionaryBatch => {
if let Some(x) = self.header_as_dictionary_batch() {
ds.field("header", &x)
} else {
ds.field(
"header",
&"InvalidFlatbuffer: Union discriminant does not match value.",
)
}
}
MessageHeader::RecordBatch => {
if let Some(x) = self.header_as_record_batch() {
ds.field("header", &x)
} else {
ds.field(
"header",
&"InvalidFlatbuffer: Union discriminant does not match value.",
)
}
}
MessageHeader::Tensor => {
if let Some(x) = self.header_as_tensor() {
ds.field("header", &x)
} else {
ds.field(
"header",
&"InvalidFlatbuffer: Union discriminant does not match value.",
)
}
}
MessageHeader::SparseTensor => {
if let Some(x) = self.header_as_sparse_tensor() {
ds.field("header", &x)
} else {
ds.field(
"header",
&"InvalidFlatbuffer: Union discriminant does not match value.",
)
}
}
_ => {
let x: Option<()> = None;
ds.field("header", &x)
}
};
ds.field("bodyLength", &self.bodyLength());
ds.field("custom_metadata", &self.custom_metadata());
ds.finish()
}
}
#[inline]
#[deprecated(since = "2.0.0", note = "Deprecated in favor of `root_as...` methods.")]
pub fn get_root_as_message<'a>(buf: &'a [u8]) -> Message<'a> {
unsafe { flatbuffers::root_unchecked::<Message<'a>>(buf) }
}
#[inline]
#[deprecated(since = "2.0.0", note = "Deprecated in favor of `root_as...` methods.")]
pub fn get_size_prefixed_root_as_message<'a>(buf: &'a [u8]) -> Message<'a> {
unsafe { flatbuffers::size_prefixed_root_unchecked::<Message<'a>>(buf) }
}
#[inline]
/// Verifies that a buffer of bytes contains a `Message`
/// and returns it.
/// Note that verification is still experimental and may not
/// catch every error, or be maximally performant. For the
/// previous, unchecked, behavior use
/// `root_as_message_unchecked`.
pub fn root_as_message(buf: &[u8]) -> Result<Message, flatbuffers::InvalidFlatbuffer> {
flatbuffers::root::<Message>(buf)
}
#[inline]
/// Verifies that a buffer of bytes contains a size prefixed
/// `Message` and returns it.
/// Note that verification is still experimental and may not
/// catch every error, or be maximally performant. For the
/// previous, unchecked, behavior use
/// `size_prefixed_root_as_message_unchecked`.
pub fn size_prefixed_root_as_message(
buf: &[u8],
) -> Result<Message, flatbuffers::InvalidFlatbuffer> {
flatbuffers::size_prefixed_root::<Message>(buf)
}
#[inline]
/// Verifies, with the given options, that a buffer of bytes
/// contains a `Message` and returns it.
/// Note that verification is still experimental and may not
/// catch every error, or be maximally performant. For the
/// previous, unchecked, behavior use
/// `root_as_message_unchecked`.
pub fn root_as_message_with_opts<'b, 'o>(
opts: &'o flatbuffers::VerifierOptions,
buf: &'b [u8],
) -> Result<Message<'b>, flatbuffers::InvalidFlatbuffer> {
flatbuffers::root_with_opts::<Message<'b>>(opts, buf)
}
#[inline]
/// Verifies, with the given verifier options, that a buffer of
/// bytes contains a size prefixed `Message` and returns
/// it. Note that verification is still experimental and may not
/// catch every error, or be maximally performant. For the
/// previous, unchecked, behavior use
/// `root_as_message_unchecked`.
pub fn size_prefixed_root_as_message_with_opts<'b, 'o>(
opts: &'o flatbuffers::VerifierOptions,
buf: &'b [u8],
) -> Result<Message<'b>, flatbuffers::InvalidFlatbuffer> {
flatbuffers::size_prefixed_root_with_opts::<Message<'b>>(opts, buf)
}
#[inline]
/// Assumes, without verification, that a buffer of bytes contains a Message and returns it.
/// # Safety
/// Callers must trust the given bytes do indeed contain a valid `Message`.
pub unsafe fn root_as_message_unchecked(buf: &[u8]) -> Message {
flatbuffers::root_unchecked::<Message>(buf)
}
#[inline]
/// Assumes, without verification, that a buffer of bytes contains a size prefixed Message and returns it.
/// # Safety
/// Callers must trust the given bytes do indeed contain a valid size prefixed `Message`.
pub unsafe fn size_prefixed_root_as_message_unchecked(buf: &[u8]) -> Message {
flatbuffers::size_prefixed_root_unchecked::<Message>(buf)
}
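// Hedged usage sketch: verify a byte buffer as a Message and dispatch on the
// header union using the accessors defined above.
fn example_describe_message(buf: &[u8]) -> Result<String, flatbuffers::InvalidFlatbuffer> {
    let msg = root_as_message(buf)?;
    Ok(match msg.header_type() {
        MessageHeader::Schema => "schema message".to_string(),
        MessageHeader::RecordBatch => {
            let rows = msg.header_as_record_batch().map_or(0, |b| b.length());
            format!("record batch with {} rows", rows)
        }
        other => format!("other header: {:?}", other),
    })
}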
#[inline]
pub fn finish_message_buffer<'a, 'b>(
fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
root: flatbuffers::WIPOffset<Message<'a>>,
) {
fbb.finish(root, None);
}
#[inline]
pub fn finish_size_prefixed_message_buffer<'a, 'b>(
fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
root: flatbuffers::WIPOffset<Message<'a>>,
) {
fbb.finish_size_prefixed(root, None);
}
gitlab_test.go | package providers
import (
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/pusher/oauth2_proxy/pkg/apis/sessions"
"github.com/stretchr/testify/assert"
)
func testGitLabProvider(hostname string) *GitLabProvider {
p := NewGitLabProvider(
&ProviderData{
ProviderName: "",
LoginURL: &url.URL{},
RedeemURL: &url.URL{},
ProfileURL: &url.URL{},
ValidateURL: &url.URL{},
Scope: ""})
if hostname != "" {
updateURL(p.Data().LoginURL, hostname)
updateURL(p.Data().RedeemURL, hostname)
updateURL(p.Data().ProfileURL, hostname)
updateURL(p.Data().ValidateURL, hostname)
}
return p
}
func testGitLabBackend() *httptest.Server {
	userInfo := `
		{
			"nickname": "FooBar",
			"email": "foo@bar.com",
			"email_verified": false,
			"groups": ["foo", "bar"]
		}
	`
	authHeader := "Bearer gitlab_access_token"
	return httptest.NewServer(http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) {
			if r.URL.Path == "/oauth/userinfo" {
				if r.Header["Authorization"][0] == authHeader {
					w.WriteHeader(200)
					w.Write([]byte(userInfo))
				} else {
					w.WriteHeader(401)
				}
			} else {
				w.WriteHeader(404)
			}
		}))
}
func TestGitLabProviderBadToken(t *testing.T) {
b := testGitLabBackend()
defer b.Close()
bURL, _ := url.Parse(b.URL)
p := testGitLabProvider(bURL.Host)
session := &sessions.SessionState{AccessToken: "unexpected_gitlab_access_token"}
_, err := p.GetEmailAddress(session)
assert.NotEqual(t, nil, err)
}
func TestGitLabProviderUnverifiedEmailDenied(t *testing.T) {
b := testGitLabBackend()
defer b.Close()
bURL, _ := url.Parse(b.URL)
p := testGitLabProvider(bURL.Host)
session := &sessions.SessionState{AccessToken: "gitlab_access_token"}
_, err := p.GetEmailAddress(session)
assert.NotEqual(t, nil, err)
}
func TestGitLabProviderUnverifiedEmailAllowed(t *testing.T) {
b := testGitLabBackend()
defer b.Close()
bURL, _ := url.Parse(b.URL)
p := testGitLabProvider(bURL.Host)
p.AllowUnverifiedEmail = true
session := &sessions.SessionState{AccessToken: "gitlab_access_token"}
email, err := p.GetEmailAddress(session)
assert.Equal(t, nil, err)
assert.Equal(t, "[email protected]", email)
}
func TestGitLabProviderUsername(t *testing.T) {
b := testGitLabBackend()
defer b.Close()
bURL, _ := url.Parse(b.URL)
p := testGitLabProvider(bURL.Host)
p.AllowUnverifiedEmail = true
session := &sessions.SessionState{AccessToken: "gitlab_access_token"}
username, err := p.GetUserName(session)
assert.Equal(t, nil, err)
assert.Equal(t, "FooBar", username)
}
func TestGitLabProviderGroupMembershipValid(t *testing.T) {
b := testGitLabBackend()
defer b.Close()
bURL, _ := url.Parse(b.URL)
p := testGitLabProvider(bURL.Host)
p.AllowUnverifiedEmail = true
p.Group = "foo"
session := &sessions.SessionState{AccessToken: "gitlab_access_token"}
email, err := p.GetEmailAddress(session)
assert.Equal(t, nil, err)
assert.Equal(t, "[email protected]", email)
}
func TestGitLabProviderGroupMembershipMissing(t *testing.T) {
b := testGitLabBackend()
defer b.Close()
bURL, _ := url.Parse(b.URL)
p := testGitLabProvider(bURL.Host)
p.AllowUnverifiedEmail = true
p.Group = "baz"
session := &sessions.SessionState{AccessToken: "gitlab_access_token"}
_, err := p.GetEmailAddress(session)
assert.NotEqual(t, nil, err)
}
func TestGitLabProviderEmailDomainValid(t *testing.T) {
b := testGitLabBackend()
defer b.Close()
bURL, _ := url.Parse(b.URL)
p := testGitLabProvider(bURL.Host)
p.AllowUnverifiedEmail = true
p.EmailDomains = []string{"bar.com"}
session := &sessions.SessionState{AccessToken: "gitlab_access_token"}
email, err := p.GetEmailAddress(session)
assert.Equal(t, nil, err)
assert.Equal(t, "[email protected]", email)
}
func TestGitLabProviderEmailDomainInvalid(t *testing.T) {
b := testGitLabBackend()
defer b.Close()
bURL, _ := url.Parse(b.URL)
p := testGitLabProvider(bURL.Host)
p.AllowUnverifiedEmail = true
p.EmailDomains = []string{"baz.com"}
session := &sessions.SessionState{AccessToken: "gitlab_access_token"}
_, err := p.GetEmailAddress(session)
assert.NotEqual(t, nil, err)
}
upgrade-pilet.ts | import { resolve } from 'path';
import { LogLevels, ForceOverwrite } from '../types';
import {
  readJson,
  installPackage,
  checkExistingDirectory,
patchPiletPackage,
copyPiralFiles,
getFileStats,
readPiralPackage,
getPiletsInfo,
runScript,
installDependencies,
getCurrentPackageDetails,
checkAppShellPackage,
defaultCacheDir,
removeDirectory,
setLogLevel,
progress,
fail,
log,
} from '../common';
export interface UpgradePiletOptions {
version?: string;
target?: string;
forceOverwrite?: ForceOverwrite;
logLevel?: LogLevels;
}
export const upgradePiletDefaults: UpgradePiletOptions = {
version: undefined,
target: '.',
forceOverwrite: ForceOverwrite.no,
logLevel: LogLevels.info,
};
export async function upgradePilet(baseDir = process.cwd(), options: UpgradePiletOptions = {}) {
const {
version = upgradePiletDefaults.version,
target = upgradePiletDefaults.target,
forceOverwrite = upgradePiletDefaults.forceOverwrite,
logLevel = upgradePiletDefaults.logLevel,
} = options;
setLogLevel(logLevel);
const root = resolve(baseDir, target);
const cache = resolve(root, defaultCacheDir);
const valid = await checkExistingDirectory(root);
if (!valid) {
fail('invalidPiletTarget_0040');
}
const pckg = await readJson(root, 'package.json');
const { devDependencies = {}, piral } = pckg;
if (piral && typeof piral === 'object') {
const sourceName = piral.name;
if (!sourceName || typeof sourceName !== 'string') {
fail('invalidPiletPackage_0042');
}
const currentVersion = devDependencies[sourceName];
if (!currentVersion || typeof currentVersion !== 'string') {
fail('invalidPiralReference_0043');
}
const [packageRef, packageVersion] = await getCurrentPackageDetails(baseDir, sourceName, currentVersion, version);
const originalFiles = await getFileStats(root, sourceName);
progress(`Updating NPM package to %s ...`, packageRef);
await installPackage(packageRef, root, '--no-save', '--no-package-lock');
const piralInfo = await readPiralPackage(root, sourceName);
checkAppShellPackage(piralInfo);
const { preUpgrade, postUpgrade } = getPiletsInfo(piralInfo);
if (preUpgrade) {
progress(`Running preUpgrade script ...`);
log('generalDebug_0003', `Run: ${preUpgrade}`);
await runScript(preUpgrade, root);
}
progress(`Taking care of templating ...`);
await patchPiletPackage(root, sourceName, packageVersion, piralInfo);
await copyPiralFiles(root, sourceName, forceOverwrite, originalFiles);
progress(`Updating dependencies ...`);
await installDependencies(root, '--no-package-lock');
if (postUpgrade) {
progress(`Running postUpgrade script ...`);
log('generalDebug_0003', `Run: ${postUpgrade}`);
await runScript(postUpgrade, root);
}
await removeDirectory(cache);
} else {
fail('invalidPiletPackage_0041');
}
}
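// Hedged usage sketch (not part of this module): invoking upgradePilet from
// another script; the relative import paths and 'latest' version spec are
// assumptions.
//
//   import { upgradePilet } from './upgrade-pilet';
//   import { ForceOverwrite } from '../types';
//
//   await upgradePilet(process.cwd(), {
//     version: 'latest',
//     forceOverwrite: ForceOverwrite.no,
//   });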
zoekt.go | package graphqlbackend
import (
"context"
"fmt"
"math"
"net/url"
"regexp/syntax"
"strings"
"time"
"unicode/utf8"
"github.com/google/zoekt"
zoektquery "github.com/google/zoekt/query"
"github.com/pkg/errors"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/gituri"
"github.com/sourcegraph/sourcegraph/internal/search"
searchbackend "github.com/sourcegraph/sourcegraph/internal/search/backend"
"github.com/sourcegraph/sourcegraph/internal/symbols/protocol"
"github.com/sourcegraph/sourcegraph/internal/trace"
)
func zoektResultCountFactor(numRepos int, query *search.TextPatternInfo) int {
// If we're only searching a small number of repositories, return more comprehensive results. This is
// arbitrary.
k := 1
switch {
case numRepos <= 5:
k = 100
case numRepos <= 10:
k = 10
case numRepos <= 25:
k = 8
case numRepos <= 50:
k = 5
case numRepos <= 100:
k = 3
case numRepos <= 500:
k = 2
}
if query.FileMatchLimit > defaultMaxSearchResults {
k = int(float64(k) * 3 * float64(query.FileMatchLimit) / float64(defaultMaxSearchResults))
}
return k
}
func zoektSearchOpts(k int, query *search.TextPatternInfo) zoekt.SearchOptions {
searchOpts := zoekt.SearchOptions{
MaxWallTime: defaultTimeout,
ShardMaxMatchCount: 100 * k,
TotalMaxMatchCount: 100 * k,
ShardMaxImportantMatch: 15 * k,
TotalMaxImportantMatch: 25 * k,
MaxDocDisplayCount: 2 * defaultMaxSearchResults,
}
// We want zoekt to return more than FileMatchLimit results since we use
// the extra results to populate reposLimitHit. Additionally the defaults
// are very low, so we always want to return at least 2000.
if query.FileMatchLimit > defaultMaxSearchResults {
searchOpts.MaxDocDisplayCount = 2 * int(query.FileMatchLimit)
}
if searchOpts.MaxDocDisplayCount < 2000 {
searchOpts.MaxDocDisplayCount = 2000
}
if userProbablyWantsToWaitLonger := query.FileMatchLimit > defaultMaxSearchResults; userProbablyWantsToWaitLonger {
searchOpts.MaxWallTime *= time.Duration(3 * float64(query.FileMatchLimit) / float64(defaultMaxSearchResults))
}
return searchOpts
}
var errNoResultsInTimeout = errors.New("no results found in specified timeout")
// zoektSearchHEAD searches repositories using zoekt.
//
// Timeouts are reported through the context, and as a special case errNoResultsInTimeout
// is returned if no results are found in the given timeout (instead of the more common
// case of finding partial or full results in the given timeout).
func zoektSearchHEAD(ctx context.Context, args *search.TextParameters, repos []*search.RepositoryRevisions, isSymbol bool, since func(t time.Time) time.Duration) (fm []*FileMatchResolver, limitHit bool, reposLimitHit map[string]struct{}, err error) {
if len(repos) == 0 {
return nil, false, nil, nil
}
// Tell zoekt which repos to search
repoSet := &zoektquery.RepoSet{Set: make(map[string]bool, len(repos))}
repoMap := make(map[string]*search.RepositoryRevisions, len(repos))
for _, repoRev := range repos {
repoSet.Set[string(repoRev.Repo.Name)] = true
repoMap[string(repoRev.Repo.Name)] = repoRev
}
queryExceptRepos, err := queryToZoektQuery(args.PatternInfo, isSymbol)
if err != nil {
return nil, false, nil, err
}
finalQuery := zoektquery.NewAnd(repoSet, queryExceptRepos)
tr, ctx := trace.New(ctx, "zoekt.Search", fmt.Sprintf("%d %+v", len(repoSet.Set), finalQuery.String()))
defer func() {
tr.SetError(err)
if len(fm) > 0 {
tr.LazyPrintf("%d file matches", len(fm))
}
tr.Finish()
}()
k := zoektResultCountFactor(len(repos), args.PatternInfo)
searchOpts := zoektSearchOpts(k, args.PatternInfo)
if args.UseFullDeadline {
// If the user manually specified a timeout, allow zoekt to use all of the remaining timeout.
deadline, _ := ctx.Deadline()
searchOpts.MaxWallTime = time.Until(deadline)
// We don't want our context's deadline to cut off zoekt so that we can get the results
// found before the deadline.
//
// We'll create a new context that gets cancelled if the other context is cancelled for any
// reason other than the deadline being exceeded. This essentially means the deadline for the new context
// will be `deadline + time for zoekt to cancel + network latency`.
cNew, cancel := context.WithCancel(context.Background())
go func(cOld context.Context) {
<-cOld.Done()
// cancel the new context if the old one is done for some reason other than the deadline passing.
if cOld.Err() != context.DeadlineExceeded {
cancel()
}
}(ctx)
ctx = cNew
defer cancel()
}
// If the query has a `repohasfile` or `-repohasfile` flag, we want to construct a new reposet based
// on the values passed in to the flag.
newRepoSet, err := createNewRepoSetWithRepoHasFileInputs(ctx, args.PatternInfo, args.Zoekt.Client, repoSet)
if err != nil {
return nil, false, nil, err
}
finalQuery = zoektquery.NewAnd(newRepoSet, queryExceptRepos)
tr.LazyPrintf("after repohasfile filters: nRepos=%d query=%v", len(newRepoSet.Set), finalQuery)
t0 := time.Now()
resp, err := args.Zoekt.Client.Search(ctx, finalQuery, &searchOpts)
if err != nil {
return nil, false, nil, err
}
if resp.FileCount == 0 && resp.MatchCount == 0 && since(t0) >= searchOpts.MaxWallTime {
return nil, false, nil, errNoResultsInTimeout
}
limitHit = resp.FilesSkipped+resp.ShardsSkipped > 0
// Repositories that weren't fully evaluated because they hit the Zoekt or Sourcegraph file match limits.
reposLimitHit = make(map[string]struct{})
if limitHit {
// Zoekt either did not evaluate some files in repositories, or ignored some repositories altogether.
// In this case, we can't be sure that we have exhaustive results for _any_ repository. So, all file
// matches are from repos with potentially skipped matches.
for _, file := range resp.Files {
if _, ok := reposLimitHit[file.Repository]; !ok {
reposLimitHit[file.Repository] = struct{}{}
}
}
}
if len(resp.Files) == 0 {
return nil, false, nil, nil
}
maxLineMatches := 25 + k
maxLineFragmentMatches := 3 + k
if limit := int(args.PatternInfo.FileMatchLimit); len(resp.Files) > limit {
// List of files we cut out from the Zoekt response because they exceed the file match limit on the Sourcegraph end.
// We use this to get a list of repositories that do not have complete results.
fileMatchesInSkippedRepos := resp.Files[limit:]
resp.Files = resp.Files[:limit]
if !limitHit {
// Zoekt evaluated all files and repositories, but Zoekt returned more file matches
// than the limit we set on Sourcegraph, so we cut out more results.
// Generate a list of repositories that had results cut because they exceeded the file match limit set on Sourcegraph.
for _, file := range fileMatchesInSkippedRepos {
if _, ok := reposLimitHit[file.Repository]; !ok {
reposLimitHit[file.Repository] = struct{}{}
}
}
}
limitHit = true
}
matches := make([]*FileMatchResolver, len(resp.Files))
repoResolvers := make(RepositoryResolverCache)
for i, file := range resp.Files {
fileLimitHit := false
if len(file.LineMatches) > maxLineMatches {
file.LineMatches = file.LineMatches[:maxLineMatches]
fileLimitHit = true
limitHit = true
}
repoRev := repoMap[file.Repository]
if repoResolvers[repoRev.Repo.Name] == nil {
repoResolvers[repoRev.Repo.Name] = &RepositoryResolver{repo: repoRev.Repo}
}
inputRev := repoRev.RevSpecs()[0]
// symbols is set in symbols search, lines in text search.
var (
symbols []*searchSymbolResult
lines []*lineMatch
matchCount int
)
if !isSymbol {
lines, matchCount = zoektFileMatchToLineMatches(maxLineFragmentMatches, &file)
} else {
symbols = zoektFileMatchToSymbolResults(repoResolvers[repoRev.Repo.Name], inputRev, &file)
}
matches[i] = &FileMatchResolver{
JPath: file.FileName,
JLineMatches: lines,
JLimitHit: fileLimitHit,
MatchCount: matchCount, // We do not use resp.MatchCount because it counts the number of lines matched, not the number of fragments.
uri: fileMatchURI(repoRev.Repo.Name, "", file.FileName),
symbols: symbols,
Repo: repoResolvers[repoRev.Repo.Name],
CommitID: api.CommitID(file.Version),
}
}
return matches, limitHit, reposLimitHit, nil
}
func zoektFileMatchToLineMatches(maxLineFragmentMatches int, file *zoekt.FileMatch) ([]*lineMatch, int) {
var matchCount int
lines := make([]*lineMatch, 0, len(file.LineMatches))
for _, l := range file.LineMatches {
if l.FileName {
continue
}
if len(l.LineFragments) > maxLineFragmentMatches {
l.LineFragments = l.LineFragments[:maxLineFragmentMatches]
}
offsets := make([][2]int32, len(l.LineFragments))
for k, m := range l.LineFragments {
offset := utf8.RuneCount(l.Line[:m.LineOffset])
length := utf8.RuneCount(l.Line[m.LineOffset : m.LineOffset+m.MatchLength])
offsets[k] = [2]int32{int32(offset), int32(length)}
}
matchCount += len(offsets)
lines = append(lines, &lineMatch{
JPreview: string(l.Line),
JLineNumber: int32(l.LineNumber - 1),
JOffsetAndLengths: offsets,
})
}
return lines, matchCount
}
func zoektFileMatchToSymbolResults(repo *RepositoryResolver, inputRev string, file *zoekt.FileMatch) []*searchSymbolResult {
// Symbol search returns a resolver so we need to pass in some
// extra stuff. This is a sign that we can probably restructure
// resolvers to avoid this.
baseURI := &gituri.URI{URL: url.URL{Scheme: "git", Host: repo.Name(), RawQuery: url.QueryEscape(inputRev)}}
commit := &GitCommitResolver{
repoResolver: repo,
oid: GitObjectID(file.Version),
inputRev: &inputRev,
}
lang := strings.ToLower(file.Language)
symbols := make([]*searchSymbolResult, 0, len(file.LineMatches))
for _, l := range file.LineMatches {
if l.FileName {
continue
}
for _, m := range l.LineFragments {
if m.SymbolInfo == nil {
continue
}
symbols = append(symbols, &searchSymbolResult{
symbol: protocol.Symbol{
Name: m.SymbolInfo.Sym,
Kind: m.SymbolInfo.Kind,
Parent: m.SymbolInfo.Parent,
ParentKind: m.SymbolInfo.ParentKind,
Path: file.FileName,
Line: l.LineNumber,
},
lang: lang,
baseURI: baseURI,
commit: commit,
})
}
}
return symbols
}
// createNewRepoSetWithRepoHasFileInputs mutates repoSet such that it accounts
// for the `repohasfile` and `-repohasfile` flags that may have been passed in
// the query. As a convenience it returns the mutated RepoSet.
func createNewRepoSetWithRepoHasFileInputs(ctx context.Context, query *search.TextPatternInfo, searcher zoekt.Searcher, repoSet *zoektquery.RepoSet) (*zoektquery.RepoSet, error) {
// Shortcut if we have no repos to search
if len(repoSet.Set) == 0 {
return repoSet, nil
}
flagIsInQuery := len(query.FilePatternsReposMustInclude) > 0
negatedFlagIsInQuery := len(query.FilePatternsReposMustExclude) > 0
// Construct queries which search for repos containing the files passed into `repohasfile`
filesToIncludeQueries, err := queryToZoektFileOnlyQueries(query, query.FilePatternsReposMustInclude)
if err != nil {
return nil, err
}
newSearchOpts := zoekt.SearchOptions{
ShardMaxMatchCount: 1,
TotalMaxMatchCount: math.MaxInt32,
ShardMaxImportantMatch: 1,
TotalMaxImportantMatch: math.MaxInt32,
MaxDocDisplayCount: 0,
}
newSearchOpts.SetDefaults()
if flagIsInQuery {
for _, q := range filesToIncludeQueries {
// Shortcut if we have no repos to search
if len(repoSet.Set) == 0 {
return repoSet, nil
}
// Execute a new Zoekt search for each file passed in to a `repohasfile` flag.
includeResp, err := searcher.Search(ctx, zoektquery.NewAnd(repoSet, q), &newSearchOpts)
if err != nil {
return nil, errors.Wrapf(err, "searching for %v", q.String())
}
newRepoSet := make(map[string]bool, len(includeResp.RepoURLs))
for repoURL := range includeResp.RepoURLs {
newRepoSet[repoURL] = true
}
			// We want repoSet = repoSet intersect newRepoSet, but newRepoSet
			// is a subset, so we can just set repoSet = newRepoSet.
repoSet.Set = newRepoSet
}
}
// Construct queries which search for repos containing the files passed into `-repohasfile`
filesToExcludeQueries, err := queryToZoektFileOnlyQueries(query, query.FilePatternsReposMustExclude)
if err != nil {
return nil, err
}
if negatedFlagIsInQuery {
for _, q := range filesToExcludeQueries {
// Shortcut if we have no repos to search
if len(repoSet.Set) == 0 {
return repoSet, nil
}
excludeResp, err := searcher.Search(ctx, zoektquery.NewAnd(repoSet, q), &newSearchOpts)
if err != nil {
return nil, err
}
for repoURL := range excludeResp.RepoURLs {
// For each repo that had a result in the exclude set, if it exists in the repoSet, set the value to false so we don't search over it.
if repoSet.Set[repoURL] {
delete(repoSet.Set, repoURL)
}
}
}
}
return repoSet, nil
}
func noOpAnyChar(re *syntax.Regexp) {
if re.Op == syntax.OpAnyChar {
re.Op = syntax.OpAnyCharNotNL
}
for _, s := range re.Sub {
noOpAnyChar(s)
}
}
func parseRe(pattern string, filenameOnly bool, queryIsCaseSensitive bool) (zoektquery.Q, error) {
	// These are the flags used by zoekt, which differ from the ones searcher uses.
re, err := syntax.Parse(pattern, syntax.ClassNL|syntax.PerlX|syntax.UnicodeGroups)
if err != nil {
return nil, err
}
noOpAnyChar(re)
// zoekt decides to use its literal optimization at the query parser
// level, so we check if our regex can just be a literal.
	if re.Op == syntax.OpLiteral {
		return &zoektquery.Substring{
			Pattern:       string(re.Rune),
			CaseSensitive: queryIsCaseSensitive,
			FileName:      filenameOnly,
		}, nil
	}
return &zoektquery.Regexp{
Regexp: re,
CaseSensitive: queryIsCaseSensitive,
FileName: filenameOnly,
}, nil
}
func fileRe(pattern string, queryIsCaseSensitive bool) (zoektquery.Q, error) {
return parseRe(pattern, true, queryIsCaseSensitive)
}
func queryToZoektQuery(query *search.TextPatternInfo, isSymbol bool) (zoektquery.Q, error) {
var and []zoektquery.Q
var q zoektquery.Q
var err error
if query.IsRegExp {
fileNameOnly := query.PatternMatchesPath && !query.PatternMatchesContent
q, err = parseRe(query.Pattern, fileNameOnly, query.IsCaseSensitive)
if err != nil {
return nil, err
}
} else {
q = &zoektquery.Substring{
Pattern: query.Pattern,
CaseSensitive: query.IsCaseSensitive,
FileName: true,
Content: true,
}
}
if isSymbol {
q = &zoektquery.Symbol{
Expr: q,
}
}
and = append(and, q)
// zoekt also uses regular expressions for file paths
// TODO PathPatternsAreCaseSensitive
// TODO whitespace in file path patterns?
if !query.PathPatternsAreRegExps {
return nil, errors.New("zoekt only supports regex path patterns")
}
for _, p := range query.IncludePatterns {
q, err := fileRe(p, query.IsCaseSensitive)
if err != nil {
return nil, err
}
and = append(and, q)
}
if query.ExcludePattern != "" {
q, err := fileRe(query.ExcludePattern, query.IsCaseSensitive)
if err != nil {
return nil, err
}
and = append(and, &zoektquery.Not{Child: q})
}
return zoektquery.Simplify(zoektquery.NewAnd(and...)), nil
}
// queryToZoektFileOnlyQueries constructs a list of Zoekt queries that search for a file pattern(s).
// `listOfFilePaths` specifies which field on `query` should be the list of file patterns to look for.
// A separate zoekt query is created for each file path that should be searched.
func queryToZoektFileOnlyQueries(query *search.TextPatternInfo, listOfFilePaths []string) ([]zoektquery.Q, error) {
var zoektQueries []zoektquery.Q
if !query.PathPatternsAreRegExps {
return nil, errors.New("zoekt only supports regex path patterns")
}
for _, p := range listOfFilePaths {
q, err := fileRe(p, query.IsCaseSensitive)
if err != nil {
return nil, err
}
zoektQueries = append(zoektQueries, zoektquery.Simplify(q))
}
return zoektQueries, nil
}
// zoektIndexedRepos splits the input repo list into two parts: (1) the
// repositories `indexed` by Zoekt and (2) the repositories that are
// `unindexed`.
func zoektIndexedRepos(ctx context.Context, z *searchbackend.Zoekt, revs []*search.RepositoryRevisions, filter func(*zoekt.Repository) bool) (indexed, unindexed []*search.RepositoryRevisions, err error) {
ctx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
set, err := z.ListAll(ctx)
if err != nil {
return nil, nil, err
}
// PERF: If len(revs) is large, we expect to be doing an indexed
// search. So set indexed to the max size it can be to avoid growing.
indexed = make([]*search.RepositoryRevisions, 0, len(revs))
unindexed = make([]*search.RepositoryRevisions, 0)
for _, reporev := range revs {
repo, ok := set[string(reporev.Repo.Name)]
if !ok || (filter != nil && !filter(repo)) {
unindexed = append(unindexed, reporev)
continue
}
revspecs := reporev.RevSpecs()
if len(revspecs) != len(reporev.Revs) {
// Contains a RefGlob or ExcludeRefGlob so we can't do indexed
// search on it.
unindexed = append(unindexed, reporev)
continue
}
branches := make([]string, 0, len(revspecs))
for _, rev := range revspecs {
if rev == "" || rev == "HEAD" {
// Zoekt convention that first branch is HEAD
branches = append(branches, repo.Branches[0].Name)
continue
}
for _, branch := range repo.Branches {
if branch.Name == rev {
branches = append(branches, branch.Name)
break
}
// Check if rev is an abbrev commit SHA
if len(rev) >= 4 && strings.HasPrefix(branch.Version, rev) {
branches = append(branches, branch.Name)
}
}
}
// Only search zoekt if we can search all revisions on it.
if len(branches) == len(revspecs) {
// TODO we should return the list of branches to search. Maybe
// create the zoektquery.RepoBranches map here?
indexed = append(indexed, reporev)
} else {
unindexed = append(unindexed, reporev)
}
}
return indexed, unindexed, nil
}
cassandra_mgr.py | from cassandra.cluster import Cluster
class CassandraMgr:
"""
	Manage operations with Apache Cassandra.
"""
	def __init__(self, config):
"""
Constructor.
		:param config: configuration of the cluster of Apache Cassandra -> ip, replication factor, replication class and
key space.
"""
self.ip = config['ip']
self.replication_factor = config["replication_factor"]
self.replication_class = config["replication_class"]
self.key_space = config["key_space"]
self.cluster = Cluster(self.ip)
def connect(self):
"""
Create a connection from the configuration passed in class constructor.
		Creates a keyspace and returns a session.
:return: session.
"""
session = self.cluster.connect()
cql_create_keyspace = """
CREATE KEYSPACE IF NOT EXISTS %s WITH REPLICATION = { 'class' : '%s', 'replication_factor' : %s }
""" % (self.key_space, self.replication_class, self.replication_factor)
try:
session.execute(cql_create_keyspace)
except Exception as e:
print(e)
try:
			session.set_keyspace(self.key_space)
except Exception as e:
print(e)
return session
def disconnect(self, session):
"""
Finalise the session and cluster shutdown.
:param session: session
"""
session.shutdown()
self.cluster.shutdown()
@staticmethod
def create_table(session, table, fields, primary_key):
"""
Create an Apache Cassandra table.
:param session: session.
:param table: table to create.
:param fields: fields of the table.
:param primary_key: primary key of the table.
"""
fields_string = ", ".join(fields)
query = "CREATE TABLE IF NOT EXISTS %s (%s , PRIMARY KEY %s)" % (table, fields_string, primary_key)
try:
session.execute(query)
except Exception as e:
print(e)
@staticmethod
def insert_cassandra_from_df(session, table, columns_table, df):
"""
Insert a pandas dataframe into a Cassandra table.
:param session: session.
:param table: table where insert rows.
:param columns_table: columns of the table.
:param df: pandas dataframe to insert into the table.
"""
query = CassandraMgr.get_insert_query(table, columns_table)
for index, row in df.iterrows():
			session.execute(query, tuple(row[x] for x in df.columns))
@staticmethod
def select(session, fields, table, filters):
"""
		Run a SELECT statement against an Apache Cassandra table.
:param session: session.
:param fields: projection of the select statement
:param table: table
:param filters: filters of the WHERE clause.
:return: list of rows of the request.
"""
		fields_string = ", ".join(fields)
		query = "select %s from %s WHERE %s" % (fields_string, table, filters)
		rows = []
		try:
			rows = session.execute(query)
		except Exception as e:
			print(e)
		return rows
@staticmethod
def get_insert_query(table: str, columns):
"""
Builds an INSERT statement string.
:param table: table
:param columns: columns to insert.
:return: string with INSERT query.
"""
query = "INSERT INTO %s (%s) " % (table, ", ".join(columns))
query = query + " VALUES (" + ", ".join(["%s"] * len(columns)) + ") "
return query
@staticmethod
def drop_table(session, table):
"""
Drop an Apache Cassandra table.
:param session: session.
:param table: table to drop.
"""
query = "drop table %s" % table
try:
session.execute(query)
except Exception as e:
print(e)
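# Example usage (illustrative sketch only -- the config values and table
# layout below are hypothetical, not part of this module):
#
#   mgr = CassandraMgr({
#       "ip": ["127.0.0.1"],
#       "replication_factor": 1,
#       "replication_class": "SimpleStrategy",
#       "key_space": "sparkify",
#   })
#   session = mgr.connect()
#   CassandraMgr.create_table(session, "songs", ["title text", "year int"], "(title)")
#   mgr.disconnect(session)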
dcim_devices_partial_update_responses.go | // Code generated by go-swagger; DO NOT EDIT.
// Copyright 2020 The go-netbox Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package dcim
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
models "github.com/gorjan87/go-netbox/netbox/models"
)
// DcimDevicesPartialUpdateReader is a Reader for the DcimDevicesPartialUpdate structure.
type DcimDevicesPartialUpdateReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *DcimDevicesPartialUpdateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewDcimDevicesPartialUpdateOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
return nil, runtime.NewAPIError("unknown error", response, response.Code())
}
}
// NewDcimDevicesPartialUpdateOK creates a DcimDevicesPartialUpdateOK with default headers values
func NewDcimDevicesPartialUpdateOK() *DcimDevicesPartialUpdateOK {
return &DcimDevicesPartialUpdateOK{}
}
/*DcimDevicesPartialUpdateOK handles this case with default header values.
DcimDevicesPartialUpdateOK dcim devices partial update o k
*/
type DcimDevicesPartialUpdateOK struct {
Payload *models.DeviceWithConfigContext
}
func (o *DcimDevicesPartialUpdateOK) Error() string {
return fmt.Sprintf("[PATCH /dcim/devices/{id}/][%d] dcimDevicesPartialUpdateOK %+v", 200, o.Payload)
}
func (o *DcimDevicesPartialUpdateOK) GetPayload() *models.DeviceWithConfigContext {
return o.Payload
}
func (o *DcimDevicesPartialUpdateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.DeviceWithConfigContext)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
test_whats.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from whats import whats
def test_tellme():
	assert whats.tellme('美妙的新世界')
encoder.rs | extern crate crc32fast;
extern crate deflate;
use std::borrow::Cow;
use std::error;
use std::fmt;
use std::io::{self, Read, Write};
use std::mem;
use std::result;
use crc32fast::Hasher as Crc32;
use crate::chunk;
use crate::common::{Info, ColorType, BitDepth, Compression};
use crate::filter::{FilterType, filter};
use crate::traits::WriteBytesExt;
pub type Result<T> = result::Result<T, EncodingError>;
#[derive(Debug)]
pub enum EncodingError {
IoError(io::Error),
Format(Cow<'static, str>),
}
impl error::Error for EncodingError {
fn description(&self) -> &str {
use self::EncodingError::*;
match *self {
IoError(ref err) => err.description(),
Format(ref desc) => &desc,
}
}
}
impl fmt::Display for EncodingError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
write!(fmt, "{}", (self as &dyn error::Error).description())
}
}
impl From<io::Error> for EncodingError {
fn from(err: io::Error) -> EncodingError {
EncodingError::IoError(err)
}
}
impl From<EncodingError> for io::Error {
fn from(err: EncodingError) -> io::Error {
io::Error::new(io::ErrorKind::Other, (&err as &dyn error::Error).description())
}
}
/// PNG Encoder
pub struct Encoder<W: Write> {
w: W,
info: Info,
}
impl<W: Write> Encoder<W> {
pub fn new(w: W, width: u32, height: u32) -> Encoder<W> {
let mut info = Info::default();
info.width = width;
info.height = height;
Encoder { w, info }
}
pub fn write_header(self) -> Result<Writer<W>> {
Writer::new(self.w, self.info).init()
}
/// Set the color of the encoded image.
///
/// These correspond to the color types in the png IHDR data that will be written. The length
/// of the image data that is later supplied must match the color type, otherwise an error will
/// be emitted.
pub fn set_color(&mut self, color: ColorType) {
self.info.color_type = color;
}
/// Set the indicated depth of the image data.
pub fn set_depth(&mut self, depth: BitDepth) {
self.info.bit_depth = depth;
}
/// Set compression parameters.
///
/// Accepts a `Compression` or any type that can transform into a `Compression`. Notably `deflate::Compression` and
/// `deflate::CompressionOptions` which "just work".
pub fn set_compression<C: Into<Compression>>(&mut self, compression: C) {
self.info.compression = compression.into();
}
/// Set the used filter type.
///
/// The default filter is [`FilterType::Sub`] which provides a basic prediction algorithm for
/// sample values based on the previous. For a potentially better compression ratio, at the
/// cost of more complex processing, try out [`FilterType::Paeth`].
///
/// [`FilterType::Sub`]: enum.FilterType.html#variant.Sub
/// [`FilterType::Paeth`]: enum.FilterType.html#variant.Paeth
pub fn set_filter(&mut self, filter: FilterType) {
self.info.filter = filter;
}
}
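// Illustrative sketch (not part of the original source): typical use of the
// `Encoder` API above for a 2x1 8-bit RGB image.
//
//     let file = std::fs::File::create("out.png")?;
//     let mut encoder = Encoder::new(file, 2, 1);
//     encoder.set_color(ColorType::RGB);
//     encoder.set_depth(BitDepth::Eight);
//     let mut writer = encoder.write_header()?;
//     writer.write_image_data(&[255, 0, 0, 0, 255, 0])?; // two RGB pixels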
/// PNG writer
pub struct Writer<W: Write> {
w: W,
info: Info,
}
impl<W: Write> Writer<W> {
fn new(w: W, info: Info) -> Writer<W> {
Writer { w, info }
}
fn init(mut self) -> Result<Self> {
self.w.write_all(&[137, 80, 78, 71, 13, 10, 26, 10])?;
let mut data = [0; 13];
(&mut data[..]).write_be(self.info.width)?;
(&mut data[4..]).write_be(self.info.height)?;
data[8] = self.info.bit_depth as u8;
data[9] = self.info.color_type as u8;
data[12] = if self.info.interlaced { 1 } else { 0 };
self.write_chunk(chunk::IHDR, &data)?;
Ok(self)
}
pub fn write_chunk(&mut self, name: [u8; 4], data: &[u8]) -> Result<()> {
self.w.write_be(data.len() as u32)?;
self.w.write_all(&name)?;
self.w.write_all(data)?;
let mut crc = Crc32::new();
crc.update(&name);
crc.update(data);
self.w.write_be(crc.finalize())?;
Ok(())
}
/// Writes the image data.
    pub fn write_image_data(&mut self, data: &[u8]) -> Result<()> {
let bpp = self.info.bytes_per_pixel();
let in_len = self.info.raw_row_length() - 1;
let mut prev = vec![0; in_len];
let mut current = vec![0; in_len];
let data_size = in_len * self.info.height as usize;
if data_size != data.len() {
let message = format!("wrong data size, expected {} got {}", data_size, data.len());
return Err(EncodingError::Format(message.into()));
}
let mut zlib = deflate::write::ZlibEncoder::new(Vec::new(), self.info.compression.clone());
let filter_method = self.info.filter;
for line in data.chunks(in_len) {
current.copy_from_slice(&line);
zlib.write_all(&[filter_method as u8])?;
filter(filter_method, bpp, &prev, &mut current);
zlib.write_all(¤t)?;
mem::swap(&mut prev, &mut current);
}
self.write_chunk(chunk::IDAT, &zlib.finish()?)
}
    /// Create a stream writer.
    ///
    /// This allows you to create images that do not fit
    /// in memory. The default chunk size is 4K; use
    /// `stream_writer_with_size` to set another chunk
    /// size.
pub fn stream_writer(&mut self) -> StreamWriter<W> {
self.stream_writer_with_size(4 * 1024)
}
/// Create a stream writer with custom buffer size.
///
/// See `stream_writer`
pub fn stream_writer_with_size(&mut self, size: usize) -> StreamWriter<W> {
StreamWriter::new(self, size)
}
}
impl<W: Write> Drop for Writer<W> {
fn drop(&mut self) {
let _ = self.write_chunk(chunk::IEND, &[]);
}
}
struct ChunkWriter<'a, W: Write> {
writer: &'a mut Writer<W>,
buffer: Vec<u8>,
index: usize,
}
impl<'a, W: Write> ChunkWriter<'a, W> {
fn new(writer: &'a mut Writer<W>, buf_len: usize) -> ChunkWriter<'a, W> {
ChunkWriter {
writer,
buffer: vec![0; buf_len],
index: 0,
}
}
}
impl<'a, W: Write> Write for ChunkWriter<'a, W> {
fn write(&mut self, mut buf: &[u8]) -> io::Result<usize> {
let written = buf.read(&mut self.buffer[self.index..])?;
self.index += written;
if self.index + 1 >= self.buffer.len() {
self.writer.write_chunk(chunk::IDAT, &self.buffer)?;
self.index = 0;
}
Ok(written)
}
fn flush(&mut self) -> io::Result<()> {
if self.index > 0 {
self.writer.write_chunk(chunk::IDAT, &self.buffer[..=self.index])?;
}
self.index = 0;
Ok(())
}
}
impl<'a, W: Write> Drop for ChunkWriter<'a, W> {
fn drop(&mut self) {
let _ = self.flush();
}
}
/// Streaming png writer
///
/// This may silently fail in the destructor, so it is a good idea to call
/// [`finish`](#method.finish) or [`flush`](https://doc.rust-lang.org/stable/std/io/trait.Write.html#tymethod.flush) before dropping.
pub struct StreamWriter<'a, W: Write> {
writer: deflate::write::ZlibEncoder<ChunkWriter<'a, W>>,
prev_buf: Vec<u8>,
curr_buf: Vec<u8>,
index: usize,
bpp: usize,
filter: FilterType,
}
impl<'a, W: Write> StreamWriter<'a, W> {
fn new(writer: &'a mut Writer<W>, buf_len: usize) -> StreamWriter<'a, W> {
let bpp = writer.info.bytes_per_pixel();
let in_len = writer.info.raw_row_length() - 1;
let filter = writer.info.filter;
let prev_buf = vec![0; in_len];
let curr_buf = vec![0; in_len];
let compression = writer.info.compression.clone();
let chunk_writer = ChunkWriter::new(writer, buf_len);
let zlib = deflate::write::ZlibEncoder::new(chunk_writer, compression);
StreamWriter {
writer: zlib,
index: 0,
prev_buf,
curr_buf,
bpp,
filter,
}
}
pub fn finish(mut self) -> Result<()> {
// TODO: call `writer.finish` somehow?
self.flush()?;
Ok(())
}
}
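// Illustrative sketch (assumption, not from the original source): streaming
// scanlines through `stream_writer` instead of buffering the whole image,
// where `rows` is any iterator yielding `&[u8]` scanlines.
//
//     let mut writer = encoder.write_header()?;
//     let mut stream = writer.stream_writer();
//     for row in rows {
//         stream.write_all(row)?;
//     }
//     stream.finish()?;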
impl<'a, W: Write> Write for StreamWriter<'a, W> {
fn write(&mut self, mut buf: &[u8]) -> io::Result<usize> {
let written = buf.read(&mut self.curr_buf[self.index..])?;
self.index += written;
if self.index >= self.curr_buf.len() {
self.writer.write_all(&[self.filter as u8])?;
filter(self.filter, self.bpp, &self.prev_buf, &mut self.curr_buf);
self.writer.write_all(&self.curr_buf)?;
mem::swap(&mut self.prev_buf, &mut self.curr_buf);
self.index = 0;
}
Ok(written)
}
fn flush(&mut self) -> io::Result<()> {
self.writer.flush()?;
if self.index > 0 {
let message = format!("wrong data size, got {} bytes too many", self.index);
return Err(EncodingError::Format(message.into()).into());
}
Ok(())
}
}
impl<'a, W: Write> Drop for StreamWriter<'a, W> {
fn drop(&mut self) {
let _ = self.flush();
}
}
#[cfg(test)]
mod tests {
use super::*;
extern crate glob;
use rand::{thread_rng, Rng};
use std::{io, cmp};
use std::io::Write;
use std::fs::File;
#[test]
fn roundtrip() {
// More loops = more random testing, but also more test wait time
for _ in 0..10 {
for path in glob::glob("tests/pngsuite/*.png").unwrap().map(|r| r.unwrap()) {
if path.file_name().unwrap().to_str().unwrap().starts_with("x") {
// x* files are expected to fail to decode
continue;
}
// Decode image
let decoder = crate::Decoder::new(File::open(path).unwrap());
let (info, mut reader) = decoder.read_info().unwrap();
if info.line_size != 32 {
// TODO encoding only works with line size 32?
continue;
}
let mut buf = vec![0; info.buffer_size()];
reader.next_frame(&mut buf).unwrap();
// Encode decoded image
let mut out = Vec::new();
{
let mut wrapper = RandomChunkWriter {
rng: thread_rng(),
w: &mut out
};
let mut encoder = Encoder::new(&mut wrapper, info.width, info.height).write_header().unwrap();
encoder.write_image_data(&buf).unwrap();
}
// Decode encoded decoded image
let decoder = crate::Decoder::new(&*out);
let (info, mut reader) = decoder.read_info().unwrap();
let mut buf2 = vec![0; info.buffer_size()];
reader.next_frame(&mut buf2).unwrap();
// check if the encoded image is ok:
assert_eq!(buf, buf2);
}
}
}
#[test]
fn roundtrip_stream() {
// More loops = more random testing, but also more test wait time
for _ in 0..10 {
for path in glob::glob("tests/pngsuite/*.png").unwrap().map(|r| r.unwrap()) {
if path.file_name().unwrap().to_str().unwrap().starts_with("x") {
// x* files are expected to fail to decode
continue;
}
// Decode image
let decoder = crate::Decoder::new(File::open(path).unwrap());
let (info, mut reader) = decoder.read_info().unwrap();
if info.line_size != 32 {
// TODO encoding only works with line size 32?
continue;
}
let mut buf = vec![0; info.buffer_size()];
reader.next_frame(&mut buf).unwrap();
// Encode decoded image
let mut out = Vec::new();
{
let mut wrapper = RandomChunkWriter {
rng: thread_rng(),
w: &mut out
};
let mut encoder = Encoder::new(&mut wrapper, info.width, info.height).write_header().unwrap();
let mut stream_writer = encoder.stream_writer();
let mut outer_wrapper = RandomChunkWriter {
rng: thread_rng(),
w: &mut stream_writer
};
outer_wrapper.write_all(&buf).unwrap();
}
// Decode encoded decoded image
let decoder = crate::Decoder::new(&*out);
let (info, mut reader) = decoder.read_info().unwrap();
let mut buf2 = vec![0; info.buffer_size()];
reader.next_frame(&mut buf2).unwrap();
// check if the encoded image is ok:
assert_eq!(buf, buf2);
}
}
}
#[test]
fn expect_error_on_wrong_image_len() -> Result<()> {
use std::io::Cursor;
let width = 10;
let height = 10;
let output = vec![0u8; 1024];
let writer = Cursor::new(output);
let mut encoder = Encoder::new(writer, width as u32, height as u32);
encoder.set_depth(BitDepth::Eight);
encoder.set_color(ColorType::RGB);
let mut png_writer = encoder.write_header()?;
let correct_image_size = width * height * 3;
let image = vec![0u8; correct_image_size + 1];
let result = png_writer.write_image_data(image.as_ref());
assert!(result.is_err());
Ok(())
}
/// A Writer that only writes a few bytes at a time
struct RandomChunkWriter<'a, R: Rng, W: Write + 'a> {
rng: R,
w: &'a mut W
}
impl<'a, R: Rng, W: Write + 'a> Write for RandomChunkWriter<'a, R, W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
// choose a random length to write
let len = cmp::min(self.rng.gen_range(1, 50), buf.len());
self.w.write(&buf[0..len])
}
fn flush(&mut self) -> io::Result<()> {
self.w.flush()
}
}
}
test_MBus_connect.py | import sys
sys.path.append('../python-mbus')
import pytest
from mbus import MBus
@pytest.fixture
def mbus_tcp():
return MBus.MBus(host="127.0.0.1")
def test_connect(mbus_tcp):
mbus_tcp.connect()
mmsc.go | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package minmaxsumcount // import "go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
import (
"context"
"sync"
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
)
type (
// Aggregator aggregates events that form a distribution,
// keeping only the min, max, sum, and count.
Aggregator struct {
lock sync.Mutex
current state
checkpoint state
kind metric.NumberKind
}
state struct {
count metric.Number
sum metric.Number
min metric.Number
max metric.Number
}
)
var _ export.Aggregator = &Aggregator{}
var _ aggregator.MinMaxSumCount = &Aggregator{}
// New returns a new aggregator for computing the min, max, sum, and
// count. It does not compute quantile information other than Min and
// Max.
//
// This type uses a mutex for Update() and Checkpoint() concurrency.
func New(desc *metric.Descriptor) *Aggregator {
kind := desc.NumberKind()
return &Aggregator{
kind: kind,
current: state{
count: metric.NewUint64Number(0),
sum: kind.Zero(),
min: kind.Maximum(),
max: kind.Minimum(),
},
}
}
// Sum returns the sum of values in the checkpoint.
func (c *Aggregator) Sum() (metric.Number, error) {
c.lock.Lock()
defer c.lock.Unlock()
return c.checkpoint.sum, nil
}
// Count returns the number of values in the checkpoint.
func (c *Aggregator) Count() (int64, error) {
c.lock.Lock()
defer c.lock.Unlock()
return c.checkpoint.count.CoerceToInt64(metric.Uint64NumberKind), nil
}
// Min returns the minimum value in the checkpoint.
// The error value aggregator.ErrNoData will be returned
// if there were no measurements recorded during the checkpoint.
func (c *Aggregator) Min() (metric.Number, error) {
c.lock.Lock()
defer c.lock.Unlock()
if c.checkpoint.count.IsZero(metric.Uint64NumberKind) {
return c.kind.Zero(), aggregator.ErrNoData
}
return c.checkpoint.min, nil
}
// Max returns the maximum value in the checkpoint.
// The error value aggregator.ErrNoData will be returned
// if there were no measurements recorded during the checkpoint.
func (c *Aggregator) Max() (metric.Number, error) {
c.lock.Lock()
defer c.lock.Unlock()
if c.checkpoint.count.IsZero(metric.Uint64NumberKind) {
return c.kind.Zero(), aggregator.ErrNoData
}
return c.checkpoint.max, nil
}
// Checkpoint saves the current state and resets the current state to
// the empty set.
func (c *Aggregator) Checkpoint(desc *metric.Descriptor) {
c.lock.Lock()
c.checkpoint, c.current = c.current, c.emptyState()
c.lock.Unlock()
}
func (c *Aggregator) emptyState() state {
kind := c.kind
return state{
count: metric.NewUint64Number(0),
sum: kind.Zero(),
min: kind.Maximum(),
max: kind.Minimum(),
}
}
// Update adds the recorded measurement to the current data set.
func (c *Aggregator) Update(_ context.Context, number metric.Number, desc *metric.Descriptor) error {
kind := desc.NumberKind()
c.lock.Lock()
defer c.lock.Unlock()
c.current.count.AddInt64(1)
c.current.sum.AddNumber(kind, number)
if number.CompareNumber(kind, c.current.min) < 0 {
c.current.min = number
}
if number.CompareNumber(kind, c.current.max) > 0 {
c.current.max = number
}
return nil
}
// Merge combines two data sets into one.
func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error {
o, _ := oa.(*Aggregator)
if o == nil {
return aggregator.NewInconsistentMergeError(c, oa)
}
c.checkpoint.count.AddNumber(metric.Uint64NumberKind, o.checkpoint.count)
c.checkpoint.sum.AddNumber(desc.NumberKind(), o.checkpoint.sum)
if c.checkpoint.min.CompareNumber(desc.NumberKind(), o.checkpoint.min) > 0 {
c.checkpoint.min.SetNumber(o.checkpoint.min)
}
if c.checkpoint.max.CompareNumber(desc.NumberKind(), o.checkpoint.max) < 0 {
c.checkpoint.max.SetNumber(o.checkpoint.max)
}
return nil
}
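// Illustrative sketch (not part of the original source): recording a few
// values and reading the aggregate back, assuming an Int64-kind descriptor
// `desc` and a context `ctx` are in scope.
//
//	agg := New(desc)
//	_ = agg.Update(ctx, metric.NewInt64Number(3), desc)
//	_ = agg.Update(ctx, metric.NewInt64Number(7), desc)
//	agg.Checkpoint(desc)
//	min, _ := agg.Min() // 3
//	max, _ := agg.Max() // 7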
urls.py | """meusite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from home import views
urlpatterns = [
    path('', views.index),
    path('admin/', admin.site.urls),
    path('blog/', include('blog.urls')),
    path('sobre/', include('sobre.urls')),
]
Z_algorithm.py | #!/bin/python3
import sys, string
from random import *
from timeit import default_timer as timer
def randstr(N,alphabet=string.ascii_lowercase):
l=len(alphabet)
return "".join( alphabet[randint(0,l-1)] for _ in range(N))
def timefunc(func, *args, **kwargs):
"""Time a function.
args:
iterations=1
Usage example:
timeit(myfunc, 1, b=2)
"""
try:
iterations = kwargs.pop('iterations')
except KeyError:
iterations = 1
elapsed = sys.maxsize
start = timer()
for _ in range(iterations):
result = func(*args, **kwargs)
elapsed = (timer() - start)/iterations
print(('{}() : {:.9f}'.format(func.__name__, elapsed)))
return result
#res=timefunc( searchKMP, S, P, iterations=rep)
#if(res!=res0): print("Wrong")
def getZ_naive(S):
N=len(S)
Z=[0]*N
for i in range(1,N):
k=0
while( i+k<N and S[i+k]==S[k]):
k+=1
Z[i]=k
return Z
#int L = 0, R = 0;
#for (int i = 1; i < n; i++) {
# if (i > R) {
# L = R = i;
# while (R < n && s[R-L] == s[R]) R++;
# z[i] = R-L; R--;
# } else {
# int k = i-L;
# if (z[k] < R-i+1) z[i] = z[k];
# else {
# L = i;
# while (R < n && s[R-L] == s[R]) R++;
# z[i] = R-L; R--;
# }
# }
#}
def getZ_0(S):
N=len(S)
Z=[0]*N
L,R=0,0
for i in range(1,N):
if i>R:
L=R=i
while R<N and S[R-L]==S[R]:
R+=1
Z[i]=R-L
R-=1
else:
k=i-L
if Z[k]<R-i+1:
Z[i]=Z[k]
else:
L=i
while R<N and S[R-L]==S[R]:
R+=1
Z[i]=R-L
R-=1
return Z
#from rookie rank 4, dna
#not optimal....
def getZ_1(S):
N=len(S)
Z=[0]*N
L,R=-1,-1
for i in range(N):
if i<R:
Z[i]=min(R-i, Z[i-L])
while i+Z[i]<N and S[Z[i]]==S[i+Z[i]]:
Z[i]+=1
if i+Z[i]>R:
L=i
R=i+Z[i]
Z[0]=0 #due to it=N as result
return Z
#void z_func(string s){
# int n = s.length();
# int z[n];
# z[0] = 0;
#
# for (int i = 1, l = 0, r = 1; i < n; i++, r = i < r ? r : i)
# for (z[i] = min(r - i, z[i - l]); i + z[i]<n && s[i + z[i]] == s[z[i]]; z[i]++, r = i + z[i], l = i);
#
# for (int i = 0; i < n; i++)
# cout << z[i] << " ";
#}
def getZ_2(S):
N=len(S)
Z=[0]*N
i=1
L=0
R=1
while i<N:
Z[i]=min(R-i,Z[i-L])
while i+Z[i]<N and S[i+Z[i]]==S[Z[i]]:
Z[i]+=1
R=i+Z[i]
L=i
i+=1
if i>=R:
R=i
return Z
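# Worked example: for S = "aabxaab" all variants above return
# Z = [0, 1, 0, 0, 3, 1, 0]; e.g. Z[4] = 3 because S[4:] = "aab"
# shares a 3-character prefix with S.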
if __name__ == "__main__":
alpha="AB"
#alpha=string.ascii_lowercase
S = randstr(30,alphabet=alpha)
#S=['A']*10000
rep=1
print(S)
res0=timefunc( getZ_naive, S, iterations=rep)
print(res0)
res=timefunc( getZ_0, S, iterations=rep)
print(res0==res)
res=timefunc( getZ_1, S, iterations=rep)
print(res0==res)
res=timefunc( getZ_2, S, iterations=rep)
	print(res0==res)
window.rs | use crate::{
sf_bool_ext::SfBoolExt,
system::{SfStrConv, Vector2i, Vector2u},
window::{thread_safety, ContextSettings, Cursor, Event, Style, VideoMode},
};
use csfml_system_sys::sfBool;
use csfml_window_sys as ffi;
/// The system native window handle type. Can be used to create an SFML Window
/// from an existing system window.
pub type Handle = ffi::sfWindowHandle;
/// Window that serves as a target for OpenGL rendering.
///
/// `Window` is the main type of the window module.
///
/// It defines an OS window that is able to receive an OpenGL rendering.
///
/// The `Window` type provides a simple interface for manipulating the window:
/// move, resize, show/hide, control mouse cursor, etc.
/// It also provides event handling through [`Window::poll_event`] and [`Window::wait_event`].
///
/// Note that OpenGL experts can pass their own parameters
/// (antialiasing level, bits for the depth and stencil buffers, etc.) to the OpenGL context
/// attached to the window, with the [`ContextSettings`] structure which is passed as an
/// optional argument when creating the window.
///
/// # Usage example
///
/// ```no_run
/// use sfml::window::{Window, Event, Style};
/// // Create a new window
/// let mut window = Window::new((800, 600),
/// "SFML window",
/// Style::CLOSE,
/// &Default::default());
/// // Limit the framerate to 60 frames per second (this step is optional)
/// window.set_framerate_limit(60);
///
/// // The main loop - ends as soon as the window is closed
/// while window.is_open() {
/// // Event processing
/// while let Some(event) = window.poll_event() {
/// // Request closing for the window
/// if event == Event::Closed {
/// window.close();
/// }
/// }
///
/// // Activate the window for OpenGL rendering
/// window.set_active(true);
///
/// // OpenGL drawing commands go here...
///
/// // End the current frame and display its contents on screen
/// window.display();
/// }
/// ```
#[derive(Debug)]
pub struct Window {
window: *mut ffi::sfWindow,
}
impl Window {
/// Construct a new window
///
/// This function creates the window with the size and pixel
/// depth defined in mode. An optional style can be passed to
/// customize the look and behaviour of the window (borders,
/// title bar, resizable, closable, ...). If style contains
/// [`Style::FULLSCREEN`], then mode must be a valid video mode.
///
/// The fourth parameter is a pointer to a structure specifying
/// advanced OpenGL context settings such as antialiasing,
/// depth-buffer bits, etc.
///
/// # Arguments
/// * mode - Video mode to use (defines the width, height and depth of the
/// rendering area of the window)
/// * title - Title of the window
/// * style - Window style
/// * settings - Additional settings for the underlying OpenGL context
pub fn new<V: Into<VideoMode>, S: SfStrConv>(
mode: V,
title: S,
style: Style,
settings: &ContextSettings,
) -> Window {
thread_safety::set_window_thread();
let sf_win: *mut ffi::sfWindow = unsafe {
title.with_as_sfstr(|sfstr| {
ffi::sfWindow_createUnicode(
mode.into().raw(),
sfstr.as_ptr(),
style.bits(),
&settings.0,
)
})
};
assert!(!sf_win.is_null(), "Failed to create Window");
Window { window: sf_win }
}
/// Create a window from an existing platform-specific window handle
///
/// This function creates a window based on an existing platform specific
/// window handle which has been allocated outside of SFML. This is only
/// intended to be used in cases where you need to integrate SFML with some
/// other windowing library.
///
/// # Safety
///
/// It is the caller's responsibility to ensure that it is called with a valid window handle.
///
/// # Arguments
/// * handle - The handle to the platform-specific window handle to use for
/// the window.
/// * settings - Additional settings for the underlying OpenGL context
#[must_use]
pub unsafe fn from_handle(handle: Handle, settings: &ContextSettings) -> Window {
thread_safety::set_window_thread();
let sf_win: *mut ffi::sfWindow = ffi::sfWindow_createFromHandle(handle, &settings.0);
assert!(!sf_win.is_null(), "Failed to create Window");
Window { window: sf_win }
}
/// Get the OS-specific handle of the window.
///
/// The type of the returned handle is Handle, which is a typedef to the handle type defined by the OS.
/// You shouldn't need to use this function, unless you have very specific stuff to implement that SFML
/// doesn't support, or implement a temporary workaround until a bug is fixed.
#[must_use]
pub fn system_handle(&self) -> Handle {
unsafe { ffi::sfWindow_getSystemHandle(self.window) }
}
/// Pop the event on top of event queue, if any, and return it
///
/// This function is not blocking: if there's no pending event then
/// it will return `None`.
/// Note that more than one event may be present in the event queue,
/// thus you should always call this function in a loop
/// to make sure that you process every pending event.
///
/// Returns `Some(event)` if an event was returned, or `None` if the event queue was empty
pub fn poll_event(&mut self) -> Option<Event> {
let mut event = std::mem::MaybeUninit::uninit();
let have_event =
unsafe { ffi::sfWindow_pollEvent(self.window, event.as_mut_ptr()).into_bool() };
if have_event {
unsafe { Event::from_raw(&event.assume_init()) }
} else {
None
}
}
/// Wait for an event and return it
///
/// This function is blocking: if there's no pending event then
/// it will wait until an event is received.
///
/// This function is typically used when you have a thread that
/// is dedicated to events handling: you want to make this thread
/// sleep as long as no new event is received.
///
    /// Returns `Some(event)` or `None` if an error has occurred
pub fn wait_event(&mut self) -> Option<Event> {
let mut event = std::mem::MaybeUninit::uninit();
let have_event =
unsafe { ffi::sfWindow_waitEvent(self.window, event.as_mut_ptr()).into_bool() };
        if have_event {
            unsafe { Event::from_raw(&event.assume_init()) }
        } else {
            None
}
}
/// Change a window's icon
/// pixels must be an array of width x height pixels in 32-bits RGBA format.
///
/// # Arguments
/// * width - Icon's width, in pixels
/// * height - Icon's height, in pixels
/// * pixels - Vector of pixels
pub fn set_icon(&mut self, width: u32, height: u32, pixels: &[u8]) {
unsafe { ffi::sfWindow_setIcon(self.window, width, height, pixels.as_ptr()) }
}
/// Close a window and destroy all the attached resources
///
/// After calling this method, the Window object remains
/// valid.
/// All other functions such as [`Window::poll_event`] or [`Window::display`]
/// will still work (i.e. you don't have to test [`Window::is_open`]
/// every time), and will have no effect on closed windows.
pub fn close(&mut self) {
unsafe {
ffi::sfWindow_close(self.window);
}
}
/// Tell whether or not a window is opened
///
/// This function returns whether or not the window exists.
/// Note that a hidden window (`set_visible(false)`) will return
/// true.
#[must_use]
pub fn is_open(&self) -> bool {
unsafe { ffi::sfWindow_isOpen(self.window) }.into_bool()
}
/// Get the settings of the OpenGL context of a window
///
/// Note that these settings may be different from what was
/// passed to the [`Window::new`] function,
/// if one or more settings were not supported. In this case,
/// SFML chose the closest match.
///
/// Return a structure containing the OpenGL context settings
#[must_use]
pub fn settings(&self) -> ContextSettings {
unsafe { ContextSettings(ffi::sfWindow_getSettings(self.window)) }
}
/// Change the title of a window
///
/// # Arguments
/// * title - New title
pub fn set_title<S: SfStrConv>(&mut self, title: S) {
title.with_as_sfstr(|sfstr| unsafe {
ffi::sfWindow_setUnicodeTitle(self.window, sfstr.as_ptr())
})
}
/// Show or hide a window
///
/// # Arguments
/// * visible - true to show the window, false to hide it
pub fn set_visible(&mut self, visible: bool) {
unsafe { ffi::sfWindow_setVisible(self.window, sfBool::from_bool(visible)) }
}
/// Show or hide the mouse cursor
///
/// # Arguments
    /// * visible - true to show the mouse cursor, false to hide it
pub fn set_mouse_cursor_visible(&mut self, visible: bool) {
unsafe { ffi::sfWindow_setMouseCursorVisible(self.window, sfBool::from_bool(visible)) }
}
/// Grab or release the mouse cursor.
///
/// If set, grabs the mouse cursor inside this window's client area so it may no longer be
/// moved outside its bounds. Note that grabbing is only active while the window has focus.
pub fn set_mouse_cursor_grabbed(&mut self, grabbed: bool) {
unsafe { ffi::sfWindow_setMouseCursorGrabbed(self.window, sfBool::from_bool(grabbed)) }
}
/// Enable or disable vertical synchronization
///
/// Activating vertical synchronization will limit the number
/// of frames displayed to the refresh rate of the monitor.
/// This can avoid some visual artifacts, and limit the framerate
/// to a good value (but not constant across different computers).
///
/// # Arguments
/// * enabled - true to enable v-sync, false to deactivate
pub fn set_vertical_sync_enabled(&mut self, enabled: bool) {
unsafe { ffi::sfWindow_setVerticalSyncEnabled(self.window, sfBool::from_bool(enabled)) }
}
/// Enable or disable automatic key-repeat
///
/// If key repeat is enabled, you will receive repeated
/// [`Event::KeyPressed`] events while keeping a key pressed. If it is disabled,
/// you will only get a single event when the key is pressed.
///
/// Key repeat is enabled by default.
///
/// # Arguments
/// * enabled - true to enable, false to disable
pub fn set_key_repeat_enabled(&mut self, enabled: bool) {
unsafe { ffi::sfWindow_setKeyRepeatEnabled(self.window, sfBool::from_bool(enabled)) }
}
/// Activate or deactivate a window as the current target for OpenGL rendering
///
/// A window is active only on the current thread, if you want to
/// make it active on another thread you have to deactivate it
/// on the previous thread first if it was active.
/// Only one window can be active on a thread at a time, thus
/// the window previously active (if any) automatically gets deactivated.
///
/// # Arguments
/// * active - true to activate, false to deactivate
///
/// Return true if operation was successful, false otherwise
pub fn set_active(&mut self, enabled: bool) -> bool {
unsafe { ffi::sfWindow_setActive(self.window, sfBool::from_bool(enabled)) }.into_bool()
}
/// Display on screen what has been rendered to the window so far
///
/// This function is typically called after all OpenGL rendering
/// has been done for the current frame, in order to show
/// it on screen.
pub fn display(&mut self) {
unsafe { ffi::sfWindow_display(self.window) }
}
/// Limit the framerate to a maximum fixed frequency
///
/// If a limit is set, the window will use a small delay after
/// each call to [`Window::display`] to ensure that the current frame
/// lasted long enough to match the framerate limit.
///
/// # Arguments
/// * limit - Framerate limit, in frames per seconds (use 0 to disable limit)
pub fn set_framerate_limit(&mut self, limit: u32) {
unsafe { ffi::sfWindow_setFramerateLimit(self.window, limit) }
}
/// Change the joystick threshold
///
/// The joystick threshold is the value below which
/// no [`Event::JoystickMoved`] event will be generated.
///
/// # Arguments
/// * threshold - New threshold, in the range [0, 100]
pub fn set_joystick_threshold(&mut self, threshold: f32) {
unsafe { ffi::sfWindow_setJoystickThreshold(self.window, threshold) }
}
/// Get the position of a window
///
/// Return the position in pixels
#[must_use]
pub fn position(&self) -> Vector2i {
unsafe { Vector2i::from_raw(ffi::sfWindow_getPosition(self.window)) }
}
/// Change the position of a window on screen
///
/// This function only works for top-level windows
/// (i.e. it will be ignored for windows created from
/// the handle of a child window/control).
///
/// # Arguments
/// * position - New position of the window, in pixels
pub fn set_position(&mut self, position: Vector2i) {
unsafe { ffi::sfWindow_setPosition(self.window, position.raw()) }
}
/// Get the size of the rendering region of a window
///
/// The size doesn't include the titlebar and borders of the window.
///
/// Return the size in pixels
#[must_use]
pub fn size(&self) -> Vector2u {
unsafe { Vector2u::from_raw(ffi::sfWindow_getSize(self.window)) }
}
/// Change the size of the rendering region of a window
///
/// # Arguments
/// * size - New size, in pixels
pub fn set_size(&mut self, size: Vector2u) {
unsafe { ffi::sfWindow_setSize(self.window, size.raw()) }
}
/// Returns the current position of the mouse relative to the window.
#[must_use]
pub fn mouse_position(&self) -> Vector2i {
unsafe { Vector2i::from_raw(ffi::sfMouse_getPosition(self.window)) }
}
/// Set the current position of the mouse
///
/// This function sets the current position of the mouse cursor relative to the given window.
///
/// # Arguments
/// * position - New position of the mouse
/// * relativeTo - Reference Window
///
pub fn set_mouse_position(&mut self, position: Vector2i) {
unsafe { ffi::sfMouse_setPosition(position.raw(), self.window) }
}
/// Set the displayed cursor to a native system cursor.
///
/// Upon window creation, the arrow cursor is used by default.
/// The cursor can not be destroyed while in use by the window.
pub fn set_mouse_cursor(&mut self, cursor: &Cursor) {
unsafe { ffi::sfWindow_setMouseCursor(self.window, cursor.raw()) }
}
/// Returns the current position of a touch in window coordinates.
#[must_use]
pub fn touch_position(&self, finger: u32) -> Vector2i {
unsafe { Vector2i::from_raw(ffi::sfTouch_getPosition(finger, self.window)) }
}
/// Check whether the window has the input focus.
///
/// At any given time, only one window may have the input focus to receive input events
/// such as keystrokes or most mouse events.
#[must_use]
pub fn has_focus(&self) -> bool {
unsafe { ffi::sfWindow_hasFocus(self.window).into_bool() }
}
/// Request the current window to be made the active foreground window.
///
/// At any given time, only one window may have the input focus to receive input events
/// such as keystrokes or mouse events. If a window requests focus, it only hints to the
    /// operating system that it would like to be focused. The operating system is free to
/// deny the request. This is not to be confused with [`Window::set_active`].
pub fn request_focus(&self) {
unsafe { ffi::sfWindow_requestFocus(self.window) }
}
#[cfg(feature = "graphics")]
pub(crate) fn raw(&self) -> *const ffi::sfWindow {
self.window
}
}
impl Drop for Window {
fn drop(&mut self) {
unsafe {
ffi::sfWindow_destroy(self.window);
}
}
}
thread.py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging # noqa
from functools import partial # noqa
from multiprocessing.pool import ThreadPool as _ThreadPool # noqa
from django import db # noqa
from django.utils import timezone, translation # noqa
from apps.utils.local import activate_request, get_request # noqa
from .local import local # noqa
logger = logging.getLogger(__name__)
def run_func_with_local(items, tz, lang, request, func, *args, **kwargs):
"""
    Thread execution wrapper
    :param request: added by jairwu API request
    :param func: function to execute
    :param items: Thread Local items
    :param tz: timezone
    :param lang: language
    :param args: positional arguments
    :param kwargs: keyword arguments
    :return: return value of func
"""
    # Sync thread-local data
for item in items:
setattr(local, item[0], item[1])
    # Set timezone and language
timezone.activate(tz)
translation.activate(lang)
activate_request(request)
try:
data = func(*args, **kwargs)
except Exception as e:
raise e
finally:
        # Close db connections
db.connections.close_all()
        # Clean up thread-local data
for item in local:
delattr(local, item[0])
return data
class ThreadPool(_ThreadPool):
"""
    Thread pool
"""
@staticmethod
def get_func_with_local(func):
tz = timezone.get_current_timezone().zone
lang = translation.get_language()
items = [item for item in local]
request = get_request()
return partial(run_func_with_local, items, tz, lang, request, func)
def map_ignore_exception(self, func, iterable, return_exception=False):
"""
        A variant of map that ignores exceptions
"""
futures = []
for params in iterable:
if not isinstance(params, (tuple, list)):
params = (params,)
futures.append(self.apply_async(func, args=params))
results = []
for future in futures:
try:
results.append(future.get())
except Exception as e:
if return_exception:
results.append(e)
logger.exception(e)
return results
def map_async(self, func, iterable, chunksize=None, callback=None):
return super(ThreadPool, self).map_async(
self.get_func_with_local(func), iterable, chunksize=chunksize, callback=callback
)
def apply_async(self, func, args=(), kwds={}, callback=None):
return super(ThreadPool, self).apply_async(
self.get_func_with_local(func), args=args, kwds=kwds, callback=callback
)
    def imap(self, func, iterable, chunksize=1):
        return super(ThreadPool, self).imap(self.get_func_with_local(func), iterable, chunksize)

    def imap_unordered(self, func, iterable, chunksize=1):
        return super(ThreadPool, self).imap_unordered(self.get_func_with_local(func), iterable, chunksize=chunksize)
api_raw.py | from .tool.func import *
def api_raw_2(conn, name):
curs = conn.cursor()
if acl_check(name, 'render') != 1:
curs.execute("select data from data where title = ?", [name])
data = curs.fetchall()
if data:
json_data = { "title" : name, "data" : render_set(title = name, data = data[0][0], s_data = 1) }
return flask.jsonify(json_data)
else:
			return flask.jsonify({})
else:
		return flask.jsonify({})
EnlightenSystem.js | /**
* A wrapper for a particular system belonging to a user
* Should not be constructed manually, retrieve using EnlightenAPI.getServers()
*/
export default class EnlightenSystem {
constructor(parent, props) {
for (var fld in props) {
this[fld] = props[fld];
}
this.enphaseAPI = parent;
this.cachedStats = new Map();
}
/**
* Retrieves stats for power production for every 5 minute period. Will return cached values if they exist,
* and retrieve any missing values from the server making multiple calls if needed.
*
* @param {number=''} startAt unix timestamp for first stat wanted, will round down to nearest 5 min block
* @param {number=''} endAt unix timestamp for last stat wanted, will round down to nearest 5 min block
* @returns {Promise<Array<EnlightenStat>>} returns a promise for an array of stats objects
*/
getStats(startAt = '', endAt = '')
{
console.log('getStats()',startAt || '-',endAt || '-');
if (startAt) startAt = Math.floor(parseInt(startAt) / 300) * 300;
if (endAt) endAt = Math.floor(parseInt(endAt) / 300) * 300;
if (endAt && (!startAt || endAt < startAt)) endAt = '';
let at = startAt;
if(startAt) {
// Some of the records are available in cache
if (this.cachedStats.get(startAt))
{
let intervals = [];
if (!endAt) endAt = startAt + 30000;
while(this.cachedStats.get(at) && at < endAt)
{
intervals.push(this.cachedStats.get(at));
at += 300;
}
if (at >= endAt)
{
			console.log('getStats() - Fulfilled by cache');
return new Promise(e => { e(intervals) });
}
// Fetch the remainder using API, append and return
console.log(`Retrieved cached ${startAt} - ${at-300}, fetching to ${endAt} with API`);
return this.getStats(at,endAt)
.then(stats => {
return intervals.concat(stats);
});
}
}
// No cache - Retrieve records from API, fetch as many as we can to conserve
// API usage requests, but only return those that were requested
console.log('getStats() - API Request',startAt,'-');
return this.api('stats', {
start_at: startAt,
})
.then(stats => {
console.log(`getStats() - Retrieved ${stats.intervals.length} stats from API`);
let last = '';
let intervals = [];
stats.intervals.forEach(statProps => {
let stat = new EnlightenStat(statProps);
this.cachedStats.set(stat.startAt,stat);
if (stat.endAt <= endAt)
intervals.push(stat);
last = stat.startAt;
});
if (last && last < endAt) {
// Fetch more records, use cached to fill in
console.log(`getStats() - API returned less records than desired ${last} < ${endAt}, retrieving remainder`);
return this.getStats(startAt, endAt);
}
return intervals
});
}
/**
	 * Convenience method for retrieving the amount of electricity generated
* during a specific period.
* @param {number} startAt unix timestamp, will be rounded down to nearest 5 min
* @param {number} endAt unix timestamp, will be rounded down to the nearest 5 min
* @returns {number} returns total WH (Watt Hours) generated during period in question
*/
getEnergyProduced(startAt, endAt) {
return this.getStats(startAt, endAt)
.then(stats => {
let total = 0;
stats.forEach(stat => {
total += stat.produced;
});
return total;
});
}
api(path, params) {
return this.enphaseAPI.api(`${this.system_id}/${path}`,params);
}
}
/**
* Internal carrier for stats call result data, do not directly alloc
* instead use EnlightenSystem.getStats() or related
*/
class EnlightenStat {
constructor(props) {
this.endAt = parseInt(props.end_at);
this.startAt = this.endAt - 300;
this.averagePower = parseInt(props.powr);
this.produced = parseInt(props.enwh);
this.devicesReporting = parseInt(props.devices_reporting);
}
}
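// Illustrative sketch (not part of the original source): summing the last
// hour of production, assuming `system` was obtained via
// EnlightenAPI.getServers().
//
//   const now = Math.floor(Date.now() / 1000);
//   system.getEnergyProduced(now - 3600, now)
//     .then(wh => console.log(`produced ${wh} Wh`));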
cluster_repository.go | package repository
import (
"context"
"strings"
"time"
"github.com/fabric8-services/fabric8-cluster/cluster"
"github.com/fabric8-services/fabric8-common/httpsupport"
"github.com/fabric8-services/fabric8-cluster/application/repository/base"
"github.com/fabric8-services/fabric8-common/errors"
"github.com/fabric8-services/fabric8-common/gormsupport"
"github.com/fabric8-services/fabric8-common/log"
"github.com/goadesign/goa"
"github.com/jinzhu/gorm"
errs "github.com/pkg/errors"
uuid "github.com/satori/go.uuid"
"fmt"
)
// Cluster the struct that holds the cluster info
type Cluster struct {
gormsupport.LifecycleHardDelete
// This is the primary key value
ClusterID uuid.UUID `sql:"type:uuid default uuid_generate_v4()" gorm:"primary_key;column:cluster_id"`
// The name of the cluster
Name string `mapstructure:"name"`
// API URL of the cluster
URL string `sql:"unique_index" mapstructure:"api-url"`
// Console URL of the cluster
ConsoleURL string `mapstructure:"console-url" optional:"true"` // Optional in config file
// Metrics URL of the cluster
MetricsURL string `mapstructure:"metrics-url" optional:"true"` // Optional in config file
// Logging URL of the cluster
LoggingURL string `mapstructure:"logging-url" optional:"true"` // Optional in config file
// Application host name used by the cluster
AppDNS string `mapstructure:"app-dns"`
// Service Account token (encrypted or not, depending on the state of the sibling SATokenEncrypted field)
SAToken string `mapstructure:"service-account-token"`
// Service Account username
SAUsername string `mapstructure:"service-account-username"`
// SA Token encrypted
SATokenEncrypted bool `mapstructure:"service-account-token-encrypted" optional:"true" default:"true"` // Optional in config file
// Token Provider ID
TokenProviderID string `mapstructure:"token-provider-id"`
// OAuthClient ID used to link users account
AuthClientID string `mapstructure:"auth-client-id"`
// OAuthClient secret used to link users account
AuthClientSecret string `mapstructure:"auth-client-secret"`
// OAuthClient default scope used to link users account
AuthDefaultScope string `mapstructure:"auth-client-default-scope"`
// Cluster type. Such as OSD, OSO, OCP, etc
Type string `mapstructure:"type" optional:"true" default:"OSO"` // Optional in config file
// cluster capacity exhausted by default false
CapacityExhausted bool `mapstructure:"capacity-exhausted" optional:"true"` // Optional in config file
}
// Normalize fills the `console`, `metrics` and `logging` URL if there were missing,
// and appends a trailing slash if needed.
func (c *Cluster) Normalize() error {
// ensure that cluster URL ends with a slash
c.URL = httpsupport.AddTrailingSlashToURL(c.URL)
var err error
// fill missing values and ensures that all URLs have a trailing slash
// console URL
if strings.TrimSpace(c.ConsoleURL) == "" {
c.ConsoleURL, err = ConvertAPIURL(c.URL, "console", "console")
if err != nil {
return err
}
}
c.ConsoleURL = httpsupport.AddTrailingSlashToURL(c.ConsoleURL)
// metrics URL
if strings.TrimSpace(c.MetricsURL) == "" {
c.MetricsURL, err = ConvertAPIURL(c.URL, "metrics", "")
if err != nil {
return err
}
}
c.MetricsURL = httpsupport.AddTrailingSlashToURL(c.MetricsURL)
// logging URL
if strings.TrimSpace(c.LoggingURL) == "" {
// This is not a typo; the logging host is the same as the console host in current k8s
c.LoggingURL, err = ConvertAPIURL(c.URL, "console", "console")
if err != nil {
return err
}
}
c.LoggingURL = httpsupport.AddTrailingSlashToURL(c.LoggingURL)
// ensure that AppDNS URL ends with a slash
c.AppDNS = httpsupport.AddTrailingSlashToURL(c.AppDNS)
// apply default type of cluster
if c.Type == "" {
c.Type = cluster.OSO
}
return nil
}
// GormClusterRepository is the implementation of the storage interface for Cluster.
type GormClusterRepository struct {
db *gorm.DB
}
// NewClusterRepository creates a new storage type.
func NewClusterRepository(db *gorm.DB) ClusterRepository {
return &GormClusterRepository{db: db}
}
// ClusterRepository represents the storage interface.
type ClusterRepository interface {
base.Exister
Load(ctx context.Context, ID uuid.UUID) (*Cluster, error)
Create(ctx context.Context, u *Cluster) error
Save(ctx context.Context, u *Cluster) error
CreateOrSave(ctx context.Context, u *Cluster) error
Delete(ctx context.Context, ID uuid.UUID) error
Query(funcs ...func(*gorm.DB) *gorm.DB) ([]Cluster, error)
FindByURL(ctx context.Context, url string) (*Cluster, error)
List(ctx context.Context, clusterType *string) ([]Cluster, error)
}
// TableName overrides the table name settings in Gorm to force a specific table name
// in the database.
func (m *GormClusterRepository) TableName() string {
return "cluster"
}
// TableName overrides the table name settings in Gorm to force a specific table name
// in the database.
func (c Cluster) TableName() string {
return "cluster"
}
// CheckExists returns nil if the given ID exists otherwise returns an error
func (m *GormClusterRepository) CheckExists(ctx context.Context, id string) error {
defer goa.MeasureSince([]string{"goa", "db", "cluster", "exists"}, time.Now())
return base.CheckHardDeletableExists(ctx, m.db, m.TableName(), "cluster_id", id)
}
// CRUD Functions
// Load returns a single Cluster as a Database Model
func (m *GormClusterRepository) Load(ctx context.Context, id uuid.UUID) (*Cluster, error) {
defer goa.MeasureSince([]string{"goa", "db", "cluster", "load"}, time.Now())
var native Cluster
err := m.db.Table(m.TableName()).Where("cluster_id = ?", id).Find(&native).Error
if err == gorm.ErrRecordNotFound {
return nil, errors.NewNotFoundError("cluster", id.String())
}
return &native, errs.WithStack(err)
}
// FindByURL returns a single Cluster filtered using 'url'
func (m *GormClusterRepository) FindByURL(ctx context.Context, url string) (*Cluster, error) {
defer goa.MeasureSince([]string{"goa", "db", "cluster", "loadClusterByURL"}, time.Now())
var native Cluster
// make sure that the URL to use during the search also has a trailing slash (see the Cluster.Normalize() method)
err := m.db.Table(m.TableName()).Where("url = ?", httpsupport.AddTrailingSlashToURL(url)).Find(&native).Error
if err == gorm.ErrRecordNotFound {
return nil, errors.NewNotFoundErrorFromString(fmt.Sprintf("cluster with url '%s' not found", url))
}
return &native, errs.WithStack(err)
}
// Create creates a new record.
func (m *GormClusterRepository) Create(ctx context.Context, c *Cluster) error {
defer goa.MeasureSince([]string{"goa", "db", "cluster", "create"}, time.Now())
if c.ClusterID == uuid.Nil {
c.ClusterID = uuid.NewV4()
}
err := c.Normalize()
if err != nil {
return errs.WithStack(err)
}
err = m.db.Create(c).Error
if err != nil {
log.Error(ctx, map[string]interface{}{
"cluster_id": c.ClusterID.String(),
"err": err,
}, "unable to create the cluster")
return errs.WithStack(err)
}
log.Debug(ctx, map[string]interface{}{
"cluster_id": c.ClusterID.String(),
}, "Cluster created!")
return nil
}
// Save modifies a single record
func (m *GormClusterRepository) Save(ctx context.Context, c *Cluster) error {
defer goa.MeasureSince([]string{"goa", "db", "cluster", "save"}, time.Now())
existing, err := m.Load(ctx, c.ClusterID)
if err != nil {
log.Error(ctx, map[string]interface{}{
"cluster_id": c.ClusterID.String(),
"err": err,
}, "unable to update cluster")
return errs.WithStack(err)
}
return m.update(ctx, existing, c)
}
// update updates the existing cluster record with the given "new" one
func (m *GormClusterRepository) update(ctx context.Context, existing, c *Cluster) error {
err := c.Normalize()
if err != nil {
return errs.WithStack(err)
}
c.ClusterID = existing.ClusterID
c.CreatedAt = existing.CreatedAt
c.UpdatedAt = existing.UpdatedAt
err = m.db.Save(c).Error
if err != nil {
log.Error(ctx, map[string]interface{}{
"cluster_id": c.ClusterID.String(),
"err": err,
}, "unable to update cluster")
return errs.WithStack(err)
}
log.Info(ctx, map[string]interface{}{
"cluster_id": c.ClusterID.String(),
"cluster_url": c.URL,
}, "cluster saved")
return nil
}
// CreateOrSave creates cluster or saves cluster if any cluster found using url
func (m *GormClusterRepository) CreateOrSave(ctx context.Context, c *Cluster) error {
existing, err := m.FindByURL(ctx, c.URL)
if err != nil {
if ok, _ := errors.IsNotFoundError(err); ok {
return m.Create(ctx, c)
}
log.Error(ctx, map[string]interface{}{
"cluster_url": c.URL,
"err": err,
}, "unable to load cluster")
return errs.WithStack(err)
}
return m.update(ctx, existing, c)
}
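// Example usage (illustrative; assumes a NewClusterRepository constructor from
// this package and a connected *gorm.DB named db):
//
//   repo := NewClusterRepository(db)
//   err := repo.CreateOrSave(ctx, &Cluster{URL: "https://api.example.com"})
//
// Because Cluster.Normalize() adds a trailing slash to the URL, repeated calls
// with or without the slash update the same record instead of creating
// duplicates.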
// Delete removes a single record. This is a hard delete!
// Also removes all identity/cluster relationships associated with this cluster.
func (m *GormClusterRepository) Delete(ctx context.Context, id uuid.UUID) error {
defer goa.MeasureSince([]string{"goa", "db", "cluster", "delete"}, time.Now())
toDelete, err := m.Load(ctx, id)
if err != nil {
log.Error(ctx, map[string]interface{}{
"cluster_id": id.String(),
"err": err,
}, "unable to delete the cluster")
return err
}
obj := Cluster{ClusterID: id}
result := m.db.Delete(&obj)
if result.Error != nil {
log.Error(ctx, map[string]interface{}{
"cluster_id": id.String(),
"err": result.Error,
}, "unable to delete the cluster")
return errs.WithStack(result.Error)
}
if result.RowsAffected == 0 {
return errors.NewNotFoundError("cluster", id.String())
}
log.Info(ctx, map[string]interface{}{
"cluster_id": id.String(),
"cluster_url": toDelete.URL,
}, "Cluster deleted!")
return nil
}
// Query exposes an open-ended Query model
func (m *GormClusterRepository) Query(funcs ...func(*gorm.DB) *gorm.DB) ([]Cluster, error) {
defer goa.MeasureSince([]string{"goa", "db", "cluster", "query"}, time.Now())
var objs []Cluster
err := m.db.Scopes(funcs...).Table(m.TableName()).Find(&objs).Error
if err != nil && err != gorm.ErrRecordNotFound {
return nil, errs.WithStack(err)
}
log.Debug(nil, map[string]interface{}{}, "cluster query done successfully!")
return objs, nil
}
// List lists all clusters (with the given optional type)
func (m *GormClusterRepository) List(ctx context.Context, clusterType *string) ([]Cluster, error) {
funcs := []func(*gorm.DB) *gorm.DB{}
if clusterType != nil {
funcs = append(funcs, filterByType(*clusterType))
}
return m.Query(funcs...)
}
func filterByType(clusterType string) func(*gorm.DB) *gorm.DB {
return func(db *gorm.DB) *gorm.DB {
return db.Where("type = ?", clusterType)
}
}
| NewClusterRepository |
icon.py | import atexit
import binascii
import os
import tempfile
iconhexdata = '00000100010032321000000000007007000016000000280000003200000064' \
'00000001000400000000000807000000000000000000000000000000000000' \
'00000000000080000080000000808000800000008000800080800000808080' \
'00c0c0c0000000ff0000ff000000ffff00ff000000ff00ff00ffff0000ffff' \
'ff00ffffffffffffffffffffffffffffffffffffffffffffffffff000000ff' \
'ffffffffffffffffffffffffffffffffffffffffffffffff000000ffffffff' \
'ffffffffffffffffffffffffffffffffffffffffff000000ffffffff000000' \
'00fffff0000000fff0000000ffffffffffff000000ffffff000000000000f0' \
'0000000000f0000000ffffffffffff000000fffff0fffffffffff008ffffff' \
'ffff0ffffff0ffffffffffff000000fffff0ffffffffffff0fffffffffff0f' \
'fffff0ffffffffffff000000fffff0fffff0fffffffffff07fffff0ffffff0' \
'ffffffffffff000000fffff0fffff0ffffffffff00ffffff0ffffff0ffffff' \
'ffffff000000fffff0fffff0fffffffff00fffffff0ffffff00000000fffff' \
'000000fffff0fffff0ffffffff00fffffff00ffffff0fffff00fffff000000' \
'fffff0fffff0fffffff00fffffff00fffffff0fffff00fffff000000fffff0' \
'fffff0ffffff00fffffff00ffffffff0fffff00fffff000000fffff0fffff0' \
'fffff00fffffff00fffffffff0fffff00fffff000000fffff0fffff0fffff0' \
'fffffff00ffffffffff0fffff00fffff000000fffff0fffff0fffff0ffffff' \
'00fffffffffff0fffff00fffff000000fffff0fffff0fffff0ffffffffffff' \
'0ffffffffffff07fffff000000fffff0fffff0fffff00fffffffffff00ffff' \
'fffffff0ffffff000000fffff0fffff0ffffff00fffffffff0800fffffffff' \
'00ffffff000000fffff0000000000000f0000000000fff7000000000ffffff' \
'ff000000ffffffffffffffffffffffffffffffffffffffffffffffffff0000' \
'00fffffffffff899999fffffffffffffff899999ffffffffffff000000ffff' \
'ffffff99999999fffffffffffff99999999fffffffffff000000ffffffffff' \
'9999999999fffffffff9999999999fffffffffff000000ffffffffffffffff' \
'ffff8fffffff8fffffffffffffffffffff000000fffffffffff9999999999f' \
'ffffff9999999999ffffffffffff000000fffffffff99999999999ffffffff' \
'f79999999999ffffffffff000000ffffffff9999999999ff99fffff99ff999' \
'9999999fffffffff000000fffffff999999999ff999ff9f9ff999ff9999999' \
'99ffffffff000000ffffff999999999ff9999f99f99f9999ff999999999fff' \
'ffff000000ffffff9999999ff99999f999f999f99999ff9999999fffffff00' \
'0000ffffff99999ff9999999f999f999f9999999ff99999fffffff000000ff' \
'ffff999fff9999999f9999f9999f9999999fff999fffffff000000ffffff98' \
'fff99999999f9999f9999ff9999999fff89fffffff000000ffffffffff9999' \
'9999f99999f99999f99999999fffffffffff000000fffffffff99999999ff9' \
'9999f999997f99999999ffffffffff000000fffffffff99999999f999999f9' \
'99999f99999999ffffffffff000000fffffffff9999999ff999999f9999997' \
'f99999998fffffffff000000fffffffff999999ff9999999f9999999ff9999' \
'99ffffffffff000000fffffffff999999ff9999999f9999999ff999999ffff' \
'ffffff000000fffffffff99999ff99999999f99999999ff99999ffffffffff' \
'000000ffffffffff999fff99999999f99999999fff999fffffffffff000000' \
'ffffffffff897fff99999999f99999999ffff99fffffffffff000000ffffff' \
'fffff9ffff9999999fff9999999ffff9ffffffffffff000000ffffffffffff' \
'ffff9999999fff9999999fffffffffffffffff000000ffffffffffffffff89' \
'99999fff9999999fffffffffffffffff000000fffffffffffffffff999999f' \
'ff999999ffffffffffffffffff000000ffffffffffffffffff9999fffff999' \
'9fffffffffffffffffff000000fffffffffffffffffffff9fffff9ffffffff' \
'ffffffffffffff000000ffffffffffffffffffffffffffffffffffffffffff' \
'ffffffff000000000000000000000000000000000000000000000000000000' \
'00000000000000000000000000000000000000000000000000000000000000' \
'00000000000000000000000000000000000000000000000000000000000000' \
'00000000000000000000000000000000000000000000000000000000000000' \
'00000000000000000000000000000000000000000000000000000000000000' \
'00000000000000000000000000000000000000000000000000000000000000' \
'00000000000000000000000000000000000000000000000000000000000000' \
'00000000000000000000000000000000000000000000000000000000000000' \
'00000000000000000000000000000000000000000000000000000000000000' \
'00000000000000000000000000000000000000000000000000000000000000' \
'00000000000000000000000000000000000000000000000000000000000000' \
'00000000000000000000000000000000000000000000000000000000000000' \
'00000000000000000000000000000000000000000000000000000000000000' \
'00000000'
def | (iconfile):
try:
os.remove(iconfile.name)
except Exception:
pass
with tempfile.NamedTemporaryFile(delete=False) as iconfile:
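# a2b_hex decodes the hex dump above back into the raw bytes of the .ico file.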
iconfile.write(binascii.a2b_hex(iconhexdata))
# Register a clean-up function.
atexit.register(lambda file=iconfile: on_closing(file)) | on_closing |
contexts.py | from __future__ import absolute_import
import six
import string
from django.utils.encoding import force_text
from sentry.interfaces.base import Interface
from sentry.utils.json import prune_empty_keys
from sentry.utils.safe import get_path, trim
__all__ = ("Contexts",)
context_types = {}
class _IndexFormatter(string.Formatter):
def format_field(self, value, format_spec):
if not format_spec and isinstance(value, bool):
return value and "yes" or "no"
return string.Formatter.format_field(self, value, format_spec)
def format_index_expr(format_string, data):
return six.text_type(_IndexFormatter().vformat(six.text_type(format_string), (), data).strip())
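# Example (illustrative): format_index_expr(u"{name} {version}",
# {"name": "iOS", "version": "13.1"}) returns u"iOS 13.1"; bare booleans are
# rendered as "yes"/"no" by _IndexFormatter above.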
def contexttype(cls):
context_types[cls.type] = cls
return cls
class ContextType(object):
indexed_fields = None
type = None
def __init__(self, alias, data):
self.alias = alias
ctx_data = {}
for key, value in six.iteritems(trim(data)):
# we use simple checks here, rather than ' in set()' to avoid
# issues with maps/lists
if value is not None and value != "":
ctx_data[force_text(key)] = value
self.data = ctx_data
def to_json(self):
rv = dict(self.data)
rv["type"] = self.type
return prune_empty_keys(rv)
@classmethod
def values_for_data(cls, data):
rv = []
for context in six.itervalues(data.get("contexts") or {}):
if context and context.get("type") == cls.type:
rv.append(context)
return rv
@classmethod
def primary_value_for_data(cls, data):
val = get_path(data, "contexts", cls.type)
if val and val.get("type") == cls.type:
return val
rv = cls.values_for_data(data)
if len(rv) == 1:
return rv[0]
def iter_tags(self):
if self.indexed_fields:
for field, f_string in six.iteritems(self.indexed_fields):
try:
value = format_index_expr(f_string, self.data)
except KeyError:
continue
if value:
if not field:
yield (self.alias, value)
else:
yield ("%s.%s" % (self.alias, field), value)
# TODO(dcramer): contexts need to document/describe expected (optional) fields
@contexttype
class DefaultContextType(ContextType):
type = "default"
@contexttype
class AppContextType(ContextType):
type = "app"
indexed_fields = {"device": u"{device_app_hash}"}
@contexttype
class DeviceContextType(ContextType):
type = "device"
indexed_fields = {"": u"{model}", "family": u"{family}"}
# model_id, arch
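# Example (illustrative): a device context with data
# {"model": "iPhone8,4", "family": "iPhone"} yields the tags
# ("device", "iPhone8,4") and ("device.family", "iPhone") via iter_tags().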
@contexttype
class RuntimeContextType(ContextType):
type = "runtime"
indexed_fields = {"": u"{name} {version}", "name": u"{name}"}
@contexttype
class BrowserContextType(ContextType):
type = "browser"
indexed_fields = {"": u"{name} {version}", "name": u"{name}"}
# viewport
@contexttype
class OsContextType(ContextType):
type = "os"
indexed_fields = {"": u"{name} {version}", "name": u"{name}", "rooted": u"{rooted}"}
# build, rooted
@contexttype
class GpuContextType(ContextType):
type = "gpu"
indexed_fields = {"name": u"{name}", "vendor": u"{vendor_name}"}
@contexttype
class MonitorContextType(ContextType):
type = "monitor"
indexed_fields = {"id": u"{id}"}
@contexttype
class TraceContextType(ContextType): | indexed_fields = {"": u"{trace_id}", "span": u"{span_id}", "ctx": u"{trace_id}-{span_id}"}
class Contexts(Interface):
"""
This interface stores context specific information.
"""
display_score = 1100
score = 800
@classmethod
def to_python(cls, data):
rv = {}
for alias, value in six.iteritems(data):
# XXX(markus): The `None`-case should be handled in the UI and
# other consumers of this interface
if value is not None:
rv[alias] = cls.normalize_context(alias, value)
return cls(**rv)
@classmethod
def normalize_context(cls, alias, data):
ctx_type = data.get("type", alias)
ctx_cls = context_types.get(ctx_type, DefaultContextType)
return ctx_cls(alias, data)
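# Example (illustrative): normalize_context("browser", {"name": "Chrome",
# "version": "79"}) has no explicit "type" key, so the alias "browser" is
# used and a BrowserContextType instance is returned.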
def iter_contexts(self):
return six.itervalues(self._data)
def to_json(self):
rv = {}
for alias, inst in six.iteritems(self._data):
rv[alias] = inst.to_json()
return rv
def iter_tags(self):
for inst in self.iter_contexts():
for tag in inst.iter_tags():
yield tag | type = "trace" |
checkpoint.py | ## @package checkpoint
# Module caffe2.python.checkpoint
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import logging
from caffe2.python import core, context
from caffe2.python.net_builder import ops
from caffe2.python.task import (
final_output,
Node,
Task,
TaskGroup,
TaskOutput,
WorkspaceType,
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
@context.define_context()
class Job(object):
"""
A Job defines four TaskGroups: the `init_group`, the `epoch_group`, the
`download_group` and the `exit_group`, which will be run by a JobRunner.
The `init_group` will be run only once at startup. Its role is to
initialize globally persistent blobs such as model weights, accumulators
and data file lists.
The `epoch_group` will be run in a loop after init_group. The loop will
exit when any of the stop signals added with `add_stop_condition` is True
at the end of an epoch.
The `download_group` will be run only once, after all the executions of
`epoch_group` finish. Its role is to collect the distributed, scattered
parameters back after training.
The `exit_group` will be run only once at the very end of the job, the
role of this group is to save the results of training in the end of the job.
Jobs are context-driven, so that Tasks can be added to the active Job
without having to explicitly pass the job object around.
Example of usage:
def build_reader(partitions):
with Job.current().init_group:
reader = HiveReader(init_reader, ..., partitions)
Task(step=init_reader)
with Job.current().epoch_group:
limited_reader = ReaderWithLimit(reader, num_iter=10000)
data_queue = pipe(limited_reader, num_threads=8)
Job.current().add_stop_condition(limited_reader.data_finished())
return data_queue
def build_hogwild_trainer(reader, model):
with Job.current().init_group:
Task(step=model.param_init_net)
with Job.current().epoch_group:
pipe(reader, processor=model, num_threads=8)
with Job.current().exit_group:
Task(step=model.save_model_net)
with Job() as job:
reader = build_reader(partitions)
model = build_model(params)
build_hogwild_trainer(reader, model)
"""
def __init__(self,
init_group=None, epoch_group=None,
download_group=None, exit_group=None,
stop_conditions=None, nodes_to_checkpoint=None):
self.init_group = init_group or TaskGroup(
workspace_type=WorkspaceType.GLOBAL)
self.epoch_group = epoch_group or TaskGroup()
self.download_group = download_group or TaskGroup()
self.exit_group = exit_group or TaskGroup()
self.stop_conditions = stop_conditions or []
self._nodes_to_checkpoint = nodes_to_checkpoint
def nodes_to_checkpoint(self):
if self._nodes_to_checkpoint:
return self._nodes_to_checkpoint
else:
return self.init_group.used_nodes()
def compile(self, session_class):
self._nodes_to_checkpoint = self.nodes_to_checkpoint()
self.init_group = session_class.compile(self.init_group)
self.epoch_group = session_class.compile(self.epoch_group)
self.download_group = session_class.compile(self.download_group)
self.exit_group = session_class.compile(self.exit_group)
def __enter__(self):
self.epoch_group.__enter__()
return self
def __exit__(self, *args):
self.epoch_group.__exit__()
def add_stop_condition(self, output):
if isinstance(output, core.BlobReference):
t = Task(outputs=[output], group=self.epoch_group)
output = t.outputs()[0]
assert isinstance(output, TaskOutput)
self.stop_conditions.append(output)
def get_ckpt_filename(node_name, epoch):
"""Returns the checkpoint filename.
Args:
node_name: A string. The name of the node.
epoch: An integer. The checkpoint epoch.
Returns:
ckpt_filename: A string. The filename of the checkpoint.
"""
return node_name + '.' + str(epoch)
def db_name(epoch, node_name, db_prefix, path_prefix=None):
"""Returns the full db name where checkpoint files are saved.
Args:
epoch: An integer. The checkpoint epoch.
node_name: A string. The name of the node.
db_prefix: A string. The prefix used to construct full db name.
path_prefix: A string. Optional param used to construct db name or path
where checkpoint files are stored.
Returns:
db_name: A string. The absolute path of full_db_name where checkpoint
files are saved
"""
if path_prefix:
db_name = path_prefix + get_ckpt_filename(node_name, epoch)
else:
ckpt_filename = get_ckpt_filename(node_name, epoch)
db_name = os.path.join(db_prefix, ckpt_filename)
return db_name
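# Example (illustrative): db_name(5, 'trainer_0', '/checkpoints') returns
# '/checkpoints/trainer_0.5', while passing path_prefix='hdfs://ckpt/' returns
# 'hdfs://ckpt/trainer_0.5' regardless of db_prefix.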
class CheckpointManager(object):
"""
Controls saving and loading of workspaces on every epoch boundary of a job.
If a CheckpointManager instance is passed to JobRunner, then JobRunner will
call `init`, `read` and `save` at different moments in between epoch runs.
Args:
db_prefix: The prefix used to construct full db name. Since `absolute_path`
is set to True, this will be used as db_name in SaveOp.
node_name: Name of the node where this checkpoint_manager is used.
db_type: Type of database to use for storing checkpoint.
metadata_handler: An optional object capable of reading/writing
checkpoint info in storage of choice.
"""
BLOB_NAMES = "blob_names"
def __init__(self, db_prefix, node_name, db_type, metadata_handler=None):
self._db_prefix = db_prefix
self._node_name = node_name
self._db_type = db_type
self._metadata_handler = metadata_handler
# make sure these blobs are the first in the checkpoint file.
self._net = core.Net('!!checkpoint_mngr')
self._blob_names = self._net.AddExternalInput(self.BLOB_NAMES)
self._names_output = None
self._path_prefix = None
self._path_type = None
self._current_db_name = None
self._current_checkpoint_duration = None
"""
Initialize the checkpoint manager. Determines all blobs that need to be saved
or loads from a checkpoint.
Args:
nodes: An array of nodes where this checkpoint manager is running. Should
only contain a single node.
retrieve_from_epoch: Set to a number to load blobs from this epoch.
path_prefix: Used to construct db name or path where checkpoint files are
stored.
path_type: Indicate the type of path where checkpoint files are stored.
"""
def init(
self,
nodes=None,
retrieve_from_epoch=None,
path_prefix=None,
path_type=None
):
"""
Build a Task that will be run once after the job's `init_group` is run.
This task will determine which blobs need to be checkpointed.
If retrieve_from_epoch is not None, then the checkpoint metadata is
retrieved from a previously saved checkpoint.
"""
assert nodes is None or len(nodes) == 1, (
'CheckpointManager only supports single node.')
with Task(outputs=[self._blob_names]) as task:
if retrieve_from_epoch is None:
ops.GetAllBlobNames(
[],
self._blob_names,
include_shared=False)
else:
full_db_name = db_name(retrieve_from_epoch,
self._node_name, self._db_prefix, path_prefix)
db_type = path_type or self._db_type
logger.info("Initializing checkpoints from = %s"
% full_db_name)
ops.Load(
[], self._blob_names,
db=full_db_name,
db_type=db_type,
absolute_path=True,
keep_device=True,
)
self._names_output = task.outputs()[0]
return task
def blob_list(self):
assert self._names_output
return self._names_output.fetch().tolist()
def _timed_task(self, cp_op_name, add_op):
"""
Build a Task that will measure the time span of checkpoint operations,
once operation is done, time can be read from _current_checkpoint_duration.
Args:
cp_op_name: A string name of the checkpoint operation.
add_op: A functor to add the checkpoint operation.
Returns:
A task with timer.
"""
with Task(name=cp_op_name) as task:
with ops.task_init():
timer = ops.TimerBegin([], counter_name=self._node_name)
add_op()
with ops.task_exit():
time_span_blob = ops.TimerGetAndEnd(timer)
self._current_checkpoint_duration = final_output(time_span_blob)
return task
def collect_checkpoint_stats(self, stats):
"""
Add one checkpoint stats into the stats.
Args:
stats: A dict of checkpoint stats that will be reported.
"""
if self._current_db_name and self._current_checkpoint_duration:
stats[self._current_db_name] = self._current_checkpoint_duration.fetch()[0]
else:
logger.info(
"Failed to collect checkpoint stats: {}".format(
self._current_db_name
)
)
def load(self, epoch, path_prefix=None, path_type=None):
"""
Build a Task that will be run by JobRunner when the job is to be
resumed from a given epoch. This task will run a Load op that will
load and deserialize all relevant blobs from a persistent storage.
"""
self._current_db_name = db_name(
epoch, self._node_name, self._db_prefix, path_prefix
)
db_type = path_type or self._db_type
logger.info("Loading checkpoints from = %s" % self._current_db_name)
def add_op():
ops.Load(
[],
self.blob_list(),
db=self._current_db_name,
db_type=db_type,
absolute_path=True,
keep_device=True,
)
return self._timed_task('checkpoint_load', add_op)
def load_blobs_from_checkpoint(self, blob_names, epoch):
"""
Builds a Task that loads only the necessary blobs from a checkpoint of
the given epoch. The necessary blobs are given in the blob_names
argument.
Args:
blob_names: A list of strings. Each string is the name of a
blob.
epoch: The checkpoint epoch to load from.
Returns:
A Task which loads the specified blobs from the checkpoint of the
given epoch.
"""
self._current_db_name = db_name(epoch, self._node_name, self._db_prefix)
logger.info('Load from %s' % self._current_db_name)
def add_op():
ops.Load(
[],
blob_names,
db=self._current_db_name,
db_type=self._db_type,
absolute_path=True,
allow_incomplete=True)
return self._timed_task('checkpoint_partial_load', add_op)
def check_db_exists(self, epoch):
logger.info('Check existence of %s' %
db_name(epoch, self._node_name, self._db_prefix))
with Task() as task:
existence = ops.Const(False)
ops.DBExists(
[],
[existence],
db_name=db_name(epoch, self._node_name, self._db_prefix),
db_type=self._db_type,
absolute_path=True)
task.add_output(existence)
return task
def report_checkpoint_stats(self, action_name):
"""
Report checkpoint operation stats for current node.
Args:
action_name: A string of the name of checkpoint operation.
"""
all_stats = {}
self.collect_checkpoint_stats(all_stats)
if self._metadata_handler:
self._metadata_handler.report(action_name, all_stats)
def save(self, epoch):
|
def write_checkpoint_metadata(self, epoch):
"""
Write metadata for checkpoint
Args:
epoch: An integer. The epoch-id for which checkpoint metadata is
written
"""
if self._metadata_handler is not None:
self._metadata_handler.write(epoch=epoch)
def get_resume_from_epoch_id(self, user_epoch=None):
"""
Identify the epoch-id from which Job must resume
Args:
user_epoch: An integer. Optional parameter for user to explicitly
identify the epoch-id to load checkpoint from
Returns:
epoch: the epoch-id to load checkpoints from
or None if no checkpoints were written
"""
last_epoch = user_epoch
if self._metadata_handler is not None:
last_epoch = self._metadata_handler.last_epoch(user_epoch=user_epoch)
return last_epoch
def set_params(self, nodes, path_prefix=None, path_type=None):
"""Set parameters associated with CP manager
Args:
nodes: An array of nodes where this checkpoint manager is running.
path_prefix: Used to construct db name or path where checkpoint files are
stored.
path_type: Indicate the type of path where checkpoint files are stored.
"""
if path_prefix:
self._path_prefix = path_prefix
if path_type:
self._path_type = path_type
if self._metadata_handler:
self._metadata_handler.set_params(
db_prefix=self._db_prefix,
db_type=self._db_type,
node_names=[str(self._node_name)],
path_prefix=self._path_prefix,
path_type=self._path_type)
def cp_accessible(self, epoch=None):
"""Returns True if Checkpoint data is accessible
Args:
epoch: An integer. The epoch of the checkpoint. If None,
it implies we need to check if checkpoint directory is accessible
Returns:
is_cp_accessible: A boolean. Returns True if Checkpoint data is accessible
"""
if self._metadata_handler is not None:
return self._metadata_handler.cp_accessible(epoch)
else:
return True
class MultiNodeCheckpointManager(object):
"""
Coordinates checkpoint saving and loading across multiple nodes.
Each of `init`, `load` and `save` will build TaskGroups which will
trigger checkpointing on each of the nodes involved in a distributed job.
Args:
db_prefix: The prefix used to construct full db name. Since `absolute_path`
is set to True, this will be used as db_name in SaveOp.
db_type: Type of database to use for storing checkpoint.
metadata_handler: An optional object capable of reading/writing
checkpoint info in storage of choice.
"""
def __init__(self, db_prefix, db_type, metadata_handler=None):
self._node_managers = None
self._db_prefix = db_prefix
self._db_type = db_type
self._metadata_handler = metadata_handler
self._path_prefix = None
self._path_type = None
def _task_group(self, func, *args, **kw):
assert self._node_managers is not None, 'init must be called first.'
with TaskGroup(WorkspaceType.GLOBAL) as task_group:
for node, manager in self._node_managers:
with Node(node):
func(manager, *args, **kw)
return task_group
"""
Args:
nodes: An array of nodes where this checkpoint manager is running.
retrieve_from_epoch: Set to a number to load blobs from this epoch.
path_prefix: Used to construct db name or path where checkpoint files are
stored.
path_type: Indicate the type of path where checkpoint files are stored.
"""
def init(
self, nodes, retrieve_from_epoch=None, path_prefix=None, path_type=None
):
if self._node_managers is not None:
assert [node for node, _ in self._node_managers] == nodes
return TaskGroup(WorkspaceType.GLOBAL)
self._node_managers = []
for node in nodes:
with Node(node):
manager = CheckpointManager(
db_prefix=self._db_prefix,
node_name=str(node),
db_type=self._db_type)
self._node_managers.append((node, manager))
return self._task_group(
CheckpointManager.init,
nodes=[node],
retrieve_from_epoch=retrieve_from_epoch,
path_prefix=path_prefix,
path_type=path_type)
def load(self, epoch, path_prefix=None, path_type=None):
return self._task_group(
CheckpointManager.load,
epoch,
path_prefix=path_prefix,
path_type=path_type)
def load_blobs_locally(self, nodes, blob_names, epoch, session):
"""Loads the necessary blobs from the checkpoints to the current node.
Args:
blob_names: A list of strings. Each string is the name of a
blob.
epoch: An integer. The checkpoint epoch to load from.
session: A Session object to execute the Load ops.
"""
if self._node_managers is not None:
assert [node for node, _ in self._node_managers] == nodes
else:
self._node_managers = []
for node in nodes:
with Node(node):
manager = CheckpointManager(
db_prefix=self._db_prefix,
node_name=str(node),
db_type=self._db_type)
self._node_managers.append((node, manager))
assert self._node_managers is not None, 'must initialize node managers'
for _, manager in self._node_managers:
existence_task = manager.check_db_exists(epoch)
session.run(existence_task)
existence = existence_task.outputs()[0].fetch()
if not existence:
logger.info('DB %s does not exist!' %
db_name(epoch, manager._node_name, manager._db_prefix))
return False
load_task = manager.load_blobs_from_checkpoint(blob_names, epoch)
session.run(load_task)
logger.info('Successfully loaded from checkpoints.')
return True
def get_ckpt_db_name(self, node_name, epoch):
"""Returns the DB name of the given node and the given epoch.
The DB name is effectively the checkpoint path of the given node and
the given epoch.
Args:
node_name: A string. The node name of interest.
epoch: An integer. The epoch of the checkpoint.
Returns:
checkpoint_db_name: A string. The checkpoint path of the given
node and the given epoch.
"""
for node, manager in self._node_managers:
if str(node) == node_name:
return db_name(epoch, manager._node_name, manager._db_prefix)
def report_checkpoint_stats(self, action_name):
"""
Report the checkpoint stats for all the nodes, we need to aggregate all
the node's stats together so that we know which node's checkpoint
operation dominates.
Args:
action_name: A string of the name of checkpoint operation.
"""
all_stats = {}
for _, manager in self._node_managers:
manager.collect_checkpoint_stats(all_stats)
logger.debug("checkpoint stats: {}".format(all_stats))
if self._metadata_handler:
self._metadata_handler.report(action_name, all_stats)
def save(self, epoch):
"""
Build a Task that will execute a Save ops to serialize and persist
blobs present in the global workspace.
"""
return self._task_group(CheckpointManager.save, epoch)
def write_checkpoint_metadata(self, epoch):
"""
Write metadata for checkpoint
Args:
epoch: An integer. The epoch-id for which checkpoint metadata is
written
"""
if self._metadata_handler is not None:
self._metadata_handler.write(epoch=epoch)
def get_resume_from_epoch_id(self, user_epoch=None):
"""
Identify the epoch-id from which Job must resume
Args:
user_epoch: An integer. Optional parameter for user to explicitly
identify the epoch-id to load checkpoint from
Returns:
epoch: the epoch-id to load checkpoints from
or None if no checkpoints were written
"""
last_epoch = user_epoch
if self._metadata_handler is not None:
last_epoch = self._metadata_handler.last_epoch(user_epoch=user_epoch)
return last_epoch
def set_params(self, nodes, path_prefix=None, path_type=None):
"""Set parameters associated with CP manager
Args:
nodes: An array of nodes where this checkpoint manager is running.
path_prefix: Used to construct db name or path where checkpoint files are
stored.
path_type: Indicate the type of path where checkpoint files are stored.
"""
self._node_names = [str(node) for node in nodes]
if path_prefix:
self._path_prefix = path_prefix
if path_type:
self._path_type = path_type
if self._metadata_handler:
self._metadata_handler.set_params(
db_prefix=self._db_prefix,
db_type=self._db_type,
node_names=self._node_names,
path_prefix=self._path_prefix,
path_type=self._path_type)
def cp_accessible(self, epoch=None):
"""Returns True if Checkpoint data is accessible
Args:
epoch: An integer. The epoch of the checkpoint. If None,
it implies we need to check if checkpoint directory is accessible
Returns:
is_cp_accessible: A boolean. Returns True if Checkpoint data is accessible
"""
if self._metadata_handler is not None:
return self._metadata_handler.cp_accessible(epoch)
else:
return True
class UploadTaskGroupBuilder(object):
"""A simple class to upload checkpoints."""
def build(self, epoch, checkpoint_manager):
"""Builds the task group to upload checkpoints.
Args:
epoch: An integer. The checkpoint epoch to be uploaded.
checkpoint_manager: Can be a CheckpointManager for single machine
or a MultiNodeCheckpointManager for multi-machine. The manager
that initializes/saves/loads checkpoints.
Raises:
NotImplementedError: This base class only has the interface,
the implementation will be in the subclasses.
"""
raise NotImplementedError()
class JobRunner(object):
"""
Implement the runtime logic for jobs with checkpointing at the level of
epoch. Can be used to run either single-host or distributed jobs. Job
runner is a callable to be called once from the master, passing a session
as an argument. This call will block until the Job execution is complete.
If a checkpoint_manager is passed, checkpoints will be taken after
initialization and after each epoch execution. If, in addition,
`resume_from_epoch` is an epoch number, the corresponding checkpoint will
be loaded and job execution will continue from the given epoch. In
this case, the job's init_group will not be run.
Refer to checkpoint_test.py for an example.
"""
def __init__(self, job, checkpoint_manager=None, resume_from_epoch=None,
upload_task_group_builder=None):
"""Initializes the JobRunner.
Args:
job: A Job object. The job to be executed.
checkpoint_manager: Can be a CheckpointManager for single machine
or a MultiNodeCheckpointManager for multi-machine. The manager
that initializes/saves/loads checkpoints.
resume_from_epoch: An integer. The epoch to resume from.
upload_task_group_builder: A subclass of the
UploadTaskGroupBuilder. Creates a task group to upload
checkpoints.
"""
self.resume_from_epoch = resume_from_epoch
self.checkpoint_manager = checkpoint_manager
self.job = job
self.upload_task_group_builder = upload_task_group_builder
def train(self, session):
"""Runs the training flow.
Args:
session: A Session object. Valid choices are: LocalSession,
LocalHostScheduler, and DistributedSession. It is used to
execute one TaskGroup a time.
"""
# identify the epoch we must resume from
if self.checkpoint_manager:
self.checkpoint_manager.set_params(nodes=self.job.nodes_to_checkpoint())
self.resume_from_epoch = self.checkpoint_manager.\
get_resume_from_epoch_id(self.resume_from_epoch)
if self.resume_from_epoch is not None:
logger.info('Resuming from epoch {}'.format(self.resume_from_epoch))
# Initialize all the nodes.
from_scratch = self.resume_from_epoch is None
if from_scratch:
session.run(self.job.init_group)
if self.checkpoint_manager:
logger.info('Preparing checkpoints ...')
session.run(self.checkpoint_manager.init(
self.job.nodes_to_checkpoint(),
retrieve_from_epoch=self.resume_from_epoch))
# Save the first checkpoint before training starts, or resume from
# a previously saved checkpoint.
if from_scratch:
self.save_checkpoints(0, session)
else:
logger.info('Loading checkpoints for epoch {} ...'.format(
self.resume_from_epoch))
session.run(
self.checkpoint_manager.load(self.resume_from_epoch))
self.checkpoint_manager.report_checkpoint_stats('checkpoint_load')
logger.info('Checkpoint loaded')
logger.info("Finished initializing")
# Start training.
epoch = 1 if from_scratch else self.resume_from_epoch + 1
while True:
logger.info('Starting epoch %d' % epoch)
session.run(self.job.epoch_group)
logger.info('Finished epoch %d' % epoch)
stop_conditions = [o.fetch() for o in self.job.stop_conditions]
if self.checkpoint_manager:
self.save_checkpoints(epoch, session)
if any(stop_conditions):
logger.info('Stopping')
break
epoch += 1
logger.info('Finished training')
# Upload the checkpoints.
if (self.upload_task_group_builder):
upload_task_group = self.upload_task_group_builder.build(
epoch, self.checkpoint_manager)
session.run(upload_task_group)
logger.info('Finished uploading the checkpoints')
# Download the parameters to save
session.run(self.job.download_group)
logger.info('Finished downloading the parameters')
# Finally run the exit step to save nets
session.run(self.job.exit_group)
logger.info('Finished running the exit group')
return epoch
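# A minimal driving sketch (illustrative; how the session is built depends on
# the caffe2 session implementation in use):
#
#   runner = JobRunner(job, checkpoint_manager=manager)
#   last_epoch = runner.train(session)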
def load_blobs_from_checkpoints(self, blob_names, epoch, session):
"""Loads the necessary blobs from the checkpoints.
Checkpoints store the snapshots of the workspace in each node.
Sometimes we only need to load a subset of the blobs from the
checkpoints. One common scenario is to load only the model blobs from
the checkpoints for evaluation purpose. Given the names of the
necessary blobs, this function goes over all the checkpoints of all the
nodes, but only loads the blobs specified in the blob_names to the
current workspace.
Args:
blob_names: A list of strings. Each string is the name of a
blob.
epoch: An integer. The checkpoint epoch to load from.
session: A Session object to execute the load ops.
Raises:
ValueError: When the checkpoint manager is invalid.
"""
if not self.checkpoint_manager:
raise ValueError('Checkpoint manager is None')
logger.info('Loading checkpoint for epoch {} ...'.format(epoch))
result = self.checkpoint_manager.load_blobs_locally(
self.job.nodes_to_checkpoint(), blob_names, epoch, session)
self.checkpoint_manager.report_checkpoint_stats('checkpoint_partial_load')
return result
def save_checkpoints(self, epoch, session):
"""Triggers operation to save checkpoints
This method will trigger the Save ops to serialize and persist the
blobs present in the global workspace.
Args:
epoch: An integer. The checkpoint epoch-id that we are saving.
session: A Session object to execute the save ops.
Raises:
ValueError: When the checkpoint manager is invalid.
"""
if not self.checkpoint_manager:
raise ValueError('Checkpoint manager is None')
try:
is_accessible = self.checkpoint_manager.cp_accessible(epoch=None)
if is_accessible:
logger.info('Saving checkpoints for epoch {}'.format(epoch))
session.run(self.checkpoint_manager.save(epoch))
self.checkpoint_manager.write_checkpoint_metadata(epoch)
logger.info('Checkpoints saved')
self.checkpoint_manager.report_checkpoint_stats('checkpoint_save')
else:
logger.warning("Checkpoint files cannot be accessed!")
except Exception as ex:
logger.warning("Unable to write checkpoint for epoch {}. Error={}".
format(epoch, ex))
def epoch_limiter(job, num_epochs):
"""
Creates a task that will output True when a given
number of epochs has finished.
"""
with job.init_group:
init_net = core.Net('epoch_counter_init')
counter = init_net.CreateCounter([], init_count=num_epochs - 1)
Task(step=init_net)
with job.epoch_group:
epoch_net = core.Net('epoch_countdown')
finished = epoch_net.CountDown(counter)
output = Task(step=epoch_net, outputs=finished).outputs()[0]
job.add_stop_condition(output)
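# Example (illustrative): epoch_limiter(job, 10) makes the epoch loop in
# JobRunner.train() stop after ten runs of job.epoch_group.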
| """
Build a Task that is run once after `init_group` and after each
epoch is run. This will execute a Save ops to serialize and persist
blobs present in the global workspace.
"""
self._current_db_name = db_name(epoch, self._node_name, self._db_prefix)
logger.info('Saving to %s' % self._current_db_name)
def add_op():
ops.Save(
self.blob_list(), [],
db=self._current_db_name,
db_type=self._db_type,
absolute_path=True)
return self._timed_task('checkpoint_save', add_op) |
list.rs | use std::fmt;
use prettytable::{cell, row};
use lib_core_ui::*;
use lib_interop::models::DRunner;
use crate::widgets::{RunnerNameWidget, RunnerStatusWidget};
pub struct RunnerListWidget<'a> {
runners: &'a [DRunner],
}
impl<'a> RunnerListWidget<'a> {
pub fn | (runners: &'a [DRunner]) -> Self {
Self { runners }
}
}
impl fmt::Display for RunnerListWidget<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.runners.is_empty() {
return writeln!(f, "There are no runners");
}
let mut table = table! {
format: FORMAT_NO_BORDER,
titles: ["▲ Name", "Status", "Joined at", "Last heartbeat at"],
};
for runner in self.runners {
table.add_row(row![
RunnerNameWidget::new(&runner.name),
RunnerStatusWidget::new(&runner.status),
DateTimeWidget::new(runner.joined_at),
DateTimeWidget::new(runner.last_heartbeat_at),
]);
}
write!(f, "{}", table)
}
} | new |
main.go | package main
import (
"flag"
"fmt"
"os"
"time"
sigar "github.com/cloudfoundry/gosigar"
)
var (
targetPercentage float64
outputFile string
)
func main() {
flag.Float64Var(&targetPercentage, "P", 50.0,
"Set this to an integer which represents the percentage of system RAM to reserve for InnoDB's buffer pool") |
mem := sigar.Mem{}
mem.Get()
totalMem := mem.Total
fmt.Printf("%s Total memory in bytes: %d\n", time.Now().UTC().Format(time.RFC3339Nano), mem.Total)
file, err := os.OpenFile(outputFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
panic(err)
}
defer file.Close()
if err := Generate(totalMem, targetPercentage, file); err != nil {
fmt.Printf("%s generating %s failed: %s\n",
time.Now().UTC().Format(time.RFC3339Nano), outputFile, err)
os.Exit(1)
}
} | flag.StringVar(&outputFile, "f", "",
"Target file for rendering MySQL option file")
flag.Parse() |
test_remote.py | import pytest
from src.conanbuilder.remote import Remote
@pytest.fixture
def remote():
return Remote("myName", "myUrl")
def test_default_values(remote):
assert remote.name == "myName"
assert remote.url == "myUrl"
assert remote.verify_ssl is True | assert remote.priority == 0
assert remote.force is False
assert remote.login is False |
|
box_items.rs | use crate::error::{self, Result};
use crate::patch::*;
use crate::sav::{Sav, SaveItem};
impl Sav {
/// Get save box items
///
/// # Example
///
/// ```
/// use pkmnapi_db::sav::*;
/// use std::fs;
/// # use std::env;
/// # let sav_path = env::var("PKMN_SAV").expect("Set the PKMN_SAV environment variable to point to the SAV location");
///
/// let sav_data = fs::read(sav_path).unwrap();
/// let sav = Sav::new(&sav_data).unwrap();
///
/// let box_items = sav.get_box_items().unwrap();
///
/// assert_eq!(
/// box_items,
/// vec![
/// SaveItem {
/// item_id: 0x04,
/// amount: 0x05
/// },
/// SaveItem {
/// item_id: 0x14,
/// amount: 0x02
/// }
/// ]
/// );
/// ```
pub fn get_box_items(&self) -> Result<Vec<SaveItem>> |
/// Set save box items
///
/// # Example
///
/// ```
/// use pkmnapi_db::patch::*;
/// use pkmnapi_db::sav::*;
/// use std::fs;
/// # use std::env;
/// # let sav_path = env::var("PKMN_SAV").expect("Set the PKMN_SAV environment variable to point to the SAV location");
///
/// let sav_data = fs::read(sav_path).unwrap();
/// let sav = Sav::new(&sav_data).unwrap();
///
/// let patch = sav
/// .set_box_items(
/// &vec![
/// SaveItem {
/// item_id: 0x01,
/// amount: 0x20
/// }
/// ]
/// )
/// .unwrap();
///
/// assert_eq!(
/// patch,
/// Patch {
/// offset: 0x27E6,
/// length: 0x04,
/// data: vec![0x01, 0x01, 0x20, 0xFF]
/// }
/// );
/// ```
pub fn set_box_items(&self, save_box_items: &Vec<SaveItem>) -> Result<Patch> {
let offset = 0x27E6;
let max_len = 50;
let item_count = save_box_items.len();
if item_count > max_len {
return Err(error::Error::SavBoxItemsWrongSize(max_len, item_count));
}
let save_box_items_data: Vec<u8> = save_box_items
.iter()
.take(max_len)
.flat_map(|save_box_item| save_box_item.to_raw())
.collect();
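// Serialized layout (as written below): one count byte, then the raw
// (item_id, amount) byte pairs, then a 0xFF terminator — matching the
// layout read back in get_box_items() above.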
let data = [vec![item_count as u8], save_box_items_data, vec![0xFF]].concat();
Ok(Patch::new(&offset, &data))
}
}
| {
let offset = 0x27E6;
let max_len = 50;
let item_count = self.sav[offset] as usize;
let offset = offset + 1;
let save_box_items: Vec<SaveItem> = self.sav[offset..(offset + (max_len * 2))]
.chunks(2)
.take(item_count)
.map(|chunk| SaveItem::from(chunk))
.collect();
Ok(save_box_items)
} |
dashboard.go | package model
import (
"fmt"
"regexp"
"github.com/slok/grafterm/internal/service/unit"
) | const (
// defGridMaxWidth is the default grid width used when is not set.
defGridMaxWidth = 100
)
// Dashboard represents a dashboard.
type Dashboard struct {
Grid Grid `json:"grid,omitempty"`
Variables []Variable `json:"variables,omitempty"`
Widgets []Widget `json:"widgets,omitempty"`
}
// Variable is a dynamic variable that will be available through the
// dashboard.
type Variable struct {
Name string `json:"name,omitempty"`
VariableSource `json:",inline"`
}
// VariableSource is the variable kind with it's data.
type VariableSource struct {
Constant *ConstantVariableSource `json:"constant,omitempty"`
Interval *IntervalVariableSource `json:"interval,omitempty"`
}
// ConstantVariableSource represents the constant variables.
type ConstantVariableSource struct {
Value string `json:"value,omitempty"`
}
// IntervalVariableSource represents the interval variables.
type IntervalVariableSource struct {
Steps int `json:"steps,omitempty"`
}
// Widget represents a widget.
type Widget struct {
Title string `json:"title,omitempty"`
GridPos GridPos `json:"gridPos,omitempty"`
WidgetSource `json:",inline"`
}
// Grid represents the options of the grid in the dashboard.
type Grid struct {
// Fixed means that the grid positions (gridPos) of the widgets
// will be fixed and need X and Y values.
// If false it will be adaptive and will ignore X and Y values
// and only use the size of the widget (W, width).
FixedWidgets bool `json:"fixedWidgets,omitempty"`
// MaxWidth is the maximum width (horizontal) the Grid will have, this will be
// the scale for the widgets `GridPos.W`. For example a `GridPos.W: 50`
// in a `Grid.MaxWidth: 100` would be half of the row, but in a `Grid.MaxWidth: 1000`
// would be a 5% of the row.
// Not setting MaxWidth or setting to 0 would fallback to default MaxWidth.
MaxWidth int `json:"maxWidth,omitempty"`
}
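// Example (illustrative): with the default MaxWidth of 100, a widget with
// GridPos{W: 50} spans half a row, while with MaxWidth: 1000 the same widget
// would cover only 5% of the row.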
// GridPos represents the grid position.
type GridPos struct {
// X represents the position on the grid (from 0 to 100).
X int `json:"x,omitempty"`
// Y represents the position on the grid (from 0 to infinite,
// where the total will be used using all the widgets Y and H).
Y int `json:"y,omitempty"`
// W represents the width of the widget (same unit as X).
W int `json:"w,omitempty"`
// TODO(slok): H represents the height of the widget (same unit as Y).
// H int `json:"h,omitempty"`
}
// WidgetSource will tell what kind of widget is.
type WidgetSource struct {
Singlestat *SinglestatWidgetSource `json:"singlestat,omitempty"`
Gauge *GaugeWidgetSource `json:"gauge,omitempty"`
Graph *GraphWidgetSource `json:"graph,omitempty"`
}
// SinglestatWidgetSource represents a simple value widget.
type SinglestatWidgetSource struct {
ValueRepresentation `json:",inline"`
Query Query `json:"query,omitempty"`
ValueText string `json:"valueText,omitempty"`
Thresholds []Threshold `json:"thresholds,omitempty"`
}
// GaugeWidgetSource represents a simple value widget in donut format.
type GaugeWidgetSource struct {
Query Query `json:"query,omitempty"`
PercentValue bool `json:"percentValue,omitempty"`
Max int `json:"max,omitempty"`
Min int `json:"min,omitempty"`
Thresholds []Threshold `json:"thresholds,omitempty"`
}
// GraphWidgetSource represents a graph widget that renders one or more query series.
type GraphWidgetSource struct {
Queries []Query `json:"queries,omitempty"`
Visualization GraphVisualization `json:"visualization,omitempty"`
}
// Query is the query that will be made to the datasource.
type Query struct {
Expr string `json:"expr,omitempty"`
// Legend accepts `text.template` format.
Legend string `json:"legend,omitempty"`
DatasourceID string `json:"datasourceID,omitempty"`
}
// Threshold is a color threshold that is composed
// with the start value, 0 means the base or starting
// threshold.
type Threshold struct {
StartValue float64 `json:"startValue"`
Color string `json:"color"`
}
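// Example (illustrative; exact boundary semantics depend on the widget
// renderer): thresholds [{startValue: 0, color: "green"}, {startValue: 80,
// color: "red"}] paint values below 80 green and values from 80 upwards red.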
// GraphVisualization controls how the graph will visualize
// lines, colors, legend...
type GraphVisualization struct {
SeriesOverride []SeriesOverride `json:"seriesOverride,omitempty"`
Legend Legend `json:"legend,omitempty"`
YAxis YAxis `json:"yAxis,omitempty"`
}
// SeriesOverride will override visualization based on
// the regex legend.
type SeriesOverride struct {
Regex string `json:"regex,omitempty"`
CompiledRegex *regexp.Regexp
Color string `json:"color,omitempty"`
}
// Legend controls the legend of a widget.
type Legend struct {
Disable bool `json:"disable,omitempty"`
RightSide bool `json:"rightSide,omitempty"`
}
// YAxis controls the YAxis of a widget.
type YAxis struct {
ValueRepresentation `json:",inline"`
}
// ValueRepresentation controls the representation of a value.
type ValueRepresentation struct {
Unit string `json:"unit,omitempty"`
Decimals int `json:"decimals,omitempty"`
}
// Validate validates that the object model is correct.
// Validation also autofills the required default attributes
// so the object ends in a valid state.
func (d *Dashboard) Validate() error {
err := d.Grid.validate()
if err != nil {
return err
}
for _, v := range d.Variables {
err := v.validate()
if err != nil {
return err
}
}
// Validate individual widgets.
for _, w := range d.Widgets {
err := w.validate(*d)
if err != nil {
return err
}
}
// TODO(slok): Validate all widgets as a whole (for example total of grid)
return nil
}
func (g *Grid) validate() error {
if g.MaxWidth <= 0 {
g.MaxWidth = defGridMaxWidth
}
return nil
}
func (v Variable) validate() error {
if v.Name == "" {
return fmt.Errorf("variables should have a name")
}
// Variable type checks.
switch {
case v.VariableSource.Constant != nil:
c := v.VariableSource.Constant
if c.Value == "" {
return fmt.Errorf("%s constant variable needs a value", v.Name)
}
case v.VariableSource.Interval != nil:
i := v.VariableSource.Interval
if i.Steps <= 0 {
return fmt.Errorf("%s interval variable step should be > 0", v.Name)
}
default:
return fmt.Errorf("%s variable is empty, it should be of a specific type", v.Name)
}
return nil
}
func (w Widget) validate(d Dashboard) error {
err := w.GridPos.validate(d.Grid)
if err != nil {
return fmt.Errorf("error on %s widget grid position: %s", w.Title, err)
}
switch {
case w.Gauge != nil:
err := w.Gauge.validate()
if err != nil {
return fmt.Errorf("error on %s gauge widget: %s", w.Title, err)
}
case w.Singlestat != nil:
err := w.Singlestat.validate()
if err != nil {
return fmt.Errorf("error on %s singlestat widget: %s", w.Title, err)
}
case w.Graph != nil:
err := w.Graph.validate()
if err != nil {
return fmt.Errorf("error on %s graph widget: %s", w.Title, err)
}
}
return nil
}
func (g GridPos) validate(gr Grid) error {
if g.W <= 0 {
return fmt.Errorf("widget grid position should have a width")
}
if gr.FixedWidgets && g.X <= 0 {
return fmt.Errorf("widget grid position in a fixed grid should have am X position")
}
if gr.FixedWidgets && g.Y <= 0 {
return fmt.Errorf("widget grid position in a fixed grid should have am Y position")
}
return nil
}
func (g GaugeWidgetSource) validate() error {
err := g.Query.validate()
if err != nil {
return fmt.Errorf("query error on gauge widget: %s", err)
}
if g.PercentValue && g.Max <= g.Min {
return fmt.Errorf("a percent based gauge max should be greater than min")
}
err = validateThresholds(g.Thresholds)
if err != nil {
return fmt.Errorf("thresholds error on gauge widget: %s", err)
}
return nil
}
func (s SinglestatWidgetSource) validate() error {
err := s.Query.validate()
if err != nil {
return fmt.Errorf("query error on singlestat widget: %s", err)
}
err = s.ValueRepresentation.validate()
if err != nil {
return err
}
err = validateThresholds(s.Thresholds)
if err != nil {
return fmt.Errorf("thresholds error on singlestat widget: %s", err)
}
return nil
}
func (g GraphWidgetSource) validate() error {
if len(g.Queries) <= 0 {
return fmt.Errorf("graph must have at least one query")
}
for _, q := range g.Queries {
err := q.validate()
if err != nil {
return err
}
}
sos, err := validateSeriesOverride(g.Visualization.SeriesOverride)
if err != nil {
return fmt.Errorf("series override error on graph widget: %s", err)
}
g.Visualization.SeriesOverride = sos
err = g.Visualization.YAxis.validate()
if err != nil {
return err
}
return nil
}
func (q Query) validate() error {
if q.Expr == "" {
return fmt.Errorf("query must have an expression")
}
if q.DatasourceID == "" {
return fmt.Errorf("query must have have a datosource ID")
}
return nil
}
func validateThresholds(ts []Threshold) error {
startValues := map[float64]struct{}{}
for _, t := range ts {
_, ok := startValues[t.StartValue]
if ok {
return fmt.Errorf("threshold start value settings can't be repeated in multiple thresholds")
}
startValues[t.StartValue] = struct{}{}
}
return nil
}
func (y YAxis) validate() error {
err := y.ValueRepresentation.validate()
if err != nil {
return err
}
return nil
}
func (v ValueRepresentation) validate() error {
_, err := unit.NewUnitFormatter(v.Unit)
if err != nil {
return fmt.Errorf("%s is an invalid unit", v.Unit)
}
return nil
}
func (s SeriesOverride) validate() error {
if s.Regex == "" {
return fmt.Errorf("a graph override for series should have a regex")
}
return nil
}
func validateSeriesOverride(sos []SeriesOverride) ([]SeriesOverride, error) {
regexes := map[string]struct{}{}
for i, s := range sos {
err := s.validate()
if err != nil {
return sos, err
}
_, ok := regexes[s.Regex]
if ok {
return sos, fmt.Errorf("series override regex setting can't be repeated in multiple series override")
}
regexes[s.Regex] = struct{}{}
// Compile the regex.
re, err := regexp.Compile(s.Regex)
if err != nil {
return sos, err
}
s.CompiledRegex = re
sos[i] = s
}
return sos, nil
} |
// Defaults. |
server-grpc.ts | import { isObject, isUndefined } from '@nestjs/common/utils/shared.utils';
import { fromEvent } from 'rxjs';
import { takeUntil } from 'rxjs/operators';
import {
CANCEL_EVENT,
GRPC_DEFAULT_MAX_RECEIVE_MESSAGE_LENGTH,
GRPC_DEFAULT_MAX_SEND_MESSAGE_LENGTH,
GRPC_DEFAULT_PROTO_LOADER,
GRPC_DEFAULT_URL
} from '../constants';
import { InvalidGrpcPackageException } from '../exceptions/errors/invalid-grpc-package.exception';
import { InvalidProtoDefinitionException } from '../exceptions/errors/invalid-proto-definition.exception';
import { CustomTransportStrategy } from '../interfaces';
import {
GrpcOptions,
MicroserviceOptions,
} from '../interfaces/microservice-configuration.interface';
import { Server } from './server';
let grpcPackage: any = {};
let grpcProtoLoaderPackage: any = {};
export class ServerGrpc extends Server implements CustomTransportStrategy {
private readonly url: string;
private grpcClient: any;
constructor(private readonly options: MicroserviceOptions['options']) {
super();
this.url =
this.getOptionsProp<GrpcOptions>(options, 'url') || GRPC_DEFAULT_URL;
const protoLoader =
this.getOptionsProp<GrpcOptions>(options, 'protoLoader') ||
GRPC_DEFAULT_PROTO_LOADER;
grpcPackage = this.loadPackage('grpc', ServerGrpc.name);
grpcProtoLoaderPackage = this.loadPackage(protoLoader, ServerGrpc.name);
}
public async listen(callback: () => void) {
this.grpcClient = this.createClient();
await this.start(callback);
}
public async start(callback?: () => void) {
await this.bindEvents();
this.grpcClient.start();
callback();
}
public async bindEvents() {
const grpcContext = this.loadProto();
const packageName = this.getOptionsProp<GrpcOptions>(
this.options,
'package',
);
const grpcPkg = this.lookupPackage(grpcContext, packageName);
if (!grpcPkg) {
const invalidPackageError = new InvalidGrpcPackageException();
this.logger.error(invalidPackageError.message, invalidPackageError.stack);
throw invalidPackageError;
}
// Take all of the services defined in grpcPkg and assign them to
// method handlers defined in Controllers
for (const definition of this.getServiceNames(grpcPkg)) {
this.grpcClient.addService(
// First parameter requires exact service definition from proto
definition.service.service,
// Here full proto definition required along with namespaced pattern name
await this.createService(definition.service, definition.name),
);
}
}
/**
* Will return all of the services along with their fully namespaced
* names as an array of objects.
* This method initiates a recursive scan of the grpcPkg object.
*/
public getServiceNames(grpcPkg: any): { name: string; service: any }[] {
// Define accumulator to collect all of the services available to load
const services: { name: string; service: any }[] = [];
// Initiate recursive services collector starting with empty name
this.collectDeepServices('', grpcPkg, services);
return services;
}
public async createService(grpcService: any, name: string) {
const service = {};
// tslint:disable-next-line:forin
for (const methodName in grpcService.prototype) {
const methodHandler = this.messageHandlers[
this.createPattern(name, methodName)
];
if (!methodHandler) {
continue;
}
service[methodName] = await this.createServiceMethod(
methodHandler,
grpcService.prototype[methodName],
);
}
return service;
}
public createPattern(service: string, methodName: string): string {
return JSON.stringify({
service,
rpc: methodName,
});
}
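// Example (illustrative): createPattern('Greeter', 'SayHello') yields
// '{"service":"Greeter","rpc":"SayHello"}' — the key under which the
// corresponding handler is stored in this.messageHandlers.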
public createServiceMethod(
methodHandler: Function,
protoNativeHandler: any,
): Function {
return protoNativeHandler.responseStream
? this.createStreamServiceMethod(methodHandler)
: this.createUnaryServiceMethod(methodHandler);
}
public createUnaryServiceMethod(methodHandler): Function {
return async (call, callback) => {
const handler = methodHandler(call.request, call.metadata);
this.transformToObservable(await handler).subscribe(
data => callback(null, data),
err => callback(err),
);
};
}
public createStreamServiceMethod(methodHandler): Function {
return async (call, callback) => {
const handler = methodHandler(call.request, call.metadata);
const result$ = this.transformToObservable(await handler);
await result$
.pipe(takeUntil(fromEvent(call, CANCEL_EVENT)))
.forEach(data => call.write(data));
call.end();
};
}
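// Note: the takeUntil(fromEvent(call, CANCEL_EVENT)) pipe above unsubscribes
// as soon as the client cancels the call, so no further writes hit a closed
// stream before call.end() runs.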
public close() {
this.grpcClient && this.grpcClient.forceShutdown();
this.grpcClient = null;
}
public deserialize(obj): any {
try {
return JSON.parse(obj);
} catch (e) {
return obj;
}
}
public createClient(): any {
const server = new grpcPackage.Server({
'grpc.max_send_message_length': this.getOptionsProp<GrpcOptions>(this.options, 'maxSendMessageLength', GRPC_DEFAULT_MAX_SEND_MESSAGE_LENGTH),
'grpc.max_receive_message_length': this.getOptionsProp<GrpcOptions>(this.options, 'maxReceiveMessageLength', GRPC_DEFAULT_MAX_RECEIVE_MESSAGE_LENGTH)
});
const credentials = this.getOptionsProp<GrpcOptions>(
this.options,
'credentials',
);
server.bind(
this.url,
credentials || grpcPackage.ServerCredentials.createInsecure(),
);
return server;
}
public lookupPackage(root: any, packageName: string) {
/** Reference: https://github.com/kondi/rxjs-grpc */
let pkg = root;
for (const name of packageName.split(/\./)) {
pkg = pkg[name];
}
return pkg; |
public loadProto(): any {
try {
const file = this.getOptionsProp<GrpcOptions>(this.options, 'protoPath');
const loader = this.getOptionsProp<GrpcOptions>(this.options, 'loader');
const packageDefinition = grpcProtoLoaderPackage.loadSync(file, loader);
const packageObject = grpcPackage.loadPackageDefinition(
packageDefinition,
);
return packageObject;
} catch (err) {
const invalidProtoError = new InvalidProtoDefinitionException();
const message =
err && err.message ? err.message : invalidProtoError.message;
this.logger.error(message, invalidProtoError.stack);
throw invalidProtoError;
}
}
/**
* Recursively fetch all of the service methods available on loaded
* protobuf descriptor object, and collect those as an objects with
* dot-syntax full-path names.
*
* Example:
* a proto package Bundle.FirstService with service Events { rpc ... }
* will be resolved (when loaded for the Bundle package) to:
* {
* name: "FirstService.Events",
* service: {Object}
* }
*/
private collectDeepServices(
name: string,
grpcDefinition: any,
accumulator: { name: string; service: any }[],
) {
if (!isObject(grpcDefinition)) {
return;
}
const keysToTraverse = Object.keys(grpcDefinition);
// Traverse definitions or namespace extensions
for (const key of keysToTraverse) {
const nameExtended = this.parseDeepServiceName(name, key);
const deepDefinition = grpcDefinition[key];
const isServiceDefined = deepDefinition && !isUndefined(deepDefinition.service);
const isServiceBoolean = isServiceDefined
? deepDefinition.service !== false
: false;
if (isServiceDefined && isServiceBoolean) {
accumulator.push({
name: nameExtended,
service: deepDefinition,
});
}
// Continue recursion until objects end or service definition found
else {
this.collectDeepServices(nameExtended, deepDefinition, accumulator);
}
}
}
private parseDeepServiceName(name: string, key: string): string {
// If depth is zero then just return key
if (name.length === 0) {
return key;
}
// Otherwise add next through dot syntax
return name + '.' + key;
}
} | } |
main.go | package main
import (
"time"
"github.com/ericxiao417/ratelimit"
)
func main() | {
r, err := ratelimit.NewFixedWindowRateLimiter(&ratelimit.Config{
Limit: 5,
FixedInterval: 15 * time.Second,
})
if err != nil {
panic(err)
}
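	// DoWork presumably pushes 10 jobs through the limiter (5 allowed per 15-second window).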
ratelimit.DoWork(r, 10)
} |
|
rendering_system.rs | use crate::components::*;
use crate::resources::*;
use crate::constants::TILE_WIDTH;
use ggez::graphics;
use ggez::graphics::DrawParam;
use ggez::graphics::Image;
use ggez::graphics::Color;
use ggez::nalgebra as na;
use ggez::Context;
use specs::{Join, ReadStorage, System, Read};
pub struct | <'a> {
pub context: &'a mut Context,
}
impl RenderingSystem<'_> {
pub fn draw_text(&mut self, text_string: &str, x: f32, y: f32) {
let text = graphics::Text::new(text_string);
let destination = na::Point2::new(x, y);
let color = Some(Color::new(0.0, 0.0, 0.0, 1.0));
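        // Note: despite the name, this point is the relative destination offset for the queued text, not a size.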
let dimensions = na::Point2::new(0.0, 20.0);
graphics::queue_text(self.context, &text, dimensions, color);
graphics::draw_queued_text(
self.context,
graphics::DrawParam::new().dest(destination),
None,
graphics::FilterMode::Linear,
)
.expect("expected drawing queued text");
}
}
// System implementation
impl<'a> System<'a> for RenderingSystem<'a> {
// Data
type SystemData = (Read<'a, Gameplay>, ReadStorage<'a, Position>, ReadStorage<'a, Renderable>);
fn run(&mut self, data: Self::SystemData) {
let (gameplay, positions, renderables) = data;
        // Clearing the screen (this gives us the background colour)
graphics::clear(self.context, graphics::Color::new(0.95, 0.95, 0.95, 1.0));
// Get all the renderables with their positions and sort by the position z
// This will allow us to have entities layered visually.
let mut rendering_data = (&positions, &renderables).join().collect::<Vec<_>>();
rendering_data.sort_by_key(|&k| k.0.z);
// Iterate through all pairs of positions & renderables, load the image
// and draw it at the specified position.
for (position, renderable) in rendering_data.iter() {
// Load the image
let image = Image::new(self.context, renderable.path.clone()).expect("expected image");
let x = position.x as f32 * TILE_WIDTH;
let y = position.y as f32 * TILE_WIDTH;
// draw
let draw_params = DrawParam::new().dest(na::Point2::new(x, y));
graphics::draw(self.context, &image, draw_params).expect("expected render");
}
// Render any text
self.draw_text(&gameplay.state.to_string(), 525.0, 80.0);
self.draw_text(&gameplay.moves_count.to_string(), 525.0, 100.0);
// Finally, present the context, this will actually display everything
// on the screen.
graphics::present(self.context).expect("expected to present");
}
}
| RenderingSystem |
_windows.py | import sys
from dataclasses import dataclass
@dataclass
class WindowsConsoleFeatures:
"""Windows features available."""
vt: bool = False
"""The console supports VT codes."""
truecolor: bool = False
"""The console supports truecolor."""
try:
import ctypes
from ctypes import wintypes
from ctypes import LibraryLoader
windll = LibraryLoader(ctypes.WinDLL) # type: ignore
except (AttributeError, ImportError, ValueError):
# Fallback if we can't load the Windows DLL
def | () -> WindowsConsoleFeatures:
features = WindowsConsoleFeatures()
return features
else:
STDOUT = -11
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
_GetConsoleMode = windll.kernel32.GetConsoleMode
_GetConsoleMode.argtypes = [wintypes.HANDLE, wintypes.LPDWORD]
_GetConsoleMode.restype = wintypes.BOOL
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
def get_windows_console_features() -> WindowsConsoleFeatures:
"""Get windows console features.
Returns:
WindowsConsoleFeatures: An instance of WindowsConsoleFeatures.
"""
handle = _GetStdHandle(STDOUT)
console_mode = wintypes.DWORD()
result = _GetConsoleMode(handle, console_mode)
vt = bool(result and console_mode.value & ENABLE_VIRTUAL_TERMINAL_PROCESSING)
truecolor = False
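        # VT support alone is not enough for truecolor; newer Windows 10 builds are required.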
if vt:
win_version = sys.getwindowsversion()
truecolor = win_version.major > 10 or (
win_version.major == 10 and win_version.build >= 15063
)
features = WindowsConsoleFeatures(vt=vt, truecolor=truecolor)
return features
if __name__ == "__main__":
import platform
features = get_windows_console_features()
from rich import print
print(f'platform="{platform.system()}"')
print(repr(features))
| get_windows_console_features |
__init__.py | """Unit test package for codeforces2html.""" |
||
msg_init_target.rs | // This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
extern crate lightning;
use lightning::ln::msgs;
use lightning::util::reset_rng_state;
mod utils;
use utils::VecWriter;
#[inline]
pub fn do_test(data: &[u8]) {
reset_rng_state();
test_msg!(msgs::Init, data);
}
#[cfg(feature = "afl")]
#[macro_use] extern crate afl;
#[cfg(feature = "afl")]
fn main() |
#[cfg(feature = "honggfuzz")]
#[macro_use] extern crate honggfuzz;
#[cfg(feature = "honggfuzz")]
fn main() {
loop {
fuzz!(|data| {
do_test(data);
});
}
}
extern crate hex;
#[cfg(test)]
mod tests {
#[test]
fn duplicate_crash() {
super::do_test(&::hex::decode("00").unwrap());
}
}
| {
fuzz!(|data| {
do_test(data);
});
} |
message.rs | // Copyright 2021 Red Hat, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use anyhow::Context;
use netlink_packet_core::{
DecodeError, NetlinkDeserializable, NetlinkHeader, NetlinkPayload,
NetlinkSerializable,
};
use netlink_packet_utils::{
nla::{DefaultNla, NlasIterator},
Emitable, Parseable, ParseableParametrized,
};
use crate::{
buffer::GENL_ID_CTRL, CtrlAttr, GenericNetlinkHeader,
GenericNetlinkMessageBuffer,
};
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum GenericNetlinkAttr {
Ctrl(Vec<CtrlAttr>),
Other(Vec<DefaultNla>),
}
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct GenericNetlinkMessage {
pub message_type: u16,
pub header: GenericNetlinkHeader,
pub nlas: GenericNetlinkAttr,
}
impl Emitable for GenericNetlinkMessage {
fn buffer_len(&self) -> usize {
self.header.buffer_len()
+ match &self.nlas {
GenericNetlinkAttr::Ctrl(nlas) => nlas.as_slice().buffer_len(),
GenericNetlinkAttr::Other(nlas) => nlas.as_slice().buffer_len(),
}
}
fn emit(&self, buffer: &mut [u8]) {
self.header.emit(buffer);
match &self.nlas {
GenericNetlinkAttr::Ctrl(nlas) => nlas
.as_slice()
.emit(&mut buffer[self.header.buffer_len()..]),
GenericNetlinkAttr::Other(nlas) => nlas
.as_slice()
.emit(&mut buffer[self.header.buffer_len()..]),
}
}
}
impl NetlinkSerializable<GenericNetlinkMessage> for GenericNetlinkMessage {
fn message_type(&self) -> u16 {
self.message_type
}
fn buffer_len(&self) -> usize {
<Self as Emitable>::buffer_len(self)
}
fn serialize(&self, buffer: &mut [u8]) {
self.emit(buffer)
}
}
impl NetlinkDeserializable<GenericNetlinkMessage> for GenericNetlinkMessage {
type Error = DecodeError;
fn deserialize(
header: &NetlinkHeader,
payload: &[u8],
) -> Result<Self, Self::Error> {
let buf = GenericNetlinkMessageBuffer::new(payload);
GenericNetlinkMessage::parse_with_param(&buf, header.message_type)
}
}
impl<'a, T: AsRef<[u8]> + ?Sized>
ParseableParametrized<GenericNetlinkMessageBuffer<&'a T>, u16>
for GenericNetlinkMessage
{
fn parse_with_param(
buf: &GenericNetlinkMessageBuffer<&'a T>,
message_type: u16,
) -> Result<Self, DecodeError> {
let header = GenericNetlinkHeader::parse(buf)
.context("failed to parse generic netlink message header")?;
match message_type {
GENL_ID_CTRL => |
_ => Err(format!("Unknown message type: {}", message_type).into()),
}
}
}
impl From<GenericNetlinkMessage> for NetlinkPayload<GenericNetlinkMessage> {
fn from(message: GenericNetlinkMessage) -> Self {
NetlinkPayload::InnerMessage(message)
}
}
| {
match GenericNetlinkMessageBuffer::new_checked(&buf.inner()) {
Ok(buf) => Ok(GenericNetlinkMessage {
message_type,
header,
nlas: {
let mut nlas = Vec::new();
let error_msg =
"failed to parse control message attributes";
for nla in NlasIterator::new(buf.payload()) {
let nla = &nla.context(error_msg)?;
let parsed =
CtrlAttr::parse(nla).context(error_msg)?;
nlas.push(parsed);
}
GenericNetlinkAttr::Ctrl(nlas)
},
}),
Err(e) => Err(e),
}
} |
rest_go_client.go | package generator
import (
"container/list"
"context"
"fmt"
stdtypes "go/types"
"path"
"strconv"
stdstrings "strings"
"github.com/swipe-io/strcase"
"github.com/swipe-io/swipe/v2/internal/domain/model"
"github.com/swipe-io/swipe/v2/internal/importer"
"github.com/swipe-io/swipe/v2/internal/usecase/generator"
"github.com/swipe-io/swipe/v2/internal/writer"
)
type restGoClientOptionsGateway interface {
Interfaces() model.Interfaces
MethodOption(m model.ServiceMethod) model.MethodOption
UseFast() bool
}
type restGoClient struct {
writer.GoLangWriter
options restGoClientOptionsGateway
i *importer.Importer
}
func (g *restGoClient) Prepare(_ context.Context) error {
return nil
}
func (g *restGoClient) Process(_ context.Context) error {
for i := 0; i < g.options.Interfaces().Len(); i++ {
var (
kitHTTPPkg string
contextPkg string
httpPkg string
jsonPkg string
fmtPkg string
urlPkg string
netPkg string
stringsPkg string
pkgIO string
)
iface := g.options.Interfaces().At(i)
name := iface.UcName()
if iface.Namespace() != "" {
name = strcase.ToCamel(iface.Namespace())
}
clientType := name + "Client"
if g.options.UseFast() {
kitHTTPPkg = g.i.Import("fasthttp", "github.com/l-vitaly/go-kit/transport/fasthttp")
} else {
kitHTTPPkg = g.i.Import("http", "github.com/go-kit/kit/transport/http")
}
if g.options.UseFast() {
httpPkg = g.i.Import("fasthttp", "github.com/valyala/fasthttp")
} else {
httpPkg = g.i.Import("http", "net/http")
}
jsonPkg = g.i.Import("ffjson", "github.com/pquerna/ffjson/ffjson")
pkgIO = g.i.Import("io", "io")
fmtPkg = g.i.Import("fmt", "fmt")
contextPkg = g.i.Import("context", "context")
urlPkg = g.i.Import("url", "net/url")
netPkg = g.i.Import("net", "net")
stringsPkg = g.i.Import("strings", "strings")
if g.options.Interfaces().Len() == 1 {
g.W("// Deprecated\nfunc NewClientREST(tgt string")
g.W(" ,options ...ClientOption")
g.W(") (*%s, error) {\n", clientType)
g.W("return NewClientREST%s(tgt, options...)", name)
g.W("}\n")
}
g.W("func NewClientREST%s(tgt string", name)
g.W(" ,options ...ClientOption")
g.W(") (*%s, error) {\n", clientType)
g.W("opts := &clientOpts{}\n")
g.W("c := &%s{}\n", clientType)
g.W("for _, o := range options {\n")
g.W("o(opts)\n")
g.W("}\n")
g.W("if %s.HasPrefix(tgt, \"[\") {\n", stringsPkg)
g.W("host, port, err := %s.SplitHostPort(tgt)\n", netPkg)
g.WriteCheckErr(func() {
g.W("return nil, err")
})
g.W("tgt = host + \":\" + port\n")
g.W("}\n")
g.W("u, err := %s.Parse(tgt)\n", urlPkg)
g.WriteCheckErr(func() {
g.W("return nil, err")
})
g.W("if u.Scheme == \"\" {\n")
g.W("u.Scheme = \"https\"")
g.W("}\n")
for _, m := range iface.Methods() {
epName := m.IfaceLcName + "Endpoint"
mopt := g.options.MethodOption(m)
httpMethod := mopt.MethodName
if httpMethod == "" {
httpMethod = "GET"
}
pathStr := mopt.Path
if pathStr == "" {
pathStr = path.Join("/", strcase.ToKebab(m.Name))
}
if iface.Namespace() != "" {
pathStr = path.Join("/", strcase.ToKebab(iface.Namespace()), "/", pathStr)
}
var (
pathVars []*stdtypes.Var
queryVars []*stdtypes.Var
headerVars []*stdtypes.Var
)
for _, p := range m.Params {
if regexp, ok := mopt.PathVars[p.Name()]; ok {
if regexp != "" {
regexp = ":" + regexp
}
pathStr = stdstrings.Replace(pathStr, "{"+p.Name()+regexp+"}", "%s", -1)
pathVars = append(pathVars, p)
} else if _, ok := mopt.QueryVars[p.Name()]; ok {
queryVars = append(queryVars, p)
} else if _, ok := mopt.HeaderVars[p.Name()]; ok {
headerVars = append(headerVars, p)
}
}
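			// Params not bound to path, query, or header vars are sent in the JSON request body.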
remainingParams := len(m.Params) - (len(pathVars) + len(queryVars) + len(headerVars))
g.W("c.%s = %s.NewClient(\n", epName, kitHTTPPkg)
if mopt.Expr != nil {
writer.WriteAST(g, g.i, mopt.Expr)
} else {
g.W(strconv.Quote(httpMethod))
}
g.W(",\n")
g.W("u,\n")
if mopt.ClientRequestFunc.Expr != nil {
writer.WriteAST(g, g.i, mopt.ClientRequestFunc.Expr)
} else {
g.W("func(_ %s.Context, r *%s.Request, request interface{}) error {\n", contextPkg, httpPkg)
if len(m.Params) > 0 {
g.W("req, ok := request.(%s)\n", m.NameRequest)
g.W("if !ok {\n")
g.W("return %s.Errorf(\"couldn't assert request as %s, got %%T\", request)\n", fmtPkg, m.NameRequest)
g.W("}\n")
}
if g.options.UseFast() {
g.W("r.Header.SetMethod(")
} else {
g.W("r.Method = ")
}
if mopt.Expr != nil {
writer.WriteAST(g, g.i, mopt.Expr)
} else {
g.W(strconv.Quote(httpMethod))
}
if g.options.UseFast() {
g.W(")")
}
g.W("\n")
if g.options.UseFast() && remainingParams > 0 {
g.W("r.Header.Set(\"Content-Type\", \"application/json\")\n")
} else if remainingParams > 0 {
g.W("r.Header.Set(\"Content-Type\", \"application/json\")\n")
}
pathVarNames := make([]string, 0, len(pathVars))
for _, p := range pathVars {
name := p.Name() + "Str"
pathVarNames = append(pathVarNames, name)
g.WriteFormatType(g.i.Import, name, "req."+strcase.ToCamel(p.Name()), p)
}
if g.options.UseFast() {
g.W("r.SetRequestURI(")
} else {
g.W("r.URL.Path += ")
}
if len(pathVars) > 0 {
g.W("%s.Sprintf(%s, %s)", fmtPkg, strconv.Quote(pathStr), stdstrings.Join(pathVarNames, ", "))
} else {
g.W(strconv.Quote(pathStr))
}
if g.options.UseFast() {
g.W(")")
}
g.W("\n")
if len(queryVars) > 0 {
if g.options.UseFast() {
g.W("q := r.URI().QueryArgs()\n")
} else {
g.W("q := r.URL.Query()\n")
}
for _, p := range queryVars {
name := p.Name() + "Str"
g.WriteFormatType(g.i.Import, name, "req."+strcase.ToCamel(p.Name()), p)
g.W("q.Add(%s, %s)\n", strconv.Quote(mopt.QueryVars[p.Name()]), name)
}
if g.options.UseFast() {
g.W("r.URI().SetQueryString(q.String())\n")
} else {
g.W("r.URL.RawQuery = q.Encode()\n")
}
}
for _, p := range headerVars {
name := p.Name() + "Str"
g.WriteFormatType(g.i.Import, name, "req."+strcase.ToCamel(p.Name()), p)
g.W("r.Header.Add(%s, %s)\n", strconv.Quote(mopt.HeaderVars[p.Name()]), name)
}
switch stdstrings.ToUpper(httpMethod) {
case "POST", "PUT", "PATCH":
if remainingParams > 0 {
jsonPkg := g.i.Import("ffjson", "github.com/pquerna/ffjson/ffjson")
g.W("data, err := %s.Marshal(req)\n", jsonPkg)
g.W("if err != nil {\n")
g.W("return %s.Errorf(\"couldn't marshal request %%T: %%s\", req, err)\n", fmtPkg)
g.W("}\n")
if g.options.UseFast() {
g.W("r.SetBody(data)\n")
} else {
ioutilPkg := g.i.Import("ioutil", "io/ioutil")
bytesPkg := g.i.Import("bytes", "bytes")
g.W("r.Body = %s.NopCloser(%s.NewBuffer(data))\n", ioutilPkg, bytesPkg)
}
}
}
g.W("return nil\n")
g.W("}")
}
g.W(",\n")
if mopt.ClientResponseFunc.Expr != nil | else {
g.W("func(_ %s.Context, r *%s.Response) (interface{}, error) {\n", contextPkg, httpPkg)
statusCode := "r.StatusCode"
if g.options.UseFast() {
statusCode = "r.StatusCode()"
}
g.W("if statusCode := %s; statusCode != %s.StatusOK {\n", statusCode, httpPkg)
g.W("return nil, %sErrorDecode(statusCode)\n", m.IfaceLcName)
g.W("}\n")
if len(m.Results) > 0 {
var responseType string
if m.ResultsNamed {
responseType = m.NameRequest
} else {
responseType = stdtypes.TypeString(m.Results[0].Type(), g.i.QualifyPkg)
}
var structPath string
if mopt.WrapResponse.Enable {
parts := stdstrings.Split(mopt.WrapResponse.Name, ".")
var fn func(e *list.Element) string
fn = func(e *list.Element) (out string) {
if next := e.Next(); next != nil {
out += " struct { "
out += strcase.ToCamel(e.Value.(string))
out += fn(next)
out += "}"
} else {
out += fmt.Sprintf(" struct {\nData %s `json:\"%s\"`\n}", responseType, e.Value)
}
if prev := e.Prev(); prev != nil {
out += " `json:\"" + prev.Value.(string) + "\"`"
}
return out
}
l := list.New()
if len(parts) > 0 {
structPath = strcase.ToCamel(parts[0])
e := l.PushFront(parts[0])
for i := 1; i < len(parts); i++ {
if i != len(parts)-1 {
structPath += strcase.ToCamel(parts[i])
}
e = l.InsertAfter(parts[i], e)
}
}
g.W("var resp %s\n", fn(l.Front()))
} else {
g.W("var resp %s\n", responseType)
}
if g.options.UseFast() {
g.W("err := %s.Unmarshal(r.Body(), ", jsonPkg)
} else {
ioutilPkg := g.i.Import("ioutil", "io/ioutil")
g.W("b, err := %s.ReadAll(r.Body)\n", ioutilPkg)
g.WriteCheckErr(func() {
g.W("return nil, err\n")
})
g.W("err = %s.Unmarshal(b, ", jsonPkg)
}
g.W("&resp)\n")
g.W("if err != nil && err != %s.EOF {\n", pkgIO)
g.W("return nil, %s.Errorf(\"couldn't unmarshal body to %s: %%s\", err)\n", fmtPkg, m.NameRequest)
g.W("}\n")
if mopt.WrapResponse.Enable {
g.W("return resp.%s.Data, nil\n", structPath)
} else {
g.W("return resp, nil\n")
}
} else {
g.W("return nil, nil\n")
}
g.W("}")
}
g.W(",\n")
g.W("append(opts.genericClientOption, opts.%sClientOption...)...,\n", m.IfaceLcName)
g.W(").Endpoint()\n")
g.W(
"c.%[1]sEndpoint = middlewareChain(append(opts.genericEndpointMiddleware, opts.%[2]sEndpointMiddleware...))(c.%[1]sEndpoint)\n",
m.IfaceLcName,
m.IfaceLcName,
)
}
g.W("return c, nil\n")
g.W("}\n\n")
}
return nil
}
func (g *restGoClient) PkgName() string {
return ""
}
func (g *restGoClient) OutputDir() string {
return ""
}
func (g *restGoClient) Filename() string {
return "client_gen.go"
}
func (g *restGoClient) SetImporter(i *importer.Importer) {
g.i = i
}
func NewRestGoClient(
options restGoClientOptionsGateway,
) generator.Generator {
return &restGoClient{
options: options,
}
}
| {
writer.WriteAST(g, g.i, mopt.ClientResponseFunc.Expr)
} |
Mention.tsx | import React from 'react';
import { mount } from 'enzyme';
import { MentionProvider } from '@atlaskit/mention';
import { ResourcedMention } from '@atlaskit/mention/element';
import { ProviderFactory } from '../../../provider-factory';
import { ProfilecardProvider } from '../../../provider-factory/profile-card-provider';
import Mention from '../../../ui/Mention';
// avoid polluting test logs with error messages in the console
// please ensure you fix it if you expect console.error to be called
// eslint-disable-next-line no-console
let consoleError = console.error;
describe('@atlaskit/editor-core/ui/Mention', () => {
beforeEach(() => {
// eslint-disable-next-line no-console
console.error = jest.fn();
});
afterEach(() => {
// eslint-disable-next-line no-console
console.error = consoleError;
});
it('should render resourced mention', () => {
const mention = mount(
<Mention id="abcd-abcd-abcd" text="@Oscar Wallhult" />,
);
const resourcedMention = mention.find(ResourcedMention);
expect(resourcedMention.prop('id')).toEqual('abcd-abcd-abcd');
expect(resourcedMention.prop('text')).toEqual('@Oscar Wallhult');
});
it('should not render ResourcedMentionWithProfilecard if profilecardProvider is not set', () => {
const providerFactory = new ProviderFactory();
const mentionProvider = Promise.resolve({} as MentionProvider);
providerFactory.setProvider('mentionProvider', mentionProvider);
const mention = mount(
<Mention
id="abcd-abcd-abcd"
text="@Oscar Wallhult"
providers={providerFactory}
/>,
);
expect(mention.find('WithProfilecardMention')).toHaveLength(0);
mention.unmount();
});
it('should pass provider into resourced mention', () => {
const providerFactory = new ProviderFactory();
const mentionProvider = Promise.resolve({} as MentionProvider);
providerFactory.setProvider('mentionProvider', mentionProvider);
const mention = mount(
<Mention
id="abcd-abcd-abcd"
text="@Oscar Wallhult"
providers={providerFactory}
/>,
);
const resourcedMention = mention.find(ResourcedMention);
expect(resourcedMention.prop('mentionProvider')).toEqual(mentionProvider);
mention.unmount();
});
it('should not render ResourcedMentionWithProfilecard if profilecardProvider promise is rejected', async () => {
const providerFactory = new ProviderFactory();
const profilecardProvider = Promise.reject(new Error());
providerFactory.setProvider('profilecardProvider', profilecardProvider);
const mention = mount(
<Mention
id="abcd-abcd-abcd"
text="@Oscar Wallhult"
providers={providerFactory}
/>,
); | } catch (err) {
expect(mention.find('WithProfilecardMention')).toHaveLength(0);
}
mention.unmount();
});
['HipChat', 'all', 'here'].forEach((genericUserId) => {
it(`should not render ResourcedMentionWithProfilecard if id is generic (${genericUserId})`, async () => {
const providerFactory = new ProviderFactory();
const profilecardProvider = Promise.resolve({} as ProfilecardProvider);
providerFactory.setProvider('profilecardProvider', profilecardProvider);
const mention = mount(
<Mention
id={genericUserId}
text="@Oscar Wallhult"
providers={providerFactory}
/>,
);
await profilecardProvider;
expect(mention.find('WithProfilecardMention')).toHaveLength(0);
mention.unmount();
});
});
}); |
try {
await profilecardProvider; |
content-link.model.ts | /*!
* @license
* Copyright 2019 Alfresco Software, Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* eslint-disable @angular-eslint/component-selector */
import { RelatedContentRepresentation } from '@alfresco/js-api';
export class ContentLinkModel implements RelatedContentRepresentation {
contentAvailable: boolean;
created: Date;
createdBy: any;
id: number;
nodeId: string;
link: boolean;
mimeType: string;
name: string;
previewStatus: string;
relatedContent: boolean;
simpleType: string;
thumbnailUrl: string;
contentRawUrl: string;
contentBlob: Blob;
thumbnailStatus: string;
constructor(obj?: any) {
this.contentAvailable = obj && obj.contentAvailable;
this.created = obj && obj.created;
this.createdBy = obj && obj.createdBy || {};
this.id = obj && obj.id;
this.link = obj && obj.link;
this.mimeType = obj && obj.mimeType;
this.name = obj && obj.name;
this.previewStatus = obj && obj.previewStatus; | this.simpleType = obj && obj.simpleType;
this.thumbnailStatus = obj && obj.thumbnailStatus;
this.nodeId = obj && obj.nodeId;
}
hasPreviewStatus(): boolean {
return this.previewStatus === 'supported';
}
isTypeImage(): boolean {
return this.simpleType === 'image';
}
isTypePdf(): boolean {
return this.simpleType === 'pdf';
}
isTypeDoc(): boolean {
return this.simpleType === 'word' || this.simpleType === 'content';
}
isThumbnailReady(): boolean {
return this.thumbnailStatus === 'created';
}
isThumbnailSupported(): boolean {
return this.isTypeImage() || ((this.isTypePdf() || this.isTypeDoc()) && this.isThumbnailReady());
}
} | this.relatedContent = obj && obj.relatedContent; |
monomorphize.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use back::link::mangle_exported_name;
use driver::session;
use lib::llvm::ValueRef;
use middle::trans::base::{set_llvm_fn_attrs, set_inline_hint};
use middle::trans::base::{trans_enum_variant,push_ctxt};
use middle::trans::base::{trans_fn, decl_internal_rust_fn};
use middle::trans::base::{get_item_val, no_self};
use middle::trans::base;
use middle::trans::common::*;
use middle::trans::meth;
use middle::trans::intrinsic;
use middle::ty;
use middle::typeck;
use util::ppaux::Repr;
use syntax::ast;
use syntax::ast_map;
use syntax::ast_util::local_def;
pub fn monomorphic_fn(ccx: @mut CrateContext,
fn_id: ast::DefId,
real_substs: &ty::substs,
vtables: Option<typeck::vtable_res>,
self_vtables: Option<typeck::vtable_param_res>,
ref_id: Option<ast::NodeId>)
-> (ValueRef, bool)
{
debug2!("monomorphic_fn(\
fn_id={}, \
real_substs={}, \
vtables={}, \
self_vtable={}, \
ref_id={:?})",
fn_id.repr(ccx.tcx),
real_substs.repr(ccx.tcx),
vtables.repr(ccx.tcx),
self_vtables.repr(ccx.tcx),
ref_id);
assert!(real_substs.tps.iter().all(|t| !ty::type_needs_infer(*t)));
let _icx = push_ctxt("monomorphic_fn");
let mut must_cast = false;
let psubsts = @param_substs {
tys: real_substs.tps.to_owned(),
vtables: vtables,
self_ty: real_substs.self_ty.clone(),
self_vtables: self_vtables
};
for s in real_substs.tps.iter() { assert!(!ty::type_has_params(*s)); }
for s in psubsts.tys.iter() { assert!(!ty::type_has_params(*s)); }
let hash_id = make_mono_id(ccx, fn_id, &*psubsts);
if hash_id.params.iter().any(
|p| match *p { mono_precise(_, _) => false, _ => true }) {
must_cast = true;
}
debug2!("monomorphic_fn(\
fn_id={}, \
psubsts={}, \
hash_id={:?})",
fn_id.repr(ccx.tcx),
psubsts.repr(ccx.tcx),
hash_id);
match ccx.monomorphized.find(&hash_id) {
Some(&val) => {
debug2!("leaving monomorphic fn {}",
ty::item_path_str(ccx.tcx, fn_id));
return (val, must_cast);
}
None => ()
}
let tpt = ty::lookup_item_type(ccx.tcx, fn_id);
let llitem_ty = tpt.ty;
// We need to do special handling of the substitutions if we are
// calling a static provided method. This is sort of unfortunate.
let mut is_static_provided = None;
let map_node = session::expect(
ccx.sess,
ccx.tcx.items.find_copy(&fn_id.node),
|| format!("While monomorphizing {:?}, couldn't find it in the item map \
(may have attempted to monomorphize an item \
defined in a different crate?)", fn_id));
// Get the path so that we can create a symbol
let (pt, name, span) = match map_node {
ast_map::node_item(i, pt) => (pt, i.ident, i.span),
ast_map::node_variant(ref v, enm, pt) => (pt, (*v).node.name, enm.span),
ast_map::node_method(m, _, pt) => (pt, m.ident, m.span),
ast_map::node_foreign_item(i, abis, _, pt) if abis.is_intrinsic()
=> (pt, i.ident, i.span),
ast_map::node_foreign_item(*) => {
// Foreign externs don't have to be monomorphized.
return (get_item_val(ccx, fn_id.node), true);
}
ast_map::node_trait_method(@ast::provided(m), _, pt) => {
// If this is a static provided method, indicate that
// and stash the number of params on the method.
if m.explicit_self.node == ast::sty_static {
is_static_provided = Some(m.generics.ty_params.len());
}
(pt, m.ident, m.span)
}
ast_map::node_trait_method(@ast::required(_), _, _) => {
ccx.tcx.sess.bug("Can't monomorphize a required trait method")
}
ast_map::node_expr(*) => {
ccx.tcx.sess.bug("Can't monomorphize an expr")
}
ast_map::node_stmt(*) => {
ccx.tcx.sess.bug("Can't monomorphize a stmt")
}
ast_map::node_arg(*) => ccx.tcx.sess.bug("Can't monomorphize an arg"),
ast_map::node_block(*) => {
ccx.tcx.sess.bug("Can't monomorphize a block")
}
ast_map::node_local(*) => {
ccx.tcx.sess.bug("Can't monomorphize a local")
}
ast_map::node_callee_scope(*) => {
ccx.tcx.sess.bug("Can't monomorphize a callee-scope")
}
ast_map::node_struct_ctor(_, i, pt) => (pt, i.ident, i.span)
};
debug2!("monomorphic_fn about to subst into {}", llitem_ty.repr(ccx.tcx));
let mono_ty = match is_static_provided {
None => ty::subst_tps(ccx.tcx, psubsts.tys,
psubsts.self_ty, llitem_ty),
Some(num_method_ty_params) => {
// Static default methods are a little unfortunate, in
// that the "internal" and "external" type of them differ.
// Internally, the method body can refer to Self, but the
            // externally visible type of the method has a type param
// inserted in between the trait type params and the
// method type params. The substs that we are given are
// the proper substs *internally* to the method body, so
// we have to use those when compiling it.
//
// In order to get the proper substitution to use on the
// type of the method, we pull apart the substitution and
// stick a substitution for the self type in.
// This is a bit unfortunate.
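            // For illustration (hypothetical): with one trait type param T and
            // one method type param U, psubsts.tys is [T', U'] and the self
            // type is Foo, so idx = 1 and the external substs are [T', Foo, U'].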
let idx = psubsts.tys.len() - num_method_ty_params;
let substs =
(psubsts.tys.slice(0, idx) +
&[psubsts.self_ty.unwrap()] +
psubsts.tys.tailn(idx));
debug2!("static default: changed substitution to {}",
substs.repr(ccx.tcx));
ty::subst_tps(ccx.tcx, substs, None, llitem_ty)
}
};
let f = match ty::get(mono_ty).sty {
ty::ty_bare_fn(ref f) => {
assert!(f.abis.is_rust() || f.abis.is_intrinsic());
f
}
_ => fail2!("expected bare rust fn or an intrinsic")
};
ccx.stats.n_monos += 1;
let depth = match ccx.monomorphizing.find(&fn_id) {
Some(&d) => d, None => 0
};
// Random cut-off -- code that needs to instantiate the same function
// recursively more than thirty times can probably safely be assumed to be
// causing an infinite expansion.
if depth > 30 {
ccx.sess.span_fatal(
span, "overly deep expansion of inlined function");
}
ccx.monomorphizing.insert(fn_id, depth + 1);
let (_, elt) = gensym_name(ccx.sess.str_of(name));
let mut pt = (*pt).clone();
pt.push(elt);
let s = mangle_exported_name(ccx, pt.clone(), mono_ty);
debug2!("monomorphize_fn mangled to {}", s);
let mk_lldecl = || {
let lldecl = decl_internal_rust_fn(ccx, f.sig.inputs, f.sig.output, s);
ccx.monomorphized.insert(hash_id, lldecl);
lldecl
};
let lldecl = match map_node {
ast_map::node_item(i@@ast::item {
node: ast::item_fn(ref decl, _, _, _, ref body),
_
}, _) => {
let d = mk_lldecl();
set_llvm_fn_attrs(i.attrs, d);
trans_fn(ccx,
pt,
decl,
body,
d,
no_self,
Some(psubsts),
fn_id.node,
[]);
d
}
ast_map::node_item(*) => {
ccx.tcx.sess.bug("Can't monomorphize this kind of item")
}
ast_map::node_foreign_item(i, _, _, _) => {
let d = mk_lldecl();
intrinsic::trans_intrinsic(ccx, d, i, pt, psubsts, i.attrs,
ref_id);
d
}
ast_map::node_variant(ref v, enum_item, _) => {
let tvs = ty::enum_variants(ccx.tcx, local_def(enum_item.id));
let this_tv = *tvs.iter().find(|tv| { tv.id.node == fn_id.node}).unwrap();
let d = mk_lldecl();
set_inline_hint(d);
match v.node.kind {
ast::tuple_variant_kind(ref args) => {
trans_enum_variant(ccx,
enum_item.id,
v,
(*args).clone(),
this_tv.disr_val,
Some(psubsts),
d);
}
ast::struct_variant_kind(_) =>
ccx.tcx.sess.bug("can't monomorphize struct variants"),
}
d
}
ast_map::node_method(mth, _, _) => {
// XXX: What should the self type be here?
let d = mk_lldecl();
set_llvm_fn_attrs(mth.attrs, d);
meth::trans_method(ccx, pt, mth, Some(psubsts), d);
d
}
ast_map::node_trait_method(@ast::provided(mth), _, pt) => {
let d = mk_lldecl();
set_llvm_fn_attrs(mth.attrs, d);
meth::trans_method(ccx, (*pt).clone(), mth, Some(psubsts), d);
d
}
ast_map::node_struct_ctor(struct_def, _, _) => {
let d = mk_lldecl();
set_inline_hint(d);
base::trans_tuple_struct(ccx,
struct_def.fields,
struct_def.ctor_id.expect("ast-mapped tuple struct \
didn't have a ctor id"),
Some(psubsts),
d);
d
}
// Ugh -- but this ensures any new variants won't be forgotten
ast_map::node_expr(*) |
ast_map::node_stmt(*) |
ast_map::node_trait_method(*) |
ast_map::node_arg(*) |
ast_map::node_block(*) |
ast_map::node_callee_scope(*) |
ast_map::node_local(*) => {
ccx.tcx.sess.bug(format!("Can't monomorphize a {:?}", map_node))
}
};
ccx.monomorphizing.insert(fn_id, depth);
debug2!("leaving monomorphic fn {}", ty::item_path_str(ccx.tcx, fn_id));
(lldecl, must_cast)
}
pub fn | (ccx: @mut CrateContext,
item: ast::DefId,
substs: ¶m_substs) -> mono_id {
// FIXME (possibly #5801): Need a lot of type hints to get
// .collect() to work.
let substs_iter = substs.self_ty.iter().chain(substs.tys.iter());
let precise_param_ids: ~[(ty::t, Option<@~[mono_id]>)] = match substs.vtables {
Some(vts) => {
debug2!("make_mono_id vtables={} substs={}",
vts.repr(ccx.tcx), substs.tys.repr(ccx.tcx));
let vts_iter = substs.self_vtables.iter().chain(vts.iter());
vts_iter.zip(substs_iter).map(|(vtable, subst)| {
let v = vtable.map(|vt| meth::vtable_id(ccx, vt));
(*subst, if !v.is_empty() { Some(@v) } else { None })
}).collect()
}
None => substs_iter.map(|subst| (*subst, None::<@~[mono_id]>)).collect()
};
let param_ids = precise_param_ids.iter().map(|x| {
let (a, b) = *x;
mono_precise(a, b)
}).collect();
@mono_id_ {def: item, params: param_ids}
}
| make_mono_id |
index.ts | import {
getPathsRelativeToConfig,
IApiCliConfig,
IIgnoreRunnable,
InvalidOpticConfigurationSyntaxError,
IOpticTaskRunnerConfig,
OpticConfigurationLocationFailure,
readApiConfig,
} from '@useoptic/cli-config';
import { IHttpInteraction } from '@useoptic/domain-types';
////////////////////////////////////////////////////////////////////////////////
import { Command } from '@oclif/command';
export { Command };
////////////////////////////////////////////////////////////////////////////////
export { Client as SaasClient } from './saas-client';
////////////////////////////////////////////////////////////////////////////////
export { CommandAndProxySessionManager } from './command-and-proxy-session-manager';
export { CommandSession } from './command-session';
export { HttpToolkitCapturingProxy } from './httptoolkit-capturing-proxy';
////////////////////////////////////////////////////////////////////////////////
import { developerDebugLogger, userDebugLogger } from './logger';
import { colors, fromOptic, promiseFromOptic } from './conversation';
export { developerDebugLogger, userDebugLogger, promiseFromOptic };
////////////////////////////////////////////////////////////////////////////////
export interface ICaptureManifest {
samples: IHttpInteraction[];
}
export interface ICaptureLoader {
load(): Promise<ICaptureManifest>;
loadWithFilter(filter: IIgnoreRunnable): Promise<ICaptureManifest>;
}
export interface ICaptureSaver {
init(): Promise<void>;
save(sample: IHttpInteraction): Promise<void>;
cleanup(): Promise<void>;
}
////////////////////////////////////////////////////////////////////////////////
export interface ICliDaemonState {
port: number;
}
| export function makeUiBaseUrl(daemonState: ICliDaemonState) {
if (process.env.OPTIC_UI_HOST) {
return process.env.OPTIC_UI_HOST;
}
return `http://localhost:${daemonState.port}`;
}
////////////////////////////////////////////////////////////////////////////////
export interface IOpticTaskRunner {
run(
cli: Command,
cliConfig: IApiCliConfig,
taskConfig: IOpticTaskRunnerConfig,
commandToRunWhenStarted?: string
): Promise<void>;
}
////////////////////////////////////////////////////////////////////////////////
export { CaptureSaver as SaasCaptureSaver } from './captures/avro/saas/capture-saver';
export { CaptureSaver as FileSystemAvroCaptureSaver } from './captures/avro/file-system/capture-saver';
export { CaptureLoader as FileSystemAvroCaptureLoader } from './captures/avro/file-system/capture-loader';
////////////////////////////////////////////////////////////////////////////////
export { fromOptic, errorFromOptic, warningFromOptic } from './conversation';
////////////////////////////////////////////////////////////////////////////////
export async function loadPathsAndConfig(cli: Command) {
try {
const paths = await getPathsRelativeToConfig();
const config = await readApiConfig(paths.configPath);
return {
paths,
config,
};
} catch (e) {
userDebugLogger(e);
if (e instanceof OpticConfigurationLocationFailure) {
cli.log(
fromOptic(
`No Optic project found in this directory. Learn to add Optic to your project here ${colors.underline(
'https://app.useoptic.com'
)}`
)
);
} else if (e instanceof InvalidOpticConfigurationSyntaxError) {
cli.log(fromOptic(`The contents of optic.yml are not valid YAML`));
}
return await cleanupAndExit();
}
}
export function cleanupAndExit() {
return process.exit(0);
}
////////////////////////////////////////////////////////////////////////////////
export async function delay(milliseconds: number) {
return new Promise((resolve, reject) => {
setTimeout(resolve, milliseconds);
});
}
////////////////////////////////////////////////////////////////////////////////
import * as uuid from 'uuid';
export interface IdGenerator<T> {
nextId(): T;
}
export class DefaultIdGenerator implements IdGenerator<string> {
nextId() {
return uuid.v4();
}
} | |
i3d_encoder.py | # Tencent is pleased to support the open source community by making GNES available.
#
# Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import numpy as np
from gnes.encoder.base import BaseVideoEncoder
from gnes.helper import batching, get_first_available_gpu
class I3dEncoder(BaseVideoEncoder):
batch_size = 1
def __init__(self, model_dir: str,
output_layer: str,
num_classes: int = 400,
frame_size_x: int = 224,
frame_size_y: int = 224,
num_frame_per_clib: int = 16,
rgb_channels: int = 3,
on_gpu: bool = False,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.model_dir = model_dir
self.output_layer = output_layer
self.num_classes = num_classes
self.frame_size_x = frame_size_x
self.frame_size_y = frame_size_y
self.num_frame_per_clib = num_frame_per_clib
self.rgb_channels = rgb_channels
self.on_gpu = on_gpu
def post_init(self):
import tensorflow as tf
from i3d_cores.i3d import InceptionI3d
import os
os.environ['CUDA_VISIBLE_DEVICES'] = str(get_first_available_gpu())
with tf.Graph().as_default():
self.rgb_images_placeholder = tf.placeholder(dtype=tf.float32, shape=(None,
self.num_frame_per_clib,
self.frame_size_x,
self.frame_size_y,
self.rgb_channels))
is_training = False
with tf.variable_scope('RGB'):
self.feature, _ = InceptionI3d(
num_classes=self.num_classes,
spatial_squeeze=True,
final_endpoint=self.output_layer,
name='inception_i3d' |
config = tf.ConfigProto(log_device_placement=False)
if self.on_gpu:
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
self.sess.run(init)
checkpoint_file = self.model_dir
meta_graph_location = self.model_dir + '.meta'
saver = tf.train.import_meta_graph(meta_graph_location, clear_devices=True)
saver.restore(self.sess, checkpoint_file)
def encode(self, data: List['np.ndarray'], *args, **kwargs) -> np.ndarray:
def _padding(data):
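            # Zero-pad clips shorter than num_frame_per_clib frames; truncate longer ones.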
_data = np.array(
[np.concatenate((d, np.zeros((self.num_frame_per_clib - d.shape[0],
self.frame_size_x,
self.frame_size_y,
self.rgb_channels), dtype=np.float32)), axis=0)
if d.shape[0] < self.num_frame_per_clib else d[:self.num_frame_per_clib] for d in data])
return _data
@batching
def _encode(_, data):
feature, = self.sess.run([self.feature], feed_dict={self.rgb_images_placeholder: data})
return np.array(feature).astype(np.float32)
return _encode(self, _padding(data)) | )(self.rgb_images_placeholder, is_training)
init = tf.global_variables_initializer() |
0001_initial.py | # Generated by Django 3.2.4 on 2021-12-15 01:23
from django.db import migrations, models
import django.db.models.deletion
class | (migrations.Migration):
initial = True
dependencies = [
("submission", "0062_cfp_settings_data"),
]
operations = [
migrations.CreateModel(
name="YouTubeLink",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False
),
),
("video_id", models.CharField(max_length=20)),
(
"submission",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="youtube_link",
to="submission.submission",
),
),
],
),
]
| Migration |
SessionTimeout.js | import React, { Component } from "react";
import { connect } from "react-redux";
import { withRouter } from "react-router-dom";
import {
removeActiveModal,
saveDraftBeforeLogout,
saveUser,
} from "../../redux/actions";
import Helper from "../../utils/Helper";
import "./style.scss";
const mapStateToProps = () => {
return {};
};
class | extends Component {
constructor(props) {
super(props);
this.state = {
timer: 60,
counter: 60,
intervalId: null,
};
}
componentDidMount() {
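    // Tick the visible countdown every second and force a logout once the timer expires.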
const intervalIdTemp = setInterval(() => {
const temp = this.state.counter;
this.setState({ counter: temp - 1 });
}, 1000);
setTimeout(() => {
this.logout();
}, this.state.timer * 1000);
this.setState({ intervalId: intervalIdTemp });
}
componentWillUnmount() {
clearInterval(this.state.intervalId);
}
logout = () => {
this.props.dispatch(saveDraftBeforeLogout(true));
Helper.storeUser({});
this.props.dispatch(saveUser({}));
this.hideModal();
setTimeout(() =>
alert("You have been logged out for inactivity. Please log back in.")
);
};
hideModal = () => {
this.props.dispatch(removeActiveModal());
};
render() {
return (
<div id="session-timeout-modal">
<h3 className="pb-3">Session Timeout</h3>
        <p>{`You are being timed out in ${this.state.counter} sec due to inactivity. Please choose to stay signed in or to log off. Otherwise, you will be logged off automatically.`}</p>
<div className="actions">
<button className="btn btn-primary-outline" onClick={this.logout}>
Log off
</button>
<button className="btn btn-primary" onClick={this.hideModal}>
Stay Logged in ({this.state.counter})
</button>
</div>
</div>
);
}
}
export default connect(mapStateToProps)(withRouter(SessionTimeout));
| SessionTimeout |
Party.ts | /* eslint-disable prefer-promise-reject-errors */
import { Message, MessageReaction, TextChannel } from 'discord.js';
import { debounce, noOp, sleep, Time } from 'e';
import { Extendable, ExtendableStore, KlasaMessage, KlasaUser } from 'klasa';
import { ReactionEmoji, SILENT_ERROR } from '../../lib/constants';
import { ClientSettings } from '../../lib/settings/types/ClientSettings';
import { CustomReactionCollector } from '../../lib/structures/CustomReactionCollector';
import { MakePartyOptions } from '../../lib/types';
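// Users currently locked into a party; cleared periodically so stale locks cannot block future joins.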
const partyLockCache = new Set<string>();
setInterval(() => partyLockCache.clear(), Time.Minute * 20);
export async function | (
channel: TextChannel,
user: KlasaUser,
options: MakePartyOptions
): Promise<[KlasaUser[], () => Promise<KlasaUser[]>]> {
const usersWhoConfirmed: KlasaUser[] = [options.leader];
let deleted = false;
function getMessageContent() {
return `${options.message}\n\n**Users Joined:** ${usersWhoConfirmed
.map(u => u.username)
.join(
', '
)}\n\nThis party will automatically depart in 2 minutes, or if the leader clicks the start (start early) or stop button.`;
}
const confirmMessage = (await channel.send(getMessageContent())) as KlasaMessage;
async function addEmojis() {
await confirmMessage.react(ReactionEmoji.Join);
await sleep(50);
await confirmMessage.react(ReactionEmoji.Stop);
await sleep(50);
await confirmMessage.react(ReactionEmoji.Start);
}
addEmojis();
// Debounce message edits to prevent spam.
const updateUsersIn = debounce(() => {
if (deleted || confirmMessage.deleted) return;
confirmMessage.edit(getMessageContent());
}, 500);
const removeUser = (user: KlasaUser) => {
if (user === options.leader) return;
const index = usersWhoConfirmed.indexOf(user);
if (index !== -1) {
usersWhoConfirmed.splice(index, 1);
updateUsersIn();
}
};
const reactionAwaiter = () =>
new Promise<KlasaUser[]>(async (resolve, reject) => {
let partyCancelled = false;
const collector = new CustomReactionCollector(confirmMessage, {
time: 120_000,
max: options.usersAllowed?.length ?? options.maxSize,
dispose: true,
filter: async (reaction: MessageReaction, user: KlasaUser) => {
await user.settings.sync();
if (
(!options.ironmanAllowed && user.isIronman) ||
user.bot ||
user.minionIsBusy ||
!reaction.emoji.id ||
!user.hasMinion
) {
return false;
}
if (options.usersAllowed && !options.usersAllowed.includes(user.id)) {
return false;
}
if (options.customDenier && reaction.emoji.id === ReactionEmoji.Join) {
const [customDenied, reason] = await options.customDenier(user);
if (customDenied) {
						user.send(`You couldn't join this mass for this reason: ${reason}`);
return false;
}
}
return ([ReactionEmoji.Join, ReactionEmoji.Stop, ReactionEmoji.Start] as string[]).includes(
reaction.emoji.id
);
}
});
collector.on('remove', (reaction: MessageReaction, user: KlasaUser) => {
if (!usersWhoConfirmed.includes(user)) return false;
if (reaction.emoji.id !== ReactionEmoji.Join) return false;
partyLockCache.delete(user.id);
removeUser(user);
});
async function startTrip() {
await confirmMessage.delete().catch(noOp);
if (!partyCancelled && usersWhoConfirmed.length < options.minSize) {
channel.send(`${user} Not enough people joined your ${options.party ? 'party' : 'mass'}!`);
reject(new Error(SILENT_ERROR));
return;
}
resolve(usersWhoConfirmed);
}
collector.on('collect', async (reaction, user) => {
if (user.partial) await user.fetch();
if (user.client.settings?.get(ClientSettings.UserBlacklist).includes(user.id)) return;
switch (reaction.emoji.id) {
case ReactionEmoji.Join: {
if (usersWhoConfirmed.includes(user) || partyLockCache.has(user.id)) return;
if (options.usersAllowed && !options.usersAllowed.includes(user.id)) {
return;
}
// Add the user
usersWhoConfirmed.push(user);
partyLockCache.add(user.id);
updateUsersIn();
if (usersWhoConfirmed.length >= options.maxSize) {
collector.stop('everyoneJoin');
break;
}
break;
}
case ReactionEmoji.Stop: {
if (user === options.leader) {
partyCancelled = true;
reject(
`The leader (${options.leader.username}) cancelled this ${
options.party ? 'party' : 'mass'
}!`
);
collector.stop('partyCreatorEnd');
}
break;
}
case ReactionEmoji.Start: {
if (user === options.leader) {
startTrip();
collector.stop('partyCreatorEnd');
}
break;
}
default:
break;
}
});
collector.once('end', () => {
deleted = true;
confirmMessage.delete().catch(noOp);
for (const user of usersWhoConfirmed) {
partyLockCache.delete(user.id);
}
setTimeout(() => startTrip(), 750);
});
});
return [usersWhoConfirmed, reactionAwaiter];
}
export default class extends Extendable {
public constructor(store: ExtendableStore, file: string[], directory: string) {
super(store, file, directory, { appliesTo: [Message] });
}
async makePartyAwaiter(this: KlasaMessage, options: MakePartyOptions) {
if (this.channel.type !== 'text') throw new Error('Tried to make party in non-text channel.');
const [usersWhoConfirmed, reactionAwaiter] = await setupParty(this.channel, options.leader, options);
await reactionAwaiter();
return usersWhoConfirmed;
}
}
| setupParty |
rm.go | package imgcmd
import (
"io"
"github.com/lithammer/dedent"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/weaveworks/ignite/cmd/ignite/cmd/cmdutil"
"github.com/weaveworks/ignite/cmd/ignite/run"
)
// NewCmdRm removes images
func | (out io.Writer) *cobra.Command {
rf := &run.RmiFlags{}
cmd := &cobra.Command{
Use: "rm <image>...",
Short: "Remove VM base images",
Long: dedent.Dedent(`
Remove one or multiple VM base images. Images are matched by prefix based on
their ID and name. To remove multiple images, chain the matches separated by spaces.
The force flag (-f, --force) kills and removes any running VMs using the image.
`),
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(func() error {
ro, err := rf.NewRmiOptions(args)
if err != nil {
return err
}
return run.Rmi(ro)
}())
},
}
addRmiFlags(cmd.Flags(), rf)
return cmd
}
func addRmiFlags(fs *pflag.FlagSet, rf *run.RmiFlags) {
cmdutil.AddForceFlag(fs, &rf.Force)
}
| NewCmdRm |
sql_model.rs | use std::fmt::{Display, Formatter};
use crate::common::uid::*;
use crate::common::Uid;
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "snake_case" /* "camelCase" */)]
pub struct SqlModel {
#[serde(skip_serializing, skip_deserializing)]
pub id: i32,
#[serde(
rename(serialize = "id", deserialize = "id"),
skip_deserializing,
with = "uid"
)]
pub fake_id: Uid,
#[serde(default)]
pub status: i32,
pub created_at: Option<chrono::DateTime<chrono::Local>>,
pub updated_at: Option<chrono::DateTime<chrono::Local>>,
} | self.fake_id = Uid::new(self.id as u32, object_type, shard_id);
return self;
}
}
impl Display for SqlModel {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", serde_json::to_string(self).unwrap_or_default())
}
}
impl Default for SqlModel {
fn default() -> Self {
return Self {
id: 0,
fake_id: Default::default(),
status: 0,
created_at: Option::from(chrono::offset::Local::now()),
updated_at: Option::from(chrono::offset::Local::now()),
};
}
}
//datetime_local |
impl SqlModel {
pub fn mask(&mut self, object_type: u32, shard_id: u32) -> &mut Self { |
HellowWorldO0.py | """
HelloWorldO0.py
Copyright (c) 2020 by Robert Russell Millward. All rights reserved.
"""
from tkinter import *
class GenResearch(Frame):
def sayHi(self):
print("hi Bob");
def createWidgits(self): |
self.QUIT.pack({"side": "left"});
self.hi_there = Button(self);
self.hi_there["text"] = "Hello",
self.hi_there["command"] = self.sayHi;
self.hi_there.pack({"side": "left"});
def __init__(self, master=None):
Frame.__init__(self, master);
self.pack();
self.createWidgits();
root = Tk();
app = GenResearch(master=root);
app.mainloop();
root.destroy();
#END | self.QUIT = Button(self);
self.QUIT["text"] = "Quit";
self.QUIT["fg"] = "red";
self.QUIT["command"] = self.quit; |
pointnet_test.py | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pointnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from lingvo import compat as tf
from lingvo.core import py_utils
from lingvo.core import test_utils
from lingvo.tasks.car import pointnet
class PointNetTest(test_utils.TestCase, parameterized.TestCase):
def | (self, p, input_shape, expected_shape):
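    """Instantiates p, feeds random input through FProp, and checks the output shape."""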
batch_size, num_points, _ = input_shape
g = tf.Graph()
with g.as_default():
net = p.Instantiate()
input_data = py_utils.NestedMap(
points=tf.random_uniform((batch_size, num_points, 3)),
features=tf.random_uniform(input_shape),
padding=tf.zeros((batch_size, num_points), dtype=tf.float32),
label=tf.random_uniform((batch_size,),
minval=0,
maxval=16,
dtype=tf.int32))
result = net.FPropDefaultTheta(input_data)
with self.session(graph=g) as sess:
sess.run(tf.global_variables_initializer())
np_result = sess.run(result)
self.assertEqual(np_result.shape, expected_shape)
@parameterized.parameters((128, 3), (128, 9), (256, 3))
def testPointNetClassifier(self, feature_dims, input_dims):
p = pointnet.PointNet().Classifier(
input_dims=input_dims, feature_dims=feature_dims)
# Network should produce a global feature of feature_dims.
self.assertEqual(p.output_dim, feature_dims)
self._testOutShape(p, (8, 128, input_dims), (8, feature_dims))
def testPointNetSegmentation(self):
p = pointnet.PointNet().Segmentation()
    # Network takes a batch_size=8 input and produces a 128-dim pointwise feature.
self.assertEqual(p.output_dim, 128)
self._testOutShape(p, (8, 100, 3), (8, 100, 128))
def testPointNetSegmentationShapeNet(self):
p = pointnet.PointNet().SegmentationShapeNet()
self.assertEqual(p.output_dim, 128)
self._testOutShape(p, (8, 2000, 3), (8, 2000, 128))
@parameterized.parameters((128, 3), (128, 9), (256, 3))
def testPointNetPPClassifier(self, feature_dims, input_dims):
p = pointnet.PointNetPP().Classifier(
input_dims=input_dims, feature_dims=feature_dims)
# Network should produce a global feature of feature_dims.
self.assertEqual(p.output_dim, feature_dims)
self._testOutShape(p, (8, 1024, input_dims), (8, feature_dims))
if __name__ == '__main__':
tf.test.main()
| _testOutShape |
lsr.rs | #[doc = "Register `LSR` reader"]
pub struct R(crate::R<LSR_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<LSR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<LSR_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<LSR_SPEC>) -> Self {
R(reader)
}
}
#[doc = "RX Data Error in FIFO\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum FIFOERR_A {
#[doc = "1: `1`"]
ERROR = 1,
}
impl From<FIFOERR_A> for bool {
#[inline(always)]
fn from(variant: FIFOERR_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `fifoerr` reader - RX Data Error in FIFO"]
pub type FIFOERR_R = crate::BitReader<FIFOERR_A>;
impl FIFOERR_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<FIFOERR_A> {
match self.bits {
true => Some(FIFOERR_A::ERROR),
_ => None,
}
}
#[doc = "Checks if the value of the field is `ERROR`"]
#[inline(always)]
pub fn is_error(&self) -> bool {
*self == FIFOERR_A::ERROR
}
}
#[doc = "Transmitter Empty\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TEMT_A {
#[doc = "1: `1`"]
EMPTY = 1,
}
impl From<TEMT_A> for bool {
#[inline(always)]
fn from(variant: TEMT_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `temt` reader - Transmitter Empty"]
pub type TEMT_R = crate::BitReader<TEMT_A>;
impl TEMT_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<TEMT_A> {
match self.bits {
true => Some(TEMT_A::EMPTY),
_ => None,
}
}
#[doc = "Checks if the value of the field is `EMPTY`"]
#[inline(always)]
pub fn is_empty(&self) -> bool {
*self == TEMT_A::EMPTY
}
}
#[doc = "TX Holding Register Empty\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum THRE_A {
#[doc = "1: `1`"]
EMPTY = 1,
}
impl From<THRE_A> for bool {
#[inline(always)]
fn from(variant: THRE_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `thre` reader - TX Holding Register Empty"]
pub type THRE_R = crate::BitReader<THRE_A>;
impl THRE_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<THRE_A> {
match self.bits {
true => Some(THRE_A::EMPTY),
_ => None,
}
}
#[doc = "Checks if the value of the field is `EMPTY`"]
#[inline(always)]
pub fn is_empty(&self) -> bool {
*self == THRE_A::EMPTY
}
}
#[doc = "Field `bi` reader - Break Interrupt"]
pub type BI_R = crate::BitReader<bool>;
#[doc = "Framing Error\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum | {
#[doc = "1: `1`"]
ERROR = 1,
}
impl From<FE_A> for bool {
#[inline(always)]
fn from(variant: FE_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `fe` reader - Framing Error"]
pub type FE_R = crate::BitReader<FE_A>;
impl FE_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<FE_A> {
match self.bits {
true => Some(FE_A::ERROR),
_ => None,
}
}
#[doc = "Checks if the value of the field is `ERROR`"]
#[inline(always)]
pub fn is_error(&self) -> bool {
*self == FE_A::ERROR
}
}
#[doc = "Parity Error\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PE_A {
#[doc = "1: `1`"]
ERROR = 1,
}
impl From<PE_A> for bool {
#[inline(always)]
fn from(variant: PE_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `pe` reader - Parity Error"]
pub type PE_R = crate::BitReader<PE_A>;
impl PE_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<PE_A> {
match self.bits {
true => Some(PE_A::ERROR),
_ => None,
}
}
#[doc = "Checks if the value of the field is `ERROR`"]
#[inline(always)]
pub fn is_error(&self) -> bool {
*self == PE_A::ERROR
}
}
#[doc = "Overrun Error\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum OE_A {
#[doc = "1: `1`"]
ERROR = 1,
}
impl From<OE_A> for bool {
#[inline(always)]
fn from(variant: OE_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `oe` reader - Overrun Error"]
pub type OE_R = crate::BitReader<OE_A>;
impl OE_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<OE_A> {
match self.bits {
true => Some(OE_A::ERROR),
_ => None,
}
}
#[doc = "Checks if the value of the field is `ERROR`"]
#[inline(always)]
pub fn is_error(&self) -> bool {
*self == OE_A::ERROR
}
}
#[doc = "Data Ready\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DR_A {
#[doc = "1: `1`"]
READY = 1,
}
impl From<DR_A> for bool {
#[inline(always)]
fn from(variant: DR_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `dr` reader - Data Ready"]
pub type DR_R = crate::BitReader<DR_A>;
impl DR_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<DR_A> {
match self.bits {
true => Some(DR_A::READY),
_ => None,
}
}
#[doc = "Checks if the value of the field is `READY`"]
#[inline(always)]
pub fn is_ready(&self) -> bool {
*self == DR_A::READY
}
}
impl R {
#[doc = "Bit 7 - RX Data Error in FIFO"]
#[inline(always)]
pub fn fifoerr(&self) -> FIFOERR_R {
FIFOERR_R::new(((self.bits >> 7) & 1) != 0)
}
#[doc = "Bit 6 - Transmitter Empty"]
#[inline(always)]
pub fn temt(&self) -> TEMT_R {
TEMT_R::new(((self.bits >> 6) & 1) != 0)
}
#[doc = "Bit 5 - TX Holding Register Empty"]
#[inline(always)]
pub fn thre(&self) -> THRE_R {
THRE_R::new(((self.bits >> 5) & 1) != 0)
}
#[doc = "Bit 4 - Break Interrupt"]
#[inline(always)]
pub fn bi(&self) -> BI_R {
BI_R::new(((self.bits >> 4) & 1) != 0)
}
#[doc = "Bit 3 - Framing Error"]
#[inline(always)]
pub fn fe(&self) -> FE_R {
FE_R::new(((self.bits >> 3) & 1) != 0)
}
#[doc = "Bit 2 - Parity Error"]
#[inline(always)]
pub fn pe(&self) -> PE_R {
PE_R::new(((self.bits >> 2) & 1) != 0)
}
#[doc = "Bit 1 - Overrun Error"]
#[inline(always)]
pub fn oe(&self) -> OE_R {
OE_R::new(((self.bits >> 1) & 1) != 0)
}
#[doc = "Bit 0 - Data Ready"]
#[inline(always)]
pub fn dr(&self) -> DR_R {
DR_R::new((self.bits & 1) != 0)
}
}
#[doc = "UART Line Status Register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [lsr](index.html) module"]
pub struct LSR_SPEC;
impl crate::RegisterSpec for LSR_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [lsr::R](R) reader structure"]
impl crate::Readable for LSR_SPEC {
type Reader = R;
}
#[doc = "`reset()` method sets LSR to value 0"]
impl crate::Resettable for LSR_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
| FE_A |