text
stringlengths 2
99.9k
| meta
dict |
---|---|
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains common generic and tag-based KASAN error reporting code.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Andrey Ryabinin <[email protected]>
*
* Some code borrowed from https://github.com/xairy/kasan-prototype by
* Andrey Konovalov <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <asm/sections.h>
#include "kasan.h"
#include "../slab.h"
/* Shadow layout customization. */
#define SHADOW_BYTES_PER_BLOCK 1
#define SHADOW_BLOCKS_PER_ROW 16
#define SHADOW_BYTES_PER_ROW (SHADOW_BLOCKS_PER_ROW * SHADOW_BYTES_PER_BLOCK)
#define SHADOW_ROWS_AROUND_ADDR 2
static unsigned long kasan_flags;
#define KASAN_BIT_REPORTED 0
#define KASAN_BIT_MULTI_SHOT 1
/*
 * Enable multi-shot reporting (report every bug, not just the first) and
 * return the previous multi-shot state so callers can restore it later
 * via kasan_restore_multi_shot().
 */
bool kasan_save_enable_multi_shot(void)
{
	return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
/*
 * Restore the multi-shot state previously returned by
 * kasan_save_enable_multi_shot(); only clears the bit if multi-shot was
 * not already enabled beforehand.
 */
void kasan_restore_multi_shot(bool enabled)
{
	if (!enabled)
		clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
/* Handle the "kasan_multi_shot" kernel command-line parameter. */
static int __init kasan_set_multi_shot(char *str)
{
	set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
	return 1;
}
__setup("kasan_multi_shot", kasan_set_multi_shot);
/*
 * Print the report header: bug type, faulting IP, access kind/size,
 * the accessed address and the current task.
 */
static void print_error_description(struct kasan_access_info *info)
{
	pr_err("BUG: KASAN: %s in %pS\n",
		get_bug_type(info), (void *)info->ip);
	pr_err("%s of size %zu at addr %px by task %s/%d\n",
		info->is_write ? "Write" : "Read", info->access_size,
		info->access_addr, current->comm, task_pid_nr(current));
}
static DEFINE_SPINLOCK(report_lock);
static void start_report(unsigned long *flags)
{
	/*
	 * Make sure we don't end up in loop: disable KASAN for the current
	 * task so that printing the report cannot itself trigger a nested
	 * report.
	 */
	kasan_disable_current();
	/* Serialize reports coming from different CPUs/contexts. */
	spin_lock_irqsave(&report_lock, *flags);
	pr_err("==================================================================\n");
}
/* Finish a report: taint the kernel, drop the lock, honor panic_on_warn. */
static void end_report(unsigned long *flags)
{
	pr_err("==================================================================\n");
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irqrestore(&report_lock, *flags);
	if (panic_on_warn)
		panic("panic_on_warn set ...\n");
	/* Re-enable KASAN for this task (disabled in start_report()). */
	kasan_enable_current();
}
/*
 * Print one alloc/free track: the recording task's pid and, when a stack
 * was captured in the stack depot, its backtrace.
 */
static void print_track(struct kasan_track *track, const char *prefix)
{
	pr_err("%s by task %u:\n", prefix, track->pid);
	if (track->stack) {
		struct stack_trace trace;

		depot_fetch_stack(track->stack, &trace);
		print_stack_trace(&trace, 0);
	} else {
		pr_err("(stack is not available)\n");
	}
}
/*
 * Resolve @addr to its head page when it lies inside the linear kernel
 * mapping; return NULL for addresses outside [PAGE_OFFSET, high_memory).
 */
static struct page *addr_to_page(const void *addr)
{
	if (addr < (void *)PAGE_OFFSET || addr >= high_memory)
		return NULL;
	return virt_to_head_page(addr);
}
/*
 * Report where @addr sits relative to the slab @object it belongs to:
 * to the left of, to the right of, or inside the object, plus the
 * object's cache and extent.
 */
static void describe_object_addr(struct kmem_cache *cache, void *object,
				const void *addr)
{
	unsigned long access_addr = (unsigned long)addr;
	unsigned long object_addr = (unsigned long)object;
	const char *rel_type;
	int rel_bytes;

	pr_err("The buggy address belongs to the object at %px\n"
	       " which belongs to the cache %s of size %d\n",
		object, cache->name, cache->object_size);

	/* No access address (e.g. invalid-free report): nothing to relate. */
	if (!addr)
		return;

	if (access_addr < object_addr) {
		rel_type = "to the left";
		rel_bytes = object_addr - access_addr;
	} else if (access_addr >= object_addr + cache->object_size) {
		rel_type = "to the right";
		rel_bytes = access_addr - (object_addr + cache->object_size);
	} else {
		rel_type = "inside";
		rel_bytes = access_addr - object_addr;
	}

	pr_err("The buggy address is located %d bytes %s of\n"
	       " %d-byte region [%px, %px)\n",
		rel_bytes, rel_type, cache->object_size, (void *)object_addr,
		(void *)(object_addr + cache->object_size));
}
/*
 * Print the object's allocation/free stacks (when the cache records
 * them) followed by the access position relative to the object.
 */
static void describe_object(struct kmem_cache *cache, void *object,
				const void *addr)
{
	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);

	/* Alloc/free tracks are only recorded for SLAB_KASAN caches. */
	if (cache->flags & SLAB_KASAN) {
		print_track(&alloc_info->alloc_track, "Allocated");
		pr_err("\n");
		print_track(&alloc_info->free_track, "Freed");
		pr_err("\n");
	}

	describe_object_addr(cache, object, addr);
}
/*
 * True when @addr lies inside the kernel image text/data or inside any
 * loaded module's address range.
 */
static inline bool kernel_or_module_addr(const void *addr)
{
	bool in_kernel_image = addr >= (void *)_stext && addr < (void *)_end;

	return in_kernel_image || is_module_address((unsigned long)addr);
}
/* True if @addr lies within the init task's statically allocated stack. */
static inline bool init_task_stack_addr(const void *addr)
{
	return addr >= (void *)&init_thread_union.stack &&
		(addr <= (void *)&init_thread_union.stack +
			sizeof(init_thread_union.stack));
}
/*
 * Describe what kind of memory @addr points at: a slab object, a global
 * (kernel/module) variable, and/or the owning page. Always dumps the
 * current stack first.
 */
static void print_address_description(void *addr)
{
	struct page *page = addr_to_page(addr);

	dump_stack();
	pr_err("\n");

	if (page && PageSlab(page)) {
		struct kmem_cache *cache = page->slab_cache;
		void *object = nearest_obj(cache, page, addr);

		describe_object(cache, object, addr);
	}

	/* Globals: in the kernel image or a module, but not init's stack. */
	if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
		pr_err("The buggy address belongs to the variable:\n");
		pr_err(" %pS\n", addr);
	}

	if (page) {
		pr_err("The buggy address belongs to the page:\n");
		dump_page(page, "kasan: bad access detected");
	}
}
/* True if the shadow row starting at @row contains the @guilty byte. */
static bool row_is_guilty(const void *row, const void *guilty)
{
	const void *row_end = row + SHADOW_BYTES_PER_ROW;

	return row <= guilty && guilty < row_end;
}
/*
 * Character column of @shadow within a printed shadow row starting at
 * @row; used to position the '^' marker under the guilty shadow byte.
 */
static int shadow_pointer_offset(const void *row, const void *shadow)
{
	/* The length of ">ff00ff00ff00ff00: " is
	 * 3 + (BITS_PER_LONG/8)*2 chars.
	 * Each shadow byte then occupies two hex digits plus one separator
	 * per SHADOW_BYTES_PER_BLOCK block.
	 */
	return 3 + (BITS_PER_LONG/8)*2 + (shadow - row)*2 +
		(shadow - row) / SHADOW_BYTES_PER_BLOCK + 1;
}
/*
 * Dump the shadow-memory rows surrounding @addr's shadow byte, marking
 * the row containing it with '>' and the exact byte with '^'.
 */
static void print_shadow_for_address(const void *addr)
{
	int i;
	const void *shadow = kasan_mem_to_shadow(addr);
	const void *shadow_row;

	/* Start SHADOW_ROWS_AROUND_ADDR rows before the row holding addr. */
	shadow_row = (void *)round_down((unsigned long)shadow,
					SHADOW_BYTES_PER_ROW)
		- SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW;

	pr_err("Memory state around the buggy address:\n");

	for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) {
		const void *kaddr = kasan_shadow_to_mem(shadow_row);
		char buffer[4 + (BITS_PER_LONG/8)*2];
		char shadow_buf[SHADOW_BYTES_PER_ROW];

		snprintf(buffer, sizeof(buffer),
			(i == 0) ? ">%px: " : " %px: ", kaddr);
		/*
		 * We should not pass a shadow pointer to generic
		 * function, because generic functions may try to
		 * access kasan mapping for the passed address.
		 */
		memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW);
		print_hex_dump(KERN_ERR, buffer,
			DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1,
			shadow_buf, SHADOW_BYTES_PER_ROW, 0);

		if (row_is_guilty(shadow_row, shadow))
			pr_err("%*c\n",
				shadow_pointer_offset(shadow_row, shadow),
				'^');

		shadow_row += SHADOW_BYTES_PER_ROW;
	}
}
/*
 * Whether a report should be printed now: suppressed while KASAN is
 * disabled for the current task and, unless multi-shot is enabled,
 * after the first report has been printed.
 */
static bool report_enabled(void)
{
	if (current->kasan_depth)
		return false;
	if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		return true;
	/* One-shot mode: only the first caller ever wins this bit. */
	return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
}
/* Report a double-free or a free of an invalid/corrupted pointer. */
void kasan_report_invalid_free(void *object, unsigned long ip)
{
	unsigned long flags;

	start_report(&flags);
	pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip);
	print_tags(get_tag(object), reset_tag(object));
	/* Work with the untagged pointer from here on. */
	object = reset_tag(object);
	pr_err("\n");
	print_address_description(object);
	pr_err("\n");
	print_shadow_for_address(object);
	end_report(&flags);
}
/*
 * Main KASAN report entry point: print a full bug report (description,
 * tags, address description, shadow memory) for a bad access of @size
 * bytes at @addr from instruction pointer @ip.
 */
void kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip)
{
	struct kasan_access_info info;
	void *tagged_addr;
	void *untagged_addr;
	unsigned long flags;

	/* Rate-limit to one report unless kasan_multi_shot is enabled. */
	if (likely(!report_enabled()))
		return;

	disable_trace_on_warning();

	tagged_addr = (void *)addr;
	/* Strip the tag (tag-based mode) to get a dereferenceable address. */
	untagged_addr = reset_tag(tagged_addr);

	info.access_addr = tagged_addr;
	if (addr_has_shadow(untagged_addr))
		info.first_bad_addr = find_first_bad_addr(tagged_addr, size);
	else
		info.first_bad_addr = untagged_addr;
	info.access_size = size;
	info.is_write = is_write;
	info.ip = ip;

	start_report(&flags);

	print_error_description(&info);
	if (addr_has_shadow(untagged_addr))
		print_tags(get_tag(tagged_addr), info.first_bad_addr);
	pr_err("\n");

	if (addr_has_shadow(untagged_addr)) {
		print_address_description(untagged_addr);
		pr_err("\n");
		print_shadow_for_address(info.first_bad_addr);
	} else {
		/* No shadow (e.g. wild pointer): only the stack is useful. */
		dump_stack();
	}

	end_report(&flags);
}
| {
"pile_set_name": "Github"
} |
// ==ClosureCompiler==
// @compilation_level ADVANCED_OPTIMIZATIONS
// @externs_url http://closure-compiler.googlecode.com/svn/trunk/contrib/externs/maps/google_maps_api_v3_3.js
// ==/ClosureCompiler==
/**
* @name MarkerClusterer for Google Maps v3
* @version version 1.0
* @author Luke Mahe
* @fileoverview
* The library creates and manages per-zoom-level clusters for large amounts of
* markers.
* <br/>
* This is a v3 implementation of the
* <a href="http://gmaps-utility-library-dev.googlecode.com/svn/tags/markerclusterer/"
* >v2 MarkerClusterer</a>.
*/
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A Marker Clusterer that clusters markers.
*
* @param {google.maps.Map} map The Google map to attach to.
* @param {Array.<google.maps.Marker>=} opt_markers Optional markers to add to
* the cluster.
* @param {Object=} opt_options support the following options:
* 'gridSize': (number) The grid size of a cluster in pixels.
* 'maxZoom': (number) The maximum zoom level that a marker can be part of a
* cluster.
* 'zoomOnClick': (boolean) Whether the default behaviour of clicking on a
* cluster is to zoom into it.
* 'averageCenter': (boolean) Wether the center of each cluster should be
* the average of all markers in the cluster.
* 'minimumClusterSize': (number) The minimum number of markers to be in a
* cluster before the markers are hidden and a count
* is shown.
* 'styles': (object) An object that has style properties:
* 'url': (string) The image url.
* 'height': (number) The image height.
* 'width': (number) The image width.
* 'anchor': (Array) The anchor position of the label text.
* 'textColor': (string) The text color.
* 'textSize': (number) The text size.
* 'backgroundPosition': (string) The position of the backgound x, y.
* @constructor
* @extends google.maps.OverlayView
*/
function MarkerClusterer(map, opt_markers, opt_options) {
  // MarkerClusterer implements google.maps.OverlayView interface. We use the
  // extend function to extend MarkerClusterer with google.maps.OverlayView
  // because it might not always be available when the code is defined so we
  // look for it at the last possible moment. If it doesn't exist now then
  // there is no point going ahead :)
  this.extend(MarkerClusterer, google.maps.OverlayView);
  this.map_ = map;

  /**
   * @type {Array.<google.maps.Marker>}
   * @private
   */
  this.markers_ = [];

  /**
   * @type {Array.<Cluster>}
   */
  this.clusters_ = [];

  // Icon sizes (px) for the five default numbered cluster styles.
  this.sizes = [53, 56, 66, 78, 90];

  /**
   * @private
   */
  this.styles_ = [];

  /**
   * @type {boolean}
   * @private
   */
  this.ready_ = false;

  var options = opt_options || {};

  /**
   * @type {number}
   * @private
   */
  this.gridSize_ = options['gridSize'] || 60;

  /**
   * @private
   */
  this.minClusterSize_ = options['minimumClusterSize'] || 2;

  /**
   * @type {?number}
   * @private
   */
  this.maxZoom_ = options['maxZoom'] || null;

  this.styles_ = options['styles'] || [];

  /**
   * @type {string}
   * @private
   */
  this.imagePath_ = options['imagePath'] ||
      this.MARKER_CLUSTER_IMAGE_PATH_;

  /**
   * @type {string}
   * @private
   */
  this.imageExtension_ = options['imageExtension'] ||
      this.MARKER_CLUSTER_IMAGE_EXTENSION_;

  /**
   * @type {boolean}
   * @private
   */
  this.zoomOnClick_ = true;

  if (options['zoomOnClick'] != undefined) {
    this.zoomOnClick_ = options['zoomOnClick'];
  }

  /**
   * @type {boolean}
   * @private
   */
  this.averageCenter_ = false;

  if (options['averageCenter'] != undefined) {
    this.averageCenter_ = options['averageCenter'];
  }

  this.setupStyles_();

  // Triggers onAdd() once the overlay is attached to the map.
  this.setMap(map);

  /**
   * @type {number}
   * @private
   */
  this.prevZoom_ = this.map_.getZoom();

  // Add the map event listeners
  var that = this;
  // Tear down clusters whenever the zoom level actually changes; they
  // get rebuilt by the 'idle' handler below.
  google.maps.event.addListener(this.map_, 'zoom_changed', function() {
    var zoom = that.map_.getZoom();

    if (that.prevZoom_ != zoom) {
      that.prevZoom_ = zoom;
      that.resetViewport();
    }
  });

  // Redraw once panning/zooming has settled.
  google.maps.event.addListener(this.map_, 'idle', function() {
    that.redraw();
  });

  // Finally, add the markers
  if (opt_markers && opt_markers.length) {
    this.addMarkers(opt_markers, false);
  }
}
/**
 * The default root URL for the numbered cluster icon images (m1.png ...).
 *
 * @type {string}
 * @private
 */
MarkerClusterer.prototype.MARKER_CLUSTER_IMAGE_PATH_ =
    'http://google-maps-utility-library-v3.googlecode.com/svn/trunk/markerclusterer/' +
    'images/m';

/**
 * The default file extension for the cluster icon images.
 *
 * @type {string}
 * @private
 */
MarkerClusterer.prototype.MARKER_CLUSTER_IMAGE_EXTENSION_ = 'png';
/**
 * Copies every prototype member of obj2 onto obj1's prototype
 * (simple mixin-style inheritance).
 *
 * @param {Object} obj1 The object to be extended.
 * @param {Object} obj2 The object to extend with.
 * @return {Object} The extended obj1.
 * @ignore
 */
MarkerClusterer.prototype.extend = function(obj1, obj2) {
  for (var member in obj2.prototype) {
    obj1.prototype[member] = obj2.prototype[member];
  }
  return obj1;
};
/**
 * Implementation of the OverlayView interface method: called when the
 * clusterer's map is set; marks the clusterer ready, which builds clusters.
 * @ignore
 */
MarkerClusterer.prototype.onAdd = function() {
  this.setReady_(true);
};

/**
 * Implementation of the OverlayView interface method. Intentionally a
 * no-op: drawing is delegated to each cluster's ClusterIcon overlay.
 * @ignore
 */
MarkerClusterer.prototype.draw = function() {};
/**
 * Builds the default styles array (one entry per icon size) unless the
 * caller already supplied custom styles.
 *
 * @private
 */
MarkerClusterer.prototype.setupStyles_ = function() {
  // Custom styles were provided; keep them untouched.
  if (this.styles_.length) {
    return;
  }
  var i = 0;
  var size = this.sizes[i];
  // Walk the sizes list until the first falsy entry (same sentinel
  // semantics as the original for-loop).
  while (size) {
    this.styles_.push({
      url: this.imagePath_ + (i + 1) + '.' + this.imageExtension_,
      height: size,
      width: size
    });
    i++;
    size = this.sizes[i];
  }
};
/**
 * Fits the map viewport to the bounds of all markers in the clusterer.
 */
MarkerClusterer.prototype.fitMapToMarkers = function() {
  var allMarkers = this.getMarkers();
  var bounds = new google.maps.LatLngBounds();
  var idx = 0;
  var m = allMarkers[idx];
  while (m) {
    bounds.extend(m.getPosition());
    idx++;
    m = allMarkers[idx];
  }
  this.map_.fitBounds(bounds);
};
/**
 * Sets the cluster icon styles.
 *
 * @param {Object} styles The style to set.
 */
MarkerClusterer.prototype.setStyles = function(styles) {
  this.styles_ = styles;
};

/**
 * Gets the cluster icon styles.
 *
 * @return {Object} The styles object.
 */
MarkerClusterer.prototype.getStyles = function() {
  return this.styles_;
};

/**
 * Whether clicking a cluster zooms into it.
 *
 * @return {boolean} True if zoomOnClick_ is set.
 */
MarkerClusterer.prototype.isZoomOnClick = function() {
  return this.zoomOnClick_;
};

/**
 * Whether cluster centers are the average of their member positions.
 *
 * @return {boolean} True if averageCenter_ is set.
 */
MarkerClusterer.prototype.isAverageCenter = function() {
  return this.averageCenter_;
};

/**
 * Returns the array of markers in the clusterer.
 *
 * @return {Array.<google.maps.Marker>} The markers.
 */
MarkerClusterer.prototype.getMarkers = function() {
  return this.markers_;
};

/**
 * Returns the number of markers in the clusterer.
 *
 * @return {Number} The number of markers.
 */
MarkerClusterer.prototype.getTotalMarkers = function() {
  return this.markers_.length;
};

/**
 * Sets the max zoom level past which markers are no longer clustered.
 *
 * @param {number} maxZoom The max zoom level.
 */
MarkerClusterer.prototype.setMaxZoom = function(maxZoom) {
  this.maxZoom_ = maxZoom;
};

/**
 * Gets the max zoom for the clusterer.
 *
 * @return {number} The max zoom level.
 */
MarkerClusterer.prototype.getMaxZoom = function() {
  return this.maxZoom_;
};
/**
 * Default function for picking the cluster icon: the style index is the
 * number of decimal digits of the marker count (capped at numStyles).
 *
 * @param {Array.<google.maps.Marker>} markers The markers in the clusterer.
 * @param {number} numStyles The number of styles available.
 * @return {Object} A object properties: 'text' (string) and 'index' (number).
 * @private
 */
MarkerClusterer.prototype.calculator_ = function(markers, numStyles) {
  var count = markers.length;

  // Count the decimal digits of the marker count (0 markers -> 0 digits).
  var digits = 0;
  var remaining = count;
  while (remaining !== 0) {
    remaining = Math.floor(remaining / 10);
    digits++;
  }

  return {
    text: count,
    index: Math.min(digits, numStyles)
  };
};
/**
 * Set the calculator function.
 *
 * @param {function(Array, number)} calculator The function to set as the
 *     calculator. The function should return a object properties:
 *     'text' (string) and 'index' (number).
 */
MarkerClusterer.prototype.setCalculator = function(calculator) {
  this.calculator_ = calculator;
};

/**
 * Get the calculator function.
 *
 * @return {function(Array, number)} the calculator function.
 */
MarkerClusterer.prototype.getCalculator = function() {
  return this.calculator_;
};
/**
 * Adds an array of markers to the clusterer, optionally deferring the
 * redraw.
 *
 * @param {Array.<google.maps.Marker>} markers The markers to add.
 * @param {boolean=} opt_nodraw Whether to skip redrawing the clusters.
 */
MarkerClusterer.prototype.addMarkers = function(markers, opt_nodraw) {
  var idx = 0;
  var m = markers[idx];
  while (m) {
    this.pushMarkerTo_(m);
    idx++;
    m = markers[idx];
  }
  if (!opt_nodraw) {
    this.redraw();
  }
};
/**
 * Pushes a marker to the clusterer's internal list without redrawing.
 *
 * @param {google.maps.Marker} marker The marker to add.
 * @private
 */
MarkerClusterer.prototype.pushMarkerTo_ = function(marker) {
  // Not yet assigned to any cluster.
  marker.isAdded = false;
  if (marker['draggable']) {
    // If the marker is draggable add a listener so we update the clusters on
    // the drag end.
    var that = this;
    google.maps.event.addListener(marker, 'dragend', function() {
      marker.isAdded = false;
      that.repaint();
    });
  }
  this.markers_.push(marker);
};
/**
 * Adds a single marker to the clusterer and redraws unless suppressed.
 *
 * @param {google.maps.Marker} marker The marker to add.
 * @param {boolean=} opt_nodraw Whether to skip redrawing the clusters.
 */
MarkerClusterer.prototype.addMarker = function(marker, opt_nodraw) {
  this.pushMarkerTo_(marker);
  if (opt_nodraw) {
    return;
  }
  this.redraw();
};
/**
 * Removes a marker from the internal list and takes it off the map.
 *
 * @param {google.maps.Marker} marker The marker to remove.
 * @return {boolean} Whether the marker was removed or not.
 * @private
 */
MarkerClusterer.prototype.removeMarker_ = function(marker) {
  var index = -1;
  if (this.markers_.indexOf) {
    index = this.markers_.indexOf(marker);
  } else {
    // Fallback linear scan for environments without Array#indexOf.
    var i = 0;
    var candidate = this.markers_[i];
    while (candidate) {
      if (candidate == marker) {
        index = i;
        break;
      }
      i++;
      candidate = this.markers_[i];
    }
  }

  if (index == -1) {
    // Marker is not in our list of markers.
    return false;
  }

  marker.setMap(null);
  this.markers_.splice(index, 1);
  return true;
};
/**
 * Removes a marker from the clusterer and redraws unless suppressed.
 *
 * @param {google.maps.Marker} marker The marker to remove.
 * @param {boolean=} opt_nodraw Optional boolean to force no redraw.
 * @return {boolean} True if the marker was removed (and a redraw ran).
 */
MarkerClusterer.prototype.removeMarker = function(marker, opt_nodraw) {
  if (!this.removeMarker_(marker) || opt_nodraw) {
    return false;
  }
  this.resetViewport();
  this.redraw();
  return true;
};
/**
 * Removes an array of markers from the clusterer.
 *
 * @param {Array.<google.maps.Marker>} markers The markers to remove.
 * @param {boolean=} opt_nodraw Optional boolean to force no redraw.
 * @return {boolean} True if any marker was removed and a redraw occurred.
 */
MarkerClusterer.prototype.removeMarkers = function(markers, opt_nodraw) {
  var removed = false;

  for (var i = 0, marker; marker = markers[i]; i++) {
    // Track whether at least one marker was actually found and removed.
    var r = this.removeMarker_(marker);
    removed = removed || r;
  }

  if (!opt_nodraw && removed) {
    this.resetViewport();
    this.redraw();
    return true;
  }
  // Previously this path fell through and returned undefined; return an
  // explicit boolean so callers always get a consistent (still falsy) value.
  return false;
};
/**
 * Sets the clusterer's ready state.
 *
 * @param {boolean} ready The state.
 * @private
 */
MarkerClusterer.prototype.setReady_ = function(ready) {
  // Only transition once; re-entering would rebuild clusters redundantly.
  if (!this.ready_) {
    this.ready_ = ready;
    this.createClusters_();
  }
};
/**
 * Returns the number of clusters in the clusterer.
 *
 * @return {number} The number of clusters.
 */
MarkerClusterer.prototype.getTotalClusters = function() {
  return this.clusters_.length;
};

/**
 * Returns the google map that the clusterer is associated with.
 *
 * @return {google.maps.Map} The map.
 */
MarkerClusterer.prototype.getMap = function() {
  return this.map_;
};

/**
 * Sets the google map that the clusterer is associated with.
 *
 * @param {google.maps.Map} map The map.
 */
MarkerClusterer.prototype.setMap = function(map) {
  this.map_ = map;
};

/**
 * Returns the size of the clustering grid, in pixels.
 *
 * @return {number} The grid size.
 */
MarkerClusterer.prototype.getGridSize = function() {
  return this.gridSize_;
};

/**
 * Sets the size of the clustering grid, in pixels.
 *
 * @param {number} size The grid size.
 */
MarkerClusterer.prototype.setGridSize = function(size) {
  this.gridSize_ = size;
};

/**
 * Returns the minimum number of markers needed to form a cluster.
 *
 * @return {number} The minimum cluster size.
 */
MarkerClusterer.prototype.getMinClusterSize = function() {
  return this.minClusterSize_;
};

/**
 * Sets the minimum number of markers needed to form a cluster.
 *
 * @param {number} size The minimum cluster size.
 */
MarkerClusterer.prototype.setMinClusterSize = function(size) {
  this.minClusterSize_ = size;
};
/**
 * Extends a bounds object outward by the grid size (in pixels at the
 * current projection), so markers just outside the viewport still cluster.
 *
 * @param {google.maps.LatLngBounds} bounds The bounds to extend.
 * @return {google.maps.LatLngBounds} The extended bounds.
 */
MarkerClusterer.prototype.getExtendedBounds = function(bounds) {
  var projection = this.getProjection();

  // Turn the bounds into latlng.
  var tr = new google.maps.LatLng(bounds.getNorthEast().lat(),
      bounds.getNorthEast().lng());
  var bl = new google.maps.LatLng(bounds.getSouthWest().lat(),
      bounds.getSouthWest().lng());

  // Convert the points to pixels and the extend out by the grid size.
  var trPix = projection.fromLatLngToDivPixel(tr);
  trPix.x += this.gridSize_;
  trPix.y -= this.gridSize_;

  var blPix = projection.fromLatLngToDivPixel(bl);
  blPix.x -= this.gridSize_;
  blPix.y += this.gridSize_;

  // Convert the pixel points back to LatLng
  var ne = projection.fromDivPixelToLatLng(trPix);
  var sw = projection.fromDivPixelToLatLng(blPix);

  // Extend the bounds to contain the new bounds.
  bounds.extend(ne);
  bounds.extend(sw);

  return bounds;
};
/**
 * Determines if a marker is contained in a bounds.
 *
 * @param {google.maps.Marker} marker The marker to check.
 * @param {google.maps.LatLngBounds} bounds The bounds to check against.
 * @return {boolean} True if the marker is in the bounds.
 * @private
 */
MarkerClusterer.prototype.isMarkerInBounds_ = function(marker, bounds) {
  return bounds.contains(marker.getPosition());
};

/**
 * Clears all clusters and markers from the clusterer.
 */
MarkerClusterer.prototype.clearMarkers = function() {
  // true => also remove each marker from the map.
  this.resetViewport(true);

  // Set the markers a empty array.
  this.markers_ = [];
};
/**
 * Removes all existing clusters and marks every marker as unclustered.
 *
 * @param {boolean} opt_hide To also remove the markers from the map.
 */
MarkerClusterer.prototype.resetViewport = function(opt_hide) {
  // Tear down each cluster (and its icon overlay).
  var ci = 0;
  var cluster = this.clusters_[ci];
  while (cluster) {
    cluster.remove();
    ci++;
    cluster = this.clusters_[ci];
  }

  // Flag every marker as not yet clustered; optionally pull it off the map.
  var mi = 0;
  var marker = this.markers_[mi];
  while (marker) {
    marker.isAdded = false;
    if (opt_hide) {
      marker.setMap(null);
    }
    mi++;
    marker = this.markers_[mi];
  }

  this.clusters_ = [];
};
/**
 * Rebuilds all clusters from scratch and redraws them, deferring removal
 * of the old cluster icons to avoid visual flicker.
 */
MarkerClusterer.prototype.repaint = function() {
  // Keep a reference to the old clusters so their icons can be removed
  // after the new ones have been drawn.
  var oldClusters = this.clusters_.slice();
  this.clusters_.length = 0;
  this.resetViewport();
  this.redraw();

  // Remove the old clusters.
  // Do it in a timeout so the other clusters have been drawn first.
  window.setTimeout(function() {
    for (var i = 0, cluster; cluster = oldClusters[i]; i++) {
      cluster.remove();
    }
  }, 0);
};
/**
 * Redraws the clusters by (re)assigning any unclustered markers.
 */
MarkerClusterer.prototype.redraw = function() {
  this.createClusters_();
};
/**
 * Calculates the great-circle (haversine) distance between two latlng
 * locations in km.
 * @see http://www.movable-type.co.uk/scripts/latlong.html
 *
 * @param {google.maps.LatLng} p1 The first lat lng point.
 * @param {google.maps.LatLng} p2 The second lat lng point.
 * @return {number} The distance between the two points in km.
 * @private
 */
MarkerClusterer.prototype.distanceBetweenPoints_ = function(p1, p2) {
  if (!p1 || !p2) {
    return 0;
  }

  var earthRadiusKm = 6371;
  var dLat = (p2.lat() - p1.lat()) * Math.PI / 180;
  var dLon = (p2.lng() - p1.lng()) * Math.PI / 180;
  var haversine = Math.sin(dLat / 2) * Math.sin(dLat / 2) +
      Math.cos(p1.lat() * Math.PI / 180) * Math.cos(p2.lat() * Math.PI / 180) *
      Math.sin(dLon / 2) * Math.sin(dLon / 2);
  var centralAngle =
      2 * Math.atan2(Math.sqrt(haversine), Math.sqrt(1 - haversine));
  return earthRadiusKm * centralAngle;
};
/**
 * Adds a marker to the nearest cluster whose bounds contain it, or
 * creates a new cluster for it.
 *
 * @param {google.maps.Marker} marker The marker to add.
 * @private
 */
MarkerClusterer.prototype.addToClosestCluster_ = function(marker) {
  // Seed larger than any distance on Earth (~40,000 km circumference).
  var distance = 40000; // Some large number
  var clusterToAddTo = null;
  var pos = marker.getPosition();

  for (var i = 0, cluster; cluster = this.clusters_[i]; i++) {
    var center = cluster.getCenter();
    if (center) {
      var d = this.distanceBetweenPoints_(center, marker.getPosition());
      if (d < distance) {
        distance = d;
        clusterToAddTo = cluster;
      }
    }
  }

  // Only join the nearest cluster if the marker falls inside its
  // grid-extended bounds; otherwise start a fresh cluster.
  if (clusterToAddTo && clusterToAddTo.isMarkerInClusterBounds(marker)) {
    clusterToAddTo.addMarker(marker);
  } else {
    var cluster = new Cluster(this);
    cluster.addMarker(marker);
    this.clusters_.push(cluster);
  }
};
/**
 * Assigns every not-yet-clustered marker within the (extended) viewport
 * to a cluster. No-op until the overlay is ready.
 *
 * @private
 */
MarkerClusterer.prototype.createClusters_ = function() {
  if (!this.ready_) {
    return;
  }

  // Get our current map view bounds.
  // Create a new bounds object so we don't affect the map.
  var mapBounds = new google.maps.LatLngBounds(this.map_.getBounds().getSouthWest(),
      this.map_.getBounds().getNorthEast());
  var bounds = this.getExtendedBounds(mapBounds);

  for (var i = 0, marker; marker = this.markers_[i]; i++) {
    if (!marker.isAdded && this.isMarkerInBounds_(marker, bounds)) {
      this.addToClosestCluster_(marker);
    }
  }
};
/**
 * A cluster that contains markers.
 *
 * @param {MarkerClusterer} markerClusterer The markerclusterer that this
 *     cluster is associated with.
 * @constructor
 * @ignore
 */
function Cluster(markerClusterer) {
  this.markerClusterer_ = markerClusterer;
  this.map_ = markerClusterer.getMap();
  // Snapshot clusterer settings at construction time.
  this.gridSize_ = markerClusterer.getGridSize();
  this.minClusterSize_ = markerClusterer.getMinClusterSize();
  this.averageCenter_ = markerClusterer.isAverageCenter();
  // Center/bounds are established when the first marker is added.
  this.center_ = null;
  this.markers_ = [];
  this.bounds_ = null;
  this.clusterIcon_ = new ClusterIcon(this, markerClusterer.getStyles(),
      markerClusterer.getGridSize());
}
/**
 * Determines if a marker is already a member of this cluster.
 *
 * @param {google.maps.Marker} marker The marker to check.
 * @return {boolean} True if the marker is already added.
 */
Cluster.prototype.isMarkerAlreadyAdded = function(marker) {
  if (this.markers_.indexOf) {
    return this.markers_.indexOf(marker) != -1;
  }
  // Fallback linear scan for environments without Array#indexOf.
  var i = 0;
  var candidate = this.markers_[i];
  while (candidate) {
    if (candidate == marker) {
      return true;
    }
    i++;
    candidate = this.markers_[i];
  }
  return false;
};
/**
 * Adds a marker to the cluster, updating the center, the member markers'
 * visibility and the cluster icon.
 *
 * @param {google.maps.Marker} marker The marker to add.
 * @return {boolean} True if the marker was added.
 */
Cluster.prototype.addMarker = function(marker) {
  if (this.isMarkerAlreadyAdded(marker)) {
    return false;
  }

  if (!this.center_) {
    // First marker defines the cluster center.
    this.center_ = marker.getPosition();
    this.calculateBounds_();
  } else {
    if (this.averageCenter_) {
      // Incrementally update the running average of member positions.
      var l = this.markers_.length + 1;
      var lat = (this.center_.lat() * (l-1) + marker.getPosition().lat()) / l;
      var lng = (this.center_.lng() * (l-1) + marker.getPosition().lng()) / l;
      this.center_ = new google.maps.LatLng(lat, lng);
      this.calculateBounds_();
    }
  }

  marker.isAdded = true;
  this.markers_.push(marker);

  var len = this.markers_.length;
  if (len < this.minClusterSize_ && marker.getMap() != this.map_) {
    // Min cluster size not reached so show the marker.
    marker.setMap(this.map_);
  }

  if (len == this.minClusterSize_) {
    // Hide the markers that were showing.
    for (var i = 0; i < len; i++) {
      this.markers_[i].setMap(null);
    }
  }

  if (len >= this.minClusterSize_) {
    // At or past the threshold: the cluster icon represents this marker.
    marker.setMap(null);
  }

  this.updateIcon();
  return true;
};
/**
 * Returns the marker clusterer that the cluster is associated with.
 *
 * @return {MarkerClusterer} The associated marker clusterer.
 */
Cluster.prototype.getMarkerClusterer = function() {
  return this.markerClusterer_;
};

/**
 * Returns bounds covering the cluster center and every member marker.
 *
 * @return {google.maps.LatLngBounds} the cluster bounds.
 */
Cluster.prototype.getBounds = function() {
  var bounds = new google.maps.LatLngBounds(this.center_, this.center_);
  var markers = this.getMarkers();
  for (var i = 0, marker; marker = markers[i]; i++) {
    bounds.extend(marker.getPosition());
  }
  return bounds;
};
/**
 * Removes the cluster: tears down its icon and releases its marker list.
 */
Cluster.prototype.remove = function() {
  this.clusterIcon_.remove();
  this.markers_.length = 0;
  delete this.markers_;
};
/**
 * Returns the number of markers in the cluster.
 *
 * @return {number} The number of markers.
 */
Cluster.prototype.getSize = function() {
  return this.markers_.length;
};

/**
 * Returns the markers in the cluster.
 *
 * @return {Array.<google.maps.Marker>} The cluster's markers.
 */
Cluster.prototype.getMarkers = function() {
  return this.markers_;
};

/**
 * Returns the center of the cluster.
 *
 * @return {google.maps.LatLng} The cluster center.
 */
Cluster.prototype.getCenter = function() {
  return this.center_;
};
/**
 * Recomputes the cluster's bounds: its center extended outward by the
 * clusterer's grid size.
 *
 * @private
 */
Cluster.prototype.calculateBounds_ = function() {
  var bounds = new google.maps.LatLngBounds(this.center_, this.center_);
  this.bounds_ = this.markerClusterer_.getExtendedBounds(bounds);
};
/**
 * Determines if a marker lies in the cluster's (grid-extended) bounds.
 *
 * @param {google.maps.Marker} marker The marker to check.
 * @return {boolean} True if the marker lies in the bounds.
 */
Cluster.prototype.isMarkerInClusterBounds = function(marker) {
  return this.bounds_.contains(marker.getPosition());
};

/**
 * Returns the map that the cluster is associated with.
 *
 * @return {google.maps.Map} The map.
 */
Cluster.prototype.getMap = function() {
  return this.map_;
};
/**
 * Updates the cluster icon: past max zoom the raw markers are shown,
 * below the minimum size the icon hides, otherwise the icon is styled
 * by the calculator and shown.
 */
Cluster.prototype.updateIcon = function() {
  var zoom = this.map_.getZoom();
  var mz = this.markerClusterer_.getMaxZoom();

  // NOTE(review): a maxZoom of 0 is falsy and would skip this branch —
  // presumably 0 is never a meaningful max zoom here; confirm if needed.
  if (mz && zoom > mz) {
    // The zoom is greater than our max zoom so show all the markers in cluster.
    for (var i = 0, marker; marker = this.markers_[i]; i++) {
      marker.setMap(this.map_);
    }
    return;
  }

  if (this.markers_.length < this.minClusterSize_) {
    // Min cluster size not yet reached.
    this.clusterIcon_.hide();
    return;
  }

  var numStyles = this.markerClusterer_.getStyles().length;
  var sums = this.markerClusterer_.getCalculator()(this.markers_, numStyles);
  this.clusterIcon_.setCenter(this.center_);
  this.clusterIcon_.setSums(sums);
  this.clusterIcon_.show();
};
/**
 * A cluster icon.
 *
 * @param {Cluster} cluster The cluster to be associated with.
 * @param {Object} styles An object that has style properties:
 *     'url': (string) The image url.
 *     'height': (number) The image height.
 *     'width': (number) The image width.
 *     'anchor': (Array) The anchor position of the label text.
 *     'textColor': (string) The text color.
 *     'textSize': (number) The text size.
 *     'backgroundPosition: (string) The background postition x, y.
 * @param {number=} opt_padding Optional padding to apply to the cluster icon.
 * @constructor
 * @extends google.maps.OverlayView
 * @ignore
 */
function ClusterIcon(cluster, styles, opt_padding) {
  // Mix OverlayView into ClusterIcon lazily, same as MarkerClusterer does.
  cluster.getMarkerClusterer().extend(ClusterIcon, google.maps.OverlayView);

  this.styles_ = styles;
  this.padding_ = opt_padding || 0;
  this.cluster_ = cluster;
  this.center_ = null;
  this.map_ = cluster.getMap();
  // The DOM node is created in onAdd() once attached to the map.
  this.div_ = null;
  this.sums_ = null;
  this.visible_ = false;

  this.setMap(this.map_);
}
/**
 * Triggers the 'clusterclick' event on the clusterer and zooms into the
 * cluster if the zoomOnClick option is set.
 */
ClusterIcon.prototype.triggerClusterClick = function() {
  var markerClusterer = this.cluster_.getMarkerClusterer();

  // Trigger the clusterclick event.
  google.maps.event.trigger(markerClusterer, 'clusterclick', this.cluster_);

  if (markerClusterer.isZoomOnClick()) {
    // Zoom into the cluster.
    this.map_.fitBounds(this.cluster_.getBounds());
  }
};
/**
 * OverlayView lifecycle hook: creates the icon's DIV, styles it if the
 * icon is currently visible, and wires up the click handler.
 * @ignore
 */
ClusterIcon.prototype.onAdd = function() {
  this.div_ = document.createElement('DIV');
  if (this.visible_) {
    var pos = this.getPosFromLatLng_(this.center_);
    this.div_.style.cssText = this.createCss(pos);
    this.div_.innerHTML = this.sums_.text;
  }

  var panes = this.getPanes();
  // overlayMouseTarget receives DOM events, so clicks work.
  panes.overlayMouseTarget.appendChild(this.div_);

  var that = this;
  google.maps.event.addDomListener(this.div_, 'click', function() {
    that.triggerClusterClick();
  });
};
/**
 * Returns the pixel position at which to place the div depending on the
 * latlng, centering the icon on that point.
 *
 * @param {google.maps.LatLng} latlng The position in latlng.
 * @return {google.maps.Point} The position in pixels.
 * @private
 */
ClusterIcon.prototype.getPosFromLatLng_ = function(latlng) {
  var pos = this.getProjection().fromLatLngToDivPixel(latlng);
  // Offset by half the icon size so the icon is centered on the latlng.
  // Math.floor replaces parseInt(x, 10): parseInt coerces the number through
  // a string (slow, and wrong for values that stringify in exponential
  // notation); width_/height_ are positive so truncation is identical.
  pos.x -= Math.floor(this.width_ / 2);
  pos.y -= Math.floor(this.height_ / 2);
  return pos;
};
/**
 * Repositions the icon's div to match the current cluster center.
 * @ignore
 */
ClusterIcon.prototype.draw = function() {
  if (!this.visible_) {
    return;
  }
  var pos = this.getPosFromLatLng_(this.center_);
  this.div_.style.top = pos.y + 'px';
  this.div_.style.left = pos.x + 'px';
};
/**
 * Hides the icon and records that it should stay hidden.
 */
ClusterIcon.prototype.hide = function() {
  var div = this.div_;
  if (div) {
    div.style.display = 'none';
  }
  this.visible_ = false;
};
/**
 * Positions the icon over its center and makes it visible.
 */
ClusterIcon.prototype.show = function() {
  var div = this.div_;
  if (div) {
    var pos = this.getPosFromLatLng_(this.center_);
    div.style.cssText = this.createCss(pos);
    div.style.display = '';
  }
  this.visible_ = true;
};
/**
 * Remove the icon from the map. Detaching the overlay triggers onRemove(),
 * which tears down the DOM node.
 */
ClusterIcon.prototype.remove = function() {
  this.setMap(null);
};
/**
 * Implementation of the onRemove interface: detaches and drops the div.
 * @ignore
 */
ClusterIcon.prototype.onRemove = function() {
  var div = this.div_;
  if (!div || !div.parentNode) {
    return;
  }
  this.hide();
  div.parentNode.removeChild(div);
  this.div_ = null;
};
/**
 * Sets the sums of the icon.
 *
 * @param {Object} sums The sums containing:
 *   'text': (string) The text to display in the icon.
 *   'index': (number) The style index of the icon.
 */
ClusterIcon.prototype.setSums = function(sums) {
  this.sums_ = sums;
  this.text_ = sums.text;
  this.index_ = sums.index;
  var div = this.div_;
  if (div) {
    div.innerHTML = sums.text;
  }
  // Refresh the cached style fields for the new index.
  this.useStyle();
};
/**
 * Applies the style matching the current sums index to this icon.
 */
ClusterIcon.prototype.useStyle = function() {
  // Clamp the 1-based sums index into the bounds of the styles array.
  var styleIndex = Math.min(this.styles_.length - 1,
      Math.max(0, this.sums_.index - 1));
  var chosen = this.styles_[styleIndex];
  this.url_ = chosen['url'];
  this.height_ = chosen['height'];
  this.width_ = chosen['width'];
  this.textColor_ = chosen['textColor'];
  this.anchor_ = chosen['anchor'];
  this.textSize_ = chosen['textSize'];
  this.backgroundPosition_ = chosen['backgroundPosition'];
};
/**
 * Sets the center of the icon. The position takes effect on the next
 * draw()/show() call.
 *
 * @param {google.maps.LatLng} center The latlng to set as the center.
 */
ClusterIcon.prototype.setCenter = function(center) {
  this.center_ = center;
};
/**
 * Create the css text based on the position of the icon.
 *
 * @param {google.maps.Point} pos The position.
 * @return {string} The css style text.
 */
ClusterIcon.prototype.createCss = function(pos) {
  var parts = [];
  parts.push('background-image:url(' + this.url_ + ');');
  var bgPos = this.backgroundPosition_ ? this.backgroundPosition_ : '0 0';
  parts.push('background-position:' + bgPos + ';');
  if (typeof this.anchor_ === 'object') {
    var anchorY = this.anchor_[0];
    var anchorX = this.anchor_[1];
    // A valid vertical anchor shifts the label down via padding-top.
    if (typeof anchorY === 'number' && anchorY > 0 && anchorY < this.height_) {
      parts.push('height:' + (this.height_ - anchorY) +
          'px; padding-top:' + anchorY + 'px;');
    } else {
      parts.push('height:' + this.height_ + 'px; line-height:' + this.height_ +
          'px;');
    }
    // A valid horizontal anchor shifts the label right via padding-left.
    if (typeof anchorX === 'number' && anchorX > 0 && anchorX < this.width_) {
      parts.push('width:' + (this.width_ - anchorX) +
          'px; padding-left:' + anchorX + 'px;');
    } else {
      parts.push('width:' + this.width_ + 'px; text-align:center;');
    }
  } else {
    // No anchor: center the label in the full icon box.
    parts.push('height:' + this.height_ + 'px; line-height:' +
        this.height_ + 'px; width:' + this.width_ + 'px; text-align:center;');
  }
  var color = this.textColor_ ? this.textColor_ : 'black';
  var size = this.textSize_ ? this.textSize_ : 11;
  parts.push('cursor:pointer; top:' + pos.y + 'px; left:' +
      pos.x + 'px; color:' + color + '; position:absolute; font-size:' +
      size + 'px; font-family:Arial,sans-serif; font-weight:bold');
  return parts.join('');
};
// Export Symbols for Closure
// If you are not going to compile with closure then you can remove the
// code below.
// Each assignment aliases a method under its quoted (string) name so that the
// public API survives Closure Compiler property renaming.
window['MarkerClusterer'] = MarkerClusterer;
// MarkerClusterer public API.
MarkerClusterer.prototype['addMarker'] = MarkerClusterer.prototype.addMarker;
MarkerClusterer.prototype['addMarkers'] = MarkerClusterer.prototype.addMarkers;
MarkerClusterer.prototype['clearMarkers'] =
    MarkerClusterer.prototype.clearMarkers;
MarkerClusterer.prototype['fitMapToMarkers'] =
    MarkerClusterer.prototype.fitMapToMarkers;
MarkerClusterer.prototype['getCalculator'] =
    MarkerClusterer.prototype.getCalculator;
MarkerClusterer.prototype['getGridSize'] =
    MarkerClusterer.prototype.getGridSize;
MarkerClusterer.prototype['getExtendedBounds'] =
    MarkerClusterer.prototype.getExtendedBounds;
MarkerClusterer.prototype['getMap'] = MarkerClusterer.prototype.getMap;
MarkerClusterer.prototype['getMarkers'] = MarkerClusterer.prototype.getMarkers;
MarkerClusterer.prototype['getMaxZoom'] = MarkerClusterer.prototype.getMaxZoom;
MarkerClusterer.prototype['getStyles'] = MarkerClusterer.prototype.getStyles;
MarkerClusterer.prototype['getTotalClusters'] =
    MarkerClusterer.prototype.getTotalClusters;
MarkerClusterer.prototype['getTotalMarkers'] =
    MarkerClusterer.prototype.getTotalMarkers;
MarkerClusterer.prototype['redraw'] = MarkerClusterer.prototype.redraw;
MarkerClusterer.prototype['removeMarker'] =
    MarkerClusterer.prototype.removeMarker;
MarkerClusterer.prototype['removeMarkers'] =
    MarkerClusterer.prototype.removeMarkers;
MarkerClusterer.prototype['resetViewport'] =
    MarkerClusterer.prototype.resetViewport;
MarkerClusterer.prototype['repaint'] =
    MarkerClusterer.prototype.repaint;
MarkerClusterer.prototype['setCalculator'] =
    MarkerClusterer.prototype.setCalculator;
MarkerClusterer.prototype['setGridSize'] =
    MarkerClusterer.prototype.setGridSize;
MarkerClusterer.prototype['setMaxZoom'] =
    MarkerClusterer.prototype.setMaxZoom;
// OverlayView interface methods called by the Maps API itself.
MarkerClusterer.prototype['onAdd'] = MarkerClusterer.prototype.onAdd;
MarkerClusterer.prototype['draw'] = MarkerClusterer.prototype.draw;
// Cluster public API.
Cluster.prototype['getCenter'] = Cluster.prototype.getCenter;
Cluster.prototype['getSize'] = Cluster.prototype.getSize;
Cluster.prototype['getMarkers'] = Cluster.prototype.getMarkers;
// ClusterIcon OverlayView interface methods.
ClusterIcon.prototype['onAdd'] = ClusterIcon.prototype.onAdd;
ClusterIcon.prototype['draw'] = ClusterIcon.prototype.draw;
ClusterIcon.prototype['onRemove'] = ClusterIcon.prototype.onRemove;
| {
"pile_set_name": "Github"
} |
# frozen_string_literal: true
# This file was generated by Appraisal
# Gemfile variant used to exercise the gem against the Rails 5.0 series.
source 'https://rubygems.org'
gem 'rails', '~> 5.0.7.2'
# '~> 1.3.6' constrains sqlite3 to >= 1.3.6 and < 1.4.
gem 'sqlite3', '~> 1.3.6'
# Pull the gem's own runtime/dev dependencies from the gemspec one level up.
gemspec path: '../'
| {
"pile_set_name": "Github"
} |
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef js_RootingAPI_h
#define js_RootingAPI_h
#include "mozilla/Attributes.h"
#include "mozilla/GuardObjects.h"
#include "mozilla/LinkedList.h"
#include "mozilla/NullPtr.h"
#include "mozilla/TypeTraits.h"
#include "jspubtd.h"
#include "js/GCAPI.h"
#include "js/HeapAPI.h"
#include "js/TypeDecls.h"
#include "js/Utility.h"
/*
* Moving GC Stack Rooting
*
* A moving GC may change the physical location of GC allocated things, even
* when they are rooted, updating all pointers to the thing to refer to its new
* location. The GC must therefore know about all live pointers to a thing,
* not just one of them, in order to behave correctly.
*
* The |Rooted| and |Handle| classes below are used to root stack locations
* whose value may be held live across a call that can trigger GC. For a
* code fragment such as:
*
* JSObject *obj = NewObject(cx);
* DoSomething(cx);
* ... = obj->lastProperty();
*
* If |DoSomething()| can trigger a GC, the stack location of |obj| must be
* rooted to ensure that the GC does not move the JSObject referred to by
* |obj| without updating |obj|'s location itself. This rooting must happen
* regardless of whether there are other roots which ensure that the object
* itself will not be collected.
*
* If |DoSomething()| cannot trigger a GC, and the same holds for all other
 * calls made between |obj|'s definition and its last uses, then no rooting
* is required.
*
* SpiderMonkey can trigger a GC at almost any time and in ways that are not
* always clear. For example, the following innocuous-looking actions can
* cause a GC: allocation of any new GC thing; JSObject::hasProperty;
* JS_ReportError and friends; and ToNumber, among many others. The following
* dangerous-looking actions cannot trigger a GC: js_malloc, cx->malloc_,
* rt->malloc_, and friends and JS_ReportOutOfMemory.
*
* The following family of three classes will exactly root a stack location.
* Incorrect usage of these classes will result in a compile error in almost
* all cases. Therefore, it is very hard to be incorrectly rooted if you use
* these classes exclusively. These classes are all templated on the type T of
* the value being rooted.
*
* - Rooted<T> declares a variable of type T, whose value is always rooted.
* Rooted<T> may be automatically coerced to a Handle<T>, below. Rooted<T>
* should be used whenever a local variable's value may be held live across a
* call which can trigger a GC.
*
* - Handle<T> is a const reference to a Rooted<T>. Functions which take GC
* things or values as arguments and need to root those arguments should
* generally use handles for those arguments and avoid any explicit rooting.
* This has two benefits. First, when several such functions call each other
* then redundant rooting of multiple copies of the GC thing can be avoided.
* Second, if the caller does not pass a rooted value a compile error will be
* generated, which is quicker and easier to fix than when relying on a
* separate rooting analysis.
*
* - MutableHandle<T> is a non-const reference to Rooted<T>. It is used in the
* same way as Handle<T> and includes a |set(const T &v)| method to allow
* updating the value of the referenced Rooted<T>. A MutableHandle<T> can be
* created from a Rooted<T> by using |Rooted<T>::operator&()|.
*
* In some cases the small performance overhead of exact rooting (measured to
* be a few nanoseconds on desktop) is too much. In these cases, try the
* following:
*
* - Move all Rooted<T> above inner loops: this allows you to re-use the root
* on each iteration of the loop.
*
* - Pass Handle<T> through your hot call stack to avoid re-rooting costs at
* every invocation.
*
* The following diagram explains the list of supported, implicit type
* conversions between classes of this family:
*
* Rooted<T> ----> Handle<T>
* | ^
* | |
* | |
* +---> MutableHandle<T>
* (via &)
*
* All of these types have an implicit conversion to raw pointers.
*/
namespace js {
// Forward declaration; the full definition lives elsewhere in SpiderMonkey.
class ScriptSourceObject;
// Per-type policy hooks used by the rooting classes below (initial value,
// poison checks, post-barrier support). Specialized later in this file.
template <typename T>
struct GCMethods {};
// Empty bases that let a specific T add extra methods to Rooted<T>,
// Handle<T>, MutableHandle<T> and Heap<T> via specialization (see
// RootedBase<JSObject*> near the end of this file).
template <typename T>
class RootedBase {};
template <typename T>
class HandleBase {};
template <typename T>
class MutableHandleBase {};
template <typename T>
class HeapBase {};
/*
 * js::NullPtr acts like a nullptr pointer in contexts that require a Handle.
 *
 * Handle provides an implicit constructor for js::NullPtr so that, given:
 *   foo(Handle<JSObject*> h);
 * callers can simply write:
 *   foo(js::NullPtr());
 * which avoids creating a Rooted<JSObject*> just to pass nullptr.
 *
 * This is the SpiderMonkey internal variant. js::NullPtr should be used in
 * preference to JS::NullPtr to avoid the GOT access required for JS_PUBLIC_API
 * symbols.
 */
struct NullPtr
{
    // Handle needs an lvalue to point at; this static's address serves as the
    // canonical "null" location.
    static void * const constNullValue;
};
namespace gc {
struct Cell;
template<typename T>
struct PersistentRootedMarker;
} /* namespace gc */
} /* namespace js */
namespace JS {
template <typename T> class Rooted;
template <typename T> class PersistentRooted;
/* This is exposing internal state of the GC for inlining purposes. */
JS_FRIEND_API(bool) isGCEnabled();
/*
 * JS::NullPtr acts like a nullptr pointer in contexts that require a Handle.
 *
 * Handle provides an implicit constructor for JS::NullPtr so that, given:
 *   foo(Handle<JSObject*> h);
 * callers can simply write:
 *   foo(JS::NullPtr());
 * which avoids creating a Rooted<JSObject*> just to pass nullptr.
 */
struct JS_PUBLIC_API(NullPtr)
{
    static void * const constNullValue;
};
/*
 * The Heap<T> class is a heap-stored reference to a JS GC thing. All members of
 * heap classes that refer to GC things should use Heap<T> (or possibly
 * TenuredHeap<T>, described below).
 *
 * Heap<T> is an abstraction that hides some of the complexity required to
 * maintain GC invariants for the contained reference. It uses operator
 * overloading to provide a normal pointer interface, but notifies the GC every
 * time the value it contains is updated. This is necessary for generational GC,
 * which keeps track of all pointers into the nursery.
 *
 * Heap<T> instances must be traced when their containing object is traced to
 * keep the pointed-to GC thing alive.
 *
 * Heap<T> objects should only be used on the heap. GC references stored on the
 * C/C++ stack must use Rooted/Handle/MutableHandle instead.
 *
 * Type T must be one of: JS::Value, jsid, JSObject*, JSString*, JSScript*
 */
template <typename T>
class Heap : public js::HeapBase<T>
{
  public:
    Heap() {
        // The binary-compatibility assertion lets arrays of Heap<T> be
        // reinterpreted as arrays of T by tracing code.
        static_assert(sizeof(T) == sizeof(Heap<T>),
                      "Heap<T> must be binary compatible with T.");
        init(js::GCMethods<T>::initial());
    }
    explicit Heap(T p) { init(p); }
    /*
     * For Heap, move semantics are equivalent to copy semantics. In C++, a
     * copy constructor taking const-ref is the way to get a single function
     * that will be used for both lvalue and rvalue copies, so we can simply
     * omit the rvalue variant.
     */
    explicit Heap(const Heap<T> &p) { init(p.ptr); }
    ~Heap() {
        // Deregister from the store buffer if the dying slot was registered.
        if (js::GCMethods<T>::needsPostBarrier(ptr))
            relocate();
    }
    bool operator==(const Heap<T> &other) { return ptr == other.ptr; }
    bool operator!=(const Heap<T> &other) { return ptr != other.ptr; }
    bool operator==(const T &other) const { return ptr == other; }
    bool operator!=(const T &other) const { return ptr != other; }
    operator T() const { return ptr; }
    T operator->() const { return ptr; }
    const T *address() const { return &ptr; }
    const T &get() const { return ptr; }
    // "unsafe" because writing through this pointer bypasses the barriers.
    T *unsafeGet() { return &ptr; }
    Heap<T> &operator=(T p) {
        set(p);
        return *this;
    }
    Heap<T> &operator=(const Heap<T>& other) {
        set(other.get());
        return *this;
    }
    // Barrier-aware store: keeps the generational GC's store buffer in sync
    // with whether this slot currently points into the nursery.
    void set(T newPtr) {
        MOZ_ASSERT(!js::GCMethods<T>::poisoned(newPtr));
        if (js::GCMethods<T>::needsPostBarrier(newPtr)) {
            ptr = newPtr;
            post();
        } else if (js::GCMethods<T>::needsPostBarrier(ptr)) {
            relocate();  /* Called before overwriting ptr. */
            ptr = newPtr;
        } else {
            ptr = newPtr;
        }
    }
    /*
     * Set the pointer to a value which will cause a crash if it is
     * dereferenced.
     */
    void setToCrashOnTouch() {
        ptr = reinterpret_cast<T>(crashOnTouchPointer);
    }
    bool isSetToCrashOnTouch() {
        return ptr == crashOnTouchPointer;
    }
  private:
    void init(T newPtr) {
        MOZ_ASSERT(!js::GCMethods<T>::poisoned(newPtr));
        ptr = newPtr;
        if (js::GCMethods<T>::needsPostBarrier(ptr))
            post();
    }
    // Register this slot with the generational GC's store buffer.
    void post() {
#ifdef JSGC_GENERATIONAL
        MOZ_ASSERT(js::GCMethods<T>::needsPostBarrier(ptr));
        js::GCMethods<T>::postBarrier(&ptr);
#endif
    }
    // Undo post(): remove this slot from the store buffer.
    void relocate() {
#ifdef JSGC_GENERATIONAL
        js::GCMethods<T>::relocate(&ptr);
#endif
    }
    enum {
        // Unaligned non-null sentinel used by setToCrashOnTouch().
        crashOnTouchPointer = 1
    };
    T ptr;
};
#ifdef JS_DEBUG
/*
 * For generational GC, assert that an object is in the tenured generation as
 * opposed to being in the nursery.
 */
extern JS_FRIEND_API(void)
AssertGCThingMustBeTenured(JSObject* obj);
#else
inline void
AssertGCThingMustBeTenured(JSObject *obj) {}
#endif
/*
 * The TenuredHeap<T> class is similar to the Heap<T> class above in that it
 * encapsulates the GC concerns of an on-heap reference to a JS object. However,
 * it has two important differences:
 *
 * 1) Pointers which are statically known to only reference "tenured" objects
 *    can avoid the extra overhead of SpiderMonkey's write barriers.
 *
 * 2) Objects in the "tenured" heap have stronger alignment restrictions than
 *    those in the "nursery", so it is possible to store flags in the lower
 *    bits of pointers known to be tenured. TenuredHeap wraps a normal tagged
 *    pointer with a nice API for accessing the flag bits and adds various
 *    assertions to ensure that it is not mis-used.
 *
 * GC things are said to be "tenured" when they are located in the long-lived
 * heap: e.g. they have gained tenure as an object by surviving past at least
 * one GC. For performance, SpiderMonkey allocates some things which are known
 * to normally be long lived directly into the tenured generation; for example,
 * global objects. Additionally, SpiderMonkey does not visit individual objects
 * when deleting non-tenured objects, so object with finalizers are also always
 * tenured; for instance, this includes most DOM objects.
 *
 * The considerations to keep in mind when using a TenuredHeap<T> vs a normal
 * Heap<T> are:
 *
 *  - It is invalid for a TenuredHeap<T> to refer to a non-tenured thing.
 *  - It is however valid for a Heap<T> to refer to a tenured thing.
 *  - It is not possible to store flag bits in a Heap<T>.
 */
template <typename T>
class TenuredHeap : public js::HeapBase<T>
{
  public:
    TenuredHeap() : bits(0) {
        static_assert(sizeof(T) == sizeof(TenuredHeap<T>),
                      "TenuredHeap<T> must be binary compatible with T.");
    }
    explicit TenuredHeap(T p) : bits(0) { setPtr(p); }
    explicit TenuredHeap(const TenuredHeap<T> &p) : bits(0) { setPtr(p.getPtr()); }
    bool operator==(const TenuredHeap<T> &other) { return bits == other.bits; }
    bool operator!=(const TenuredHeap<T> &other) { return bits != other.bits; }
    // Store a new pointer, preserving any flag bits already set.
    void setPtr(T newPtr) {
        // Tenured-heap alignment guarantees the low maskBits of newPtr are 0.
        MOZ_ASSERT((reinterpret_cast<uintptr_t>(newPtr) & flagsMask) == 0);
        MOZ_ASSERT(!js::GCMethods<T>::poisoned(newPtr));
        if (newPtr)
            AssertGCThingMustBeTenured(newPtr);
        bits = (bits & flagsMask) | reinterpret_cast<uintptr_t>(newPtr);
    }
    void setFlags(uintptr_t flagsToSet) {
        MOZ_ASSERT((flagsToSet & ~flagsMask) == 0);
        bits |= flagsToSet;
    }
    void unsetFlags(uintptr_t flagsToUnset) {
        MOZ_ASSERT((flagsToUnset & ~flagsMask) == 0);
        bits &= ~flagsToUnset;
    }
    bool hasFlag(uintptr_t flag) const {
        MOZ_ASSERT((flag & ~flagsMask) == 0);
        return (bits & flag) != 0;
    }
    // Strip the flag bits to recover the raw pointer.
    T getPtr() const { return reinterpret_cast<T>(bits & ~flagsMask); }
    uintptr_t getFlags() const { return bits & flagsMask; }
    operator T() const { return getPtr(); }
    T operator->() const { return getPtr(); }
    TenuredHeap<T> &operator=(T p) {
        setPtr(p);
        return *this;
    }
    TenuredHeap<T> &operator=(const TenuredHeap<T>& other) {
        bits = other.bits;
        return *this;
    }
  private:
    enum {
        // Low 3 bits of the word hold user flags; the rest holds the pointer.
        maskBits = 3,
        flagsMask = (1 << maskBits) - 1,
    };
    uintptr_t bits;
};
/*
 * Reference to a T that has been rooted elsewhere. This is most useful
 * as a parameter type, which guarantees that the T lvalue is properly
 * rooted. See "Moving GC Stack Rooting" above.
 *
 * If you want to add additional methods to Handle for a specific
 * specialization, define a HandleBase<T> specialization containing them.
 */
template <typename T>
class MOZ_NONHEAP_CLASS Handle : public js::HandleBase<T>
{
    friend class JS::MutableHandle<T>;
  public:
    /* Creates a handle from a handle of a type convertible to T. */
    template <typename S>
    Handle(Handle<S> handle,
           typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy = 0)
    {
        static_assert(sizeof(Handle<T>) == sizeof(T *),
                      "Handle must be binary compatible with T*.");
        ptr = reinterpret_cast<const T *>(handle.address());
    }
    /* Create a handle for a nullptr pointer. */
    MOZ_IMPLICIT Handle(js::NullPtr) {
        static_assert(mozilla::IsPointer<T>::value,
                      "js::NullPtr overload not valid for non-pointer types");
        // Point at the static null slot; no Rooted is needed for null.
        ptr = reinterpret_cast<const T *>(&js::NullPtr::constNullValue);
    }
    /* Create a handle for a nullptr pointer. */
    MOZ_IMPLICIT Handle(JS::NullPtr) {
        static_assert(mozilla::IsPointer<T>::value,
                      "JS::NullPtr overload not valid for non-pointer types");
        ptr = reinterpret_cast<const T *>(&JS::NullPtr::constNullValue);
    }
    MOZ_IMPLICIT Handle(MutableHandle<T> handle) {
        ptr = handle.address();
    }
    /*
     * Take care when calling this method!
     *
     * This creates a Handle from the raw location of a T.
     *
     * It should be called only if the following conditions hold:
     *
     *  1) the location of the T is guaranteed to be marked (for some reason
     *     other than being a Rooted), e.g., if it is guaranteed to be reachable
     *     from an implicit root.
     *
     *  2) the contents of the location are immutable, or at least cannot change
     *     for the lifetime of the handle, as its users may not expect its value
     *     to change underneath them.
     */
    static MOZ_CONSTEXPR Handle fromMarkedLocation(const T *p) {
        return Handle(p, DeliberatelyChoosingThisOverload,
                      ImUsingThisOnlyInFromFromMarkedLocation);
    }
    /*
     * Construct a handle from an explicitly rooted location. This is the
     * normal way to create a handle, and normally happens implicitly.
     */
    template <typename S>
    inline
    Handle(const Rooted<S> &root,
           typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy = 0);
    template <typename S>
    inline
    Handle(const PersistentRooted<S> &root,
           typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy = 0);
    /* Construct a read only handle from a mutable handle. */
    template <typename S>
    inline
    Handle(MutableHandle<S> &root,
           typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy = 0);
    const T *address() const { return ptr; }
    const T& get() const { return *ptr; }
    /*
     * Return a reference so passing a Handle<T> to something that
     * takes a |const T&| is not a GC hazard.
     */
    operator const T&() const { return get(); }
    T operator->() const { return get(); }
    bool operator!=(const T &other) const { return *ptr != other; }
    bool operator==(const T &other) const { return *ptr == other; }
  private:
    Handle() {}
    // Private tag types forcing callers of fromMarkedLocation() through the
    // deliberately-named overload rather than an accidental conversion.
    enum Disambiguator { DeliberatelyChoosingThisOverload = 42 };
    enum CallerIdentity { ImUsingThisOnlyInFromFromMarkedLocation = 17 };
    MOZ_CONSTEXPR Handle(const T *p, Disambiguator, CallerIdentity) : ptr(p) {}
    const T *ptr;
    // Handles are immutable views; assignment is forbidden.
    template <typename S> void operator=(S) MOZ_DELETE;
    void operator=(Handle) MOZ_DELETE;
};
/*
 * Similar to a handle, but the underlying storage can be changed. This is
 * useful for outparams.
 *
 * If you want to add additional methods to MutableHandle for a specific
 * specialization, define a MutableHandleBase<T> specialization containing
 * them.
 */
template <typename T>
class MOZ_STACK_CLASS MutableHandle : public js::MutableHandleBase<T>
{
  public:
    inline MOZ_IMPLICIT MutableHandle(Rooted<T> *root);
    inline MOZ_IMPLICIT MutableHandle(PersistentRooted<T> *root);
  private:
    // Disallow true nullptr and emulated nullptr (gcc 4.4/4.5, __null, appears
    // as int/long [32/64-bit]) for overloading purposes.
    template<typename N>
    MutableHandle(N,
                  typename mozilla::EnableIf<mozilla::IsNullPointer<N>::value ||
                                             mozilla::IsSame<N, int>::value ||
                                             mozilla::IsSame<N, long>::value,
                                             int>::Type dummy = 0)
    MOZ_DELETE;
  public:
    // Write through to the referenced rooted location (outparam semantics).
    void set(T v) {
        MOZ_ASSERT(!js::GCMethods<T>::poisoned(v));
        *ptr = v;
    }
    /*
     * This may be called only if the location of the T is guaranteed
     * to be marked (for some reason other than being a Rooted),
     * e.g., if it is guaranteed to be reachable from an implicit root.
     *
     * Create a MutableHandle from a raw location of a T.
     */
    static MutableHandle fromMarkedLocation(T *p) {
        MutableHandle h;
        h.ptr = p;
        return h;
    }
    T *address() const { return ptr; }
    const T& get() const { return *ptr; }
    /*
     * Return a reference so passing a MutableHandle<T> to something that takes
     * a |const T&| is not a GC hazard.
     */
    operator const T&() const { return get(); }
    T operator->() const { return get(); }
  private:
    MutableHandle() {}
    T *ptr;
    // Rebinding the handle itself is forbidden; only set() mutates the target.
    template <typename S> void operator=(S v) MOZ_DELETE;
    void operator=(MutableHandle other) MOZ_DELETE;
};
#ifdef JSGC_GENERATIONAL
// Store-buffer registration/deregistration for Heap<T> slots (see Heap::post
// and Heap::relocate above).
JS_FRIEND_API(void) HeapCellPostBarrier(js::gc::Cell **cellp);
JS_FRIEND_API(void) HeapCellRelocate(js::gc::Cell **cellp);
#endif
} /* namespace JS */
namespace js {
/*
 * InternalHandle is a handle to an internal pointer into a gcthing. Use
 * InternalHandle when you have a pointer to a direct field of a gcthing, or
 * when you need a parameter type for something that *may* be a pointer to a
 * direct field of a gcthing.
 */
template <typename T>
class InternalHandle {};
template <typename T>
class InternalHandle<T*>
{
    // Rooted location of the owning gcthing plus the field's byte offset
    // within it; storing an offset (not a raw field pointer) stays valid if
    // the GC moves the gcthing.
    void * const *holder;
    size_t offset;
  public:
    /*
     * Create an InternalHandle using a Handle to the gcthing containing the
     * field in question, and a pointer to the field.
     */
    template<typename H>
    InternalHandle(const JS::Handle<H> &handle, T *field)
      : holder((void**)handle.address()), offset(uintptr_t(field) - uintptr_t(handle.get()))
    {}
    /*
     * Create an InternalHandle to a field within a Rooted<>.
     */
    template<typename R>
    InternalHandle(const JS::Rooted<R> &root, T *field)
      : holder((void**)root.address()), offset(uintptr_t(field) - uintptr_t(root.get()))
    {}
    InternalHandle(const InternalHandle<T*>& other)
      : holder(other.holder), offset(other.offset) {}
    // Recompute the field address from the (possibly updated) gcthing pointer.
    T *get() const { return reinterpret_cast<T*>(uintptr_t(*holder) + offset); }
    const T &operator*() const { return *get(); }
    T *operator->() const { return get(); }
    static InternalHandle<T*> fromMarkedLocation(T *fieldPtr) {
        return InternalHandle(fieldPtr);
    }
  private:
    /*
     * Create an InternalHandle to something that is not a pointer to a
     * gcthing, and so does not need to be rooted in the first place. Use these
     * InternalHandles to pass pointers into functions that also need to accept
     * regular InternalHandles to gcthing fields.
     *
     * Make this private to prevent accidental misuse; this is only for
     * fromMarkedLocation().
     */
    explicit InternalHandle(T *field)
      : holder(reinterpret_cast<void * const *>(&js::NullPtr::constNullValue)),
        offset(uintptr_t(field))
    {}
    void operator=(InternalHandle<T*> other) MOZ_DELETE;
};
/*
 * By default, things should use the inheritance hierarchy to find their
 * ThingRootKind. Some pointer types are explicitly set in jspubtd.h so that
 * Rooted<T> may be used without the class definition being available.
 */
template <typename T>
struct RootKind
{
    static ThingRootKind rootKind() { return T::rootKind(); }
};
// Pointer form delegates to the pointee class's static rootKind().
template <typename T>
struct RootKind<T *>
{
    static ThingRootKind rootKind() { return T::rootKind(); }
};
// Generic pointer policy: no post barriers (assumed not nursery-allocated).
template <typename T>
struct GCMethods<T *>
{
    static T *initial() { return nullptr; }
    static bool poisoned(T *v) { return JS::IsPoisonedPtr(v); }
    static bool needsPostBarrier(T *v) { return false; }
#ifdef JSGC_GENERATIONAL
    static void postBarrier(T **vp) {}
    static void relocate(T **vp) {}
#endif
};
// JSObject* can live in the nursery, so it needs real store-buffer barriers.
template <>
struct GCMethods<JSObject *>
{
    static JSObject *initial() { return nullptr; }
    static bool poisoned(JSObject *v) { return JS::IsPoisonedPtr(v); }
    static bool needsPostBarrier(JSObject *v) {
        return v != nullptr && gc::IsInsideNursery(reinterpret_cast<gc::Cell *>(v));
    }
#ifdef JSGC_GENERATIONAL
    static void postBarrier(JSObject **vp) {
        JS::HeapCellPostBarrier(reinterpret_cast<js::gc::Cell **>(vp));
    }
    static void relocate(JSObject **vp) {
        JS::HeapCellRelocate(reinterpret_cast<js::gc::Cell **>(vp));
    }
#endif
};
// JSFunction* mirrors the JSObject* policy (functions are nursery-allocatable).
template <>
struct GCMethods<JSFunction *>
{
    static JSFunction *initial() { return nullptr; }
    static bool poisoned(JSFunction *v) { return JS::IsPoisonedPtr(v); }
    static bool needsPostBarrier(JSFunction *v) {
        return v != nullptr && gc::IsInsideNursery(reinterpret_cast<gc::Cell *>(v));
    }
#ifdef JSGC_GENERATIONAL
    static void postBarrier(JSFunction **vp) {
        JS::HeapCellPostBarrier(reinterpret_cast<js::gc::Cell **>(vp));
    }
    static void relocate(JSFunction **vp) {
        JS::HeapCellRelocate(reinterpret_cast<js::gc::Cell **>(vp));
    }
#endif
};
#ifdef JS_DEBUG
/* This helper allows us to assert that Rooted<T> is scoped within a request. */
extern JS_PUBLIC_API(bool)
IsInRequest(JSContext *cx);
#endif
} /* namespace js */
namespace JS {
/*
 * Local variable of type T whose value is always rooted. This is typically
 * used for local variables, or for non-rooted values being passed to a
 * function that requires a handle, e.g. Foo(Root<T>(cx, x)).
 *
 * If you want to add additional methods to Rooted for a specific
 * specialization, define a RootedBase<T> specialization containing them.
 */
template <typename T>
class MOZ_STACK_CLASS Rooted : public js::RootedBase<T>
{
    /* Note: CX is a subclass of either ContextFriendFields or PerThreadDataFriendFields. */
    template <typename CX>
    void init(CX *cx) {
#ifdef JSGC_TRACK_EXACT_ROOTS
        // Push this root onto the per-kind LIFO list of stack roots; the
        // destructor pops it, so construction/destruction must nest like
        // stack frames (hence MOZ_STACK_CLASS).
        js::ThingRootKind kind = js::RootKind<T>::rootKind();
        this->stack = &cx->thingGCRooters[kind];
        this->prev = *stack;
        *stack = reinterpret_cast<Rooted<void*>*>(this);
        MOZ_ASSERT(!js::GCMethods<T>::poisoned(ptr));
#endif
    }
  public:
    explicit Rooted(JSContext *cx
                    MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
      : ptr(js::GCMethods<T>::initial())
    {
        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
#ifdef JS_DEBUG
        MOZ_ASSERT(js::IsInRequest(cx));
#endif
        init(js::ContextFriendFields::get(cx));
    }
    Rooted(JSContext *cx, T initial
           MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
      : ptr(initial)
    {
        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
#ifdef JS_DEBUG
        MOZ_ASSERT(js::IsInRequest(cx));
#endif
        init(js::ContextFriendFields::get(cx));
    }
    explicit Rooted(js::ContextFriendFields *cx
                    MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
      : ptr(js::GCMethods<T>::initial())
    {
        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
        init(cx);
    }
    Rooted(js::ContextFriendFields *cx, T initial
           MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
      : ptr(initial)
    {
        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
        init(cx);
    }
    explicit Rooted(js::PerThreadDataFriendFields *pt
                    MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
      : ptr(js::GCMethods<T>::initial())
    {
        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
        init(pt);
    }
    Rooted(js::PerThreadDataFriendFields *pt, T initial
           MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
      : ptr(initial)
    {
        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
        init(pt);
    }
    explicit Rooted(JSRuntime *rt
                    MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
      : ptr(js::GCMethods<T>::initial())
    {
        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
        init(js::PerThreadDataFriendFields::getMainThread(rt));
    }
    Rooted(JSRuntime *rt, T initial
           MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
      : ptr(initial)
    {
        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
        init(js::PerThreadDataFriendFields::getMainThread(rt));
    }
    // Note that we need to let the compiler generate the default destructor in
    // non-exact-rooting builds because of a bug in the instrumented PGO builds
    // using MSVC, see bug 915735 for more details.
#ifdef JSGC_TRACK_EXACT_ROOTS
    ~Rooted() {
        // LIFO pop: this root must be the current head of its kind's list.
        MOZ_ASSERT(*stack == reinterpret_cast<Rooted<void*>*>(this));
        *stack = prev;
    }
#endif
#ifdef JSGC_TRACK_EXACT_ROOTS
    Rooted<T> *previous() { return reinterpret_cast<Rooted<T>*>(prev); }
#endif
    /*
     * Important: Return a reference here so passing a Rooted<T> to
     * something that takes a |const T&| is not a GC hazard.
     */
    operator const T&() const { return ptr; }
    T operator->() const { return ptr; }
    T *address() { return &ptr; }
    const T *address() const { return &ptr; }
    T &get() { return ptr; }
    const T &get() const { return ptr; }
    T &operator=(T value) {
        MOZ_ASSERT(!js::GCMethods<T>::poisoned(value));
        ptr = value;
        return ptr;
    }
    T &operator=(const Rooted &value) {
        ptr = value;
        return ptr;
    }
    void set(T value) {
        MOZ_ASSERT(!js::GCMethods<T>::poisoned(value));
        ptr = value;
    }
    bool operator!=(const T &other) const { return ptr != other; }
    bool operator==(const T &other) const { return ptr == other; }
  private:
#ifdef JSGC_TRACK_EXACT_ROOTS
    /*
     * These need to be templated on void* to avoid aliasing issues between, for
     * example, Rooted<JSObject> and Rooted<JSFunction>, which use the same
     * stack head pointer for different classes.
     */
    Rooted<void *> **stack, *prev;
#endif
    /*
     * |ptr| must be the last field in Rooted because the analysis treats all
     * Rooted as Rooted<void*> during the analysis. See bug 829372.
     */
    T ptr;
    MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
    Rooted(const Rooted &) MOZ_DELETE;
};
} /* namespace JS */
namespace js {
/*
 * Augment the generic Rooted<T> interface when T = JSObject* with
 * class-querying and downcasting operations.
 *
 * Given a Rooted<JSObject*> obj, one can view
 *   Handle<StringObject*> h = obj.as<StringObject*>();
 * as an optimization of
 *   Rooted<StringObject*> rooted(cx, &obj->as<StringObject*>());
 *   Handle<StringObject*> h = rooted;
 */
template <>
class RootedBase<JSObject*>
{
  public:
    // Downcast view; definition requires JSObject and lives elsewhere.
    template <class U>
    JS::Handle<U*> as() const;
};
/* Interface substitute for Rooted<T> which does not root the variable's memory. */
template <typename T>
class FakeRooted : public RootedBase<T>
{
  public:
    /* Default-construct with the type's sentinel initial value (e.g. nullptr). */
    template <typename CX>
    FakeRooted(CX *cx
               MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
      : ptr(GCMethods<T>::initial())
    {
        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
    }
    /* Construct holding |initial|; the context argument is accepted only for
     * signature parity with Rooted and is otherwise unused. */
    template <typename CX>
    FakeRooted(CX *cx, T initial
               MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
      : ptr(initial)
    {
        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
    }
    operator T() const { return ptr; }
    T operator->() const { return ptr; }
    T *address() { return &ptr; }
    const T *address() const { return &ptr; }
    T &get() { return ptr; }
    const T &get() const { return ptr; }
    /* Assignment checks for poisoned (invalid) GC values in debug builds. */
    FakeRooted<T> &operator=(T value) {
        MOZ_ASSERT(!GCMethods<T>::poisoned(value));
        ptr = value;
        return *this;
    }
    FakeRooted<T> &operator=(const FakeRooted<T> &other) {
        MOZ_ASSERT(!GCMethods<T>::poisoned(other.ptr));
        ptr = other.ptr;
        return *this;
    }
    bool operator!=(const T &other) const { return ptr != other; }
    bool operator==(const T &other) const { return ptr == other; }
  private:
    T ptr;
    MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
    /* Non-copyable, matching Rooted. */
    FakeRooted(const FakeRooted &) MOZ_DELETE;
};
/* Interface substitute for MutableHandle<T> which is not required to point to rooted memory. */
template <typename T>
class FakeMutableHandle : public js::MutableHandleBase<T>
{
  public:
    /* Wrap a raw location directly; no rooting is implied. */
    MOZ_IMPLICIT FakeMutableHandle(T *t) {
        ptr = t;
    }
    /* Wrap the storage of a FakeRooted. */
    MOZ_IMPLICIT FakeMutableHandle(FakeRooted<T> *root) {
        ptr = root->address();
    }
    /* Store |v| through the wrapped location, asserting it is not poisoned. */
    void set(T v) {
        MOZ_ASSERT(!js::GCMethods<T>::poisoned(v));
        *ptr = v;
    }
    T *address() const { return ptr; }
    T get() const { return *ptr; }
    operator T() const { return get(); }
    T operator->() const { return get(); }
  private:
    /* Default construction and assignment are disallowed; a handle must
     * always wrap a valid location. */
    FakeMutableHandle() {}
    T *ptr;
    template <typename S>
    void operator=(S v) MOZ_DELETE;
    void operator=(const FakeMutableHandle<T>& other) MOZ_DELETE;
};
/*
 * Types for a variable that either should or shouldn't be rooted, depending on
 * the template parameter allowGC. Used for implementing functions that can
 * operate on either rooted or unrooted data.
 *
 * The toHandle() and toMutableHandle() functions are for calling functions
 * which require handle types and are only called in the CanGC case. These
 * allow the calling code to type check.
 */
enum AllowGC {
    NoGC = 0,
    CanGC = 1
};
/* Primary template is intentionally empty; only the two specializations
 * below are usable. */
template <typename T, AllowGC allowGC>
class MaybeRooted
{
};
/* CanGC: use the real rooting types; handle conversions are identities. */
template <typename T> class MaybeRooted<T, CanGC>
{
  public:
    typedef JS::Handle<T> HandleType;
    typedef JS::Rooted<T> RootType;
    typedef JS::MutableHandle<T> MutableHandleType;
    static inline JS::Handle<T> toHandle(HandleType v) {
        return v;
    }
    static inline JS::MutableHandle<T> toMutableHandle(MutableHandleType v) {
        return v;
    }
};
/* NoGC: use the non-rooting substitutes; converting to a real handle is a
 * programming error and crashes deliberately. */
template <typename T> class MaybeRooted<T, NoGC>
{
  public:
    typedef T HandleType;
    typedef FakeRooted<T> RootType;
    typedef FakeMutableHandle<T> MutableHandleType;
    static JS::Handle<T> toHandle(HandleType v) {
        MOZ_CRASH("Bad conversion");
    }
    static JS::MutableHandle<T> toMutableHandle(MutableHandleType v) {
        MOZ_CRASH("Bad conversion");
    }
};
} /* namespace js */
namespace JS {
/*
 * Converting constructors for Handle/MutableHandle. The EnableIf dummy
 * parameter restricts each constructor to source types S that are
 * convertible to T, so e.g. Handle<JSObject*> can be built from
 * Rooted<JSFunction*> but not from an unrelated pointer type.
 */
template <typename T> template <typename S>
inline
Handle<T>::Handle(const Rooted<S> &root,
                  typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy)
{
    ptr = reinterpret_cast<const T *>(root.address());
}
template <typename T> template <typename S>
inline
Handle<T>::Handle(const PersistentRooted<S> &root,
                  typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy)
{
    ptr = reinterpret_cast<const T *>(root.address());
}
template <typename T> template <typename S>
inline
Handle<T>::Handle(MutableHandle<S> &root,
                  typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy)
{
    ptr = reinterpret_cast<const T *>(root.address());
}
/* MutableHandle wraps the root's storage directly; the static_assert pins
 * the layout assumption that a handle is exactly one pointer wide. */
template <typename T>
inline
MutableHandle<T>::MutableHandle(Rooted<T> *root)
{
    static_assert(sizeof(MutableHandle<T>) == sizeof(T *),
                  "MutableHandle must be binary compatible with T*.");
    ptr = root->address();
}
template <typename T>
inline
MutableHandle<T>::MutableHandle(PersistentRooted<T> *root)
{
    static_assert(sizeof(MutableHandle<T>) == sizeof(T *),
                  "MutableHandle must be binary compatible with T*.");
    ptr = root->address();
}
/*
* A copyable, assignable global GC root type with arbitrary lifetime, an
* infallible constructor, and automatic unrooting on destruction.
*
* These roots can be used in heap-allocated data structures, so they are not
* associated with any particular JSContext or stack. They are registered with
* the JSRuntime itself, without locking, so they require a full JSContext to be
* constructed, not one of its more restricted superclasses.
*
 * Note that you must not use a PersistentRooted in an object owned by a JS
 * object:
*
* Whenever one object whose lifetime is decided by the GC refers to another
* such object, that edge must be traced only if the owning JS object is traced.
* This applies not only to JS objects (which obviously are managed by the GC)
* but also to C++ objects owned by JS objects.
*
* If you put a PersistentRooted in such a C++ object, that is almost certainly
* a leak. When a GC begins, the referent of the PersistentRooted is treated as
* live, unconditionally (because a PersistentRooted is a *root*), even if the
* JS object that owns it is unreachable. If there is any path from that
* referent back to the JS object, then the C++ object containing the
* PersistentRooted will not be destructed, and the whole blob of objects will
* not be freed, even if there are no references to them from the outside.
*
* In the context of Firefox, this is a severe restriction: almost everything in
* Firefox is owned by some JS object or another, so using PersistentRooted in
* such objects would introduce leaks. For these kinds of edges, Heap<T> or
* TenuredHeap<T> would be better types. It's up to the implementor of the type
* containing Heap<T> or TenuredHeap<T> members to make sure their referents get
* marked when the object itself is marked.
*/
template<typename T>
class PersistentRooted : private mozilla::LinkedListElement<PersistentRooted<T> > {
    friend class mozilla::LinkedList<PersistentRooted>;
    friend class mozilla::LinkedListElement<PersistentRooted>;
    friend struct js::gc::PersistentRootedMarker<T>;
    /* Add this root to the runtime-wide list of persistent roots for T. */
    void registerWithRuntime(JSRuntime *rt) {
        JS::shadow::Runtime *srt = JS::shadow::Runtime::asShadowRuntime(rt);
        srt->getPersistentRootedList<T>().insertBack(this);
    }
  public:
    /* Construct holding the type's sentinel initial value, registered with
     * the runtime obtained from |cx|. */
    explicit PersistentRooted(JSContext *cx) : ptr(js::GCMethods<T>::initial())
    {
        registerWithRuntime(js::GetRuntime(cx));
    }
    PersistentRooted(JSContext *cx, T initial) : ptr(initial)
    {
        registerWithRuntime(js::GetRuntime(cx));
    }
    /* Same as above but registered directly with a JSRuntime. */
    explicit PersistentRooted(JSRuntime *rt) : ptr(js::GCMethods<T>::initial())
    {
        registerWithRuntime(rt);
    }
    PersistentRooted(JSRuntime *rt, T initial) : ptr(initial)
    {
        registerWithRuntime(rt);
    }
    PersistentRooted(const PersistentRooted &rhs)
      : mozilla::LinkedListElement<PersistentRooted<T> >(),
        ptr(rhs.ptr)
    {
        /*
         * Copy construction takes advantage of the fact that the original
         * is already inserted, and simply adds itself to whatever list the
         * original was on - no JSRuntime pointer needed.
         *
         * This requires mutating rhs's links, but those should be 'mutable'
         * anyway. C++ doesn't let us declare mutable base classes.
         */
        const_cast<PersistentRooted &>(rhs).setNext(this);
    }
    /*
     * Important: Return a reference here so passing a Rooted<T> to
     * something that takes a |const T&| is not a GC hazard.
     */
    operator const T&() const { return ptr; }
    T operator->() const { return ptr; }
    T *address() { return &ptr; }
    const T *address() const { return &ptr; }
    T &get() { return ptr; }
    const T &get() const { return ptr; }
    /* Assignment checks for poisoned (invalid) GC values in debug builds. */
    T &operator=(T value) {
        MOZ_ASSERT(!js::GCMethods<T>::poisoned(value));
        ptr = value;
        return ptr;
    }
    /* NOTE(review): unlike operator=(T) above, this path performs no
     * poisoned() assertion on the copied value — confirm intentional. */
    T &operator=(const PersistentRooted &value) {
        ptr = value;
        return ptr;
    }
    void set(T value) {
        MOZ_ASSERT(!js::GCMethods<T>::poisoned(value));
        ptr = value;
    }
    bool operator!=(const T &other) const { return ptr != other; }
    bool operator==(const T &other) const { return ptr == other; }
  private:
    T ptr;
};
/*
 * A heap-stored JSObject pointer wrapper that performs the incremental-GC
 * write barrier on mutation. Callers must invoke finalize() before the
 * destructor runs; the destructor only asserts the value was cleared.
 */
class JS_PUBLIC_API(ObjectPtr)
{
    Heap<JSObject *> value;
  public:
    ObjectPtr() : value(nullptr) {}
    explicit ObjectPtr(JSObject *obj) : value(obj) {}
    /* Always call finalize before the destructor. */
    ~ObjectPtr() { MOZ_ASSERT(!value); }
    /* Clear the pointer, applying the pre-write barrier if a GC requires it. */
    void finalize(JSRuntime *rt) {
        if (IsIncrementalBarrierNeeded(rt))
            IncrementalObjectBarrier(value);
        value = nullptr;
    }
    void init(JSObject *obj) { value = obj; }
    JSObject *get() const { return value; }
    void writeBarrierPre(JSRuntime *rt) {
        IncrementalObjectBarrier(value);
    }
    bool isAboutToBeFinalized();
    /* Barrier the old value before overwriting it. */
    ObjectPtr &operator=(JSObject *obj) {
        IncrementalObjectBarrier(value);
        value = obj;
        return *this;
    }
    void trace(JSTracer *trc, const char *name);
    JSObject &operator*() const { return *value; }
    JSObject *operator->() const { return value; }
    operator JSObject *() const { return value; }
};
} /* namespace JS */
namespace js {
/* Base class for automatic read-only object rooting during compilation. */
class CompilerRootNode
{
  protected:
    /* Nodes are created unlinked; |next| is threaded by the owner. */
    explicit CompilerRootNode(js::gc::Cell *ptr) : next(nullptr), ptr_(ptr) {}
  public:
    /* Location of the rooted cell pointer, for the GC to update/trace. */
    void **address() { return (void **)&ptr_; }
  public:
    CompilerRootNode *next;
  protected:
    js::gc::Cell *ptr_;
};
} /* namespace js */
#endif /* js_RootingAPI_h */
| {
"pile_set_name": "Github"
} |
/**
* nat API
* nat API generated from nat.yang
*
* OpenAPI spec version: 1.0.0
*
* NOTE: This class is auto generated by the swagger code generator program.
* https://github.com/polycube-network/swagger-codegen.git
* branch polycube
*/
/* Do not edit this file manually */
/*
* RulePortForwardingJsonObject.h
*
*
*/
#pragma once
#include "JsonObjectBase.h"
#include "RulePortForwardingEntryJsonObject.h"
#include <vector>
namespace io {
namespace swagger {
namespace server {
namespace model {
/// <summary>
///
/// </summary>
class RulePortForwardingJsonObject : public JsonObjectBase {
public:
  RulePortForwardingJsonObject();
  /* Populate fields from a parsed JSON document. */
  RulePortForwardingJsonObject(const nlohmann::json &json);
  ~RulePortForwardingJsonObject() final = default;
  /* Serialize this object back to JSON. */
  nlohmann::json toJson() const final;

  /// <summary>
  /// List of port forwarding rules
  /// </summary>
  const std::vector<RulePortForwardingEntryJsonObject>& getEntry() const;
  void addRulePortForwardingEntry(RulePortForwardingEntryJsonObject value);
  /* True once at least one entry has been added/parsed. */
  bool entryIsSet() const;
  void unsetEntry();
private:
  std::vector<RulePortForwardingEntryJsonObject> m_entry;
  bool m_entryIsSet;  /* tracks presence so toJson can omit an unset list */
};
}
}
}
}
| {
"pile_set_name": "Github"
} |
/*!
@file
Defines `boost::hana::find_if`.
@copyright Louis Dionne 2013-2016
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_HANA_FIND_IF_HPP
#define BOOST_HANA_FIND_IF_HPP
#include <boost/hana/fwd/find_if.hpp>
#include <boost/hana/accessors.hpp>
#include <boost/hana/at.hpp>
#include <boost/hana/bool.hpp>
#include <boost/hana/concept/iterable.hpp>
#include <boost/hana/concept/searchable.hpp>
#include <boost/hana/concept/sequence.hpp>
#include <boost/hana/concept/struct.hpp>
#include <boost/hana/config.hpp>
#include <boost/hana/core/dispatch.hpp>
#include <boost/hana/detail/decay.hpp>
#include <boost/hana/drop_while.hpp>
#include <boost/hana/first.hpp>
#include <boost/hana/front.hpp>
#include <boost/hana/functional/compose.hpp>
#include <boost/hana/is_empty.hpp>
#include <boost/hana/length.hpp>
#include <boost/hana/not.hpp>
#include <boost/hana/optional.hpp>
#include <boost/hana/second.hpp>
#include <boost/hana/transform.hpp>
#include <cstddef>
#include <utility>
BOOST_HANA_NAMESPACE_BEGIN
    //! @cond
    // Entry point for hana::find_if(xs, pred): dispatches to the
    // find_if_impl specialization for the tag of Xs, after (optionally)
    // asserting that Xs models the Searchable concept.
    template <typename Xs, typename Pred>
    constexpr auto find_if_t::operator()(Xs&& xs, Pred&& pred) const {
        using S = typename hana::tag_of<Xs>::type;
        using FindIf = BOOST_HANA_DISPATCH_IF(find_if_impl<S>,
            hana::Searchable<S>::value
        );
    #ifndef BOOST_HANA_CONFIG_DISABLE_CONCEPT_CHECKS
        static_assert(hana::Searchable<S>::value,
        "hana::find_if(xs, pred) requires 'xs' to be a Searchable");
    #endif
        return FindIf::apply(static_cast<Xs&&>(xs), static_cast<Pred&&>(pred));
    }
    //! @endcond
//! @endcond
    // Fallback implementation: any tag without its own specialization gets a
    // deleted apply(), producing a hard error at the call site.
    template <typename S, bool condition>
    struct find_if_impl<S, when<condition>> : default_ {
        template <typename ...Args>
        static constexpr auto apply(Args&& ...) = delete;
    };
    namespace detail {
        // Compile-time linear scan: walk indices [0, N) until the predicate's
        // compile-time result becomes true (Done == true) or the sequence is
        // exhausted (i == N with Done == false).
        template <typename Xs, typename Pred, std::size_t i, std::size_t N, bool Done>
        struct advance_until;
        // Step case: evaluate the predicate on element i and recurse with the
        // result as the next Done flag (so a hit is reported at index i + 1).
        template <typename Xs, typename Pred, std::size_t i, std::size_t N>
        struct advance_until<Xs, Pred, i, N, false>
            : advance_until<Xs, Pred, i + 1, N, static_cast<bool>(detail::decay<decltype(
                std::declval<Pred>()(hana::at_c<i>(std::declval<Xs>()))
            )>::type::value)>
        { };
        // Exhausted without a match: yield nothing.
        template <typename Xs, typename Pred, std::size_t N>
        struct advance_until<Xs, Pred, N, N, false> {
            template <typename Ys>
            static constexpr auto apply(Ys&&) {
                return hana::nothing;
            }
        };
        // Matched at index i - 1 (the step case advanced i past the hit):
        // yield just(that element).
        template <typename Xs, typename Pred, std::size_t i, std::size_t N>
        struct advance_until<Xs, Pred, i, N, true> {
            template <typename Ys>
            static constexpr auto apply(Ys&& ys) {
                return hana::just(hana::at_c<i - 1>(static_cast<Ys&&>(ys)));
            }
        };
    }
    // Sequences: length is known at compile time, so delegate the whole scan
    // to detail::advance_until. The runtime predicate value is never needed
    // (its result is a compile-time constant), hence the unnamed Pred&&.
    template <typename S>
    struct find_if_impl<S, when<Sequence<S>::value>> {
        template <typename Xs, typename Pred>
        static constexpr auto apply(Xs&& xs, Pred&&) {
            constexpr std::size_t N = decltype(hana::length(xs))::value;
            return detail::advance_until<Xs&&, Pred&&, 0, N, false>::apply(
                static_cast<Xs&&>(xs)
            );
        }
    };
    // Non-sequence Iterables: first decide at compile time whether dropping
    // elements failing the predicate leaves anything, then fetch the front of
    // the remainder only in the "found" branch (tag-dispatched on hana::bool_).
    template <typename It>
    struct find_if_impl<It, when<hana::Iterable<It>::value && !Sequence<It>::value>> {
        template <typename Xs, typename Pred>
        static constexpr auto find_if_helper(Xs&& xs, Pred&& pred, hana::true_) {
            return hana::just(hana::front(
                hana::drop_while(static_cast<Xs&&>(xs),
                    hana::compose(hana::not_, static_cast<Pred&&>(pred)))
            ));
        }
        template <typename Xs, typename Pred>
        static constexpr auto find_if_helper(Xs&&, Pred&&, hana::false_) {
            return hana::nothing;
        }
        template <typename Xs, typename Pred>
        static constexpr auto apply(Xs&& xs, Pred&& pred) {
            // found <=> drop_while(!pred) leaves a non-empty remainder
            constexpr bool found = !decltype(
                hana::is_empty(hana::drop_while(static_cast<Xs&&>(xs),
                    hana::compose(hana::not_, static_cast<Pred&&>(pred))))
            )::value;
            return find_if_impl::find_if_helper(static_cast<Xs&&>(xs),
                                                static_cast<Pred&&>(pred),
                                                hana::bool_<found>{});
        }
    };
    // Raw arrays: all elements share one type, so the predicate's
    // compile-time result on element 0 decides for the entire array;
    // a match always yields the first element.
    template <typename T, std::size_t N>
    struct find_if_impl<T[N]> {
        template <typename Xs>
        static constexpr auto find_if_helper(Xs&&, hana::false_)
        { return hana::nothing; }
        template <typename Xs>
        static constexpr auto find_if_helper(Xs&& xs, hana::true_)
        { return hana::just(static_cast<Xs&&>(xs)[0]); }
        template <typename Xs, typename Pred>
        static constexpr auto apply(Xs&& xs, Pred&& pred) {
            return find_if_helper(static_cast<Xs&&>(xs),
                hana::bool_c<decltype(
                    static_cast<Pred&&>(pred)(static_cast<Xs&&>(xs)[0])
                )::value>
            );
        }
    };
    namespace struct_detail {
        // Callable that, given an (name, accessor) pair from hana::accessors,
        // applies the accessor to the captured struct instance.
        template <typename X>
        struct get_member {
            X x;
            template <typename Member>
            constexpr decltype(auto) operator()(Member&& member) && {
                return hana::second(static_cast<Member&&>(member))(
                    static_cast<X&&>(x)
                );
            }
        };
    }
    // Structs: search the accessor list by member *name* (pred composed with
    // hana::first), then map the optional result through get_member to turn
    // the matching accessor into the member's value.
    template <typename S>
    struct find_if_impl<S, when<hana::Struct<S>::value>> {
        template <typename X, typename Pred>
        static constexpr decltype(auto) apply(X&& x, Pred&& pred) {
            return hana::transform(
                hana::find_if(hana::accessors<S>(),
                    hana::compose(static_cast<Pred&&>(pred), hana::first)
                ),
                struct_detail::get_member<X>{static_cast<X&&>(x)}
            );
        }
    };
BOOST_HANA_NAMESPACE_END
#endif // !BOOST_HANA_FIND_IF_HPP
| {
"pile_set_name": "Github"
} |
perl ../util/mkerr.pl -conf e_capi.ec -nostatic -staticloader -write e_capi.c
| {
"pile_set_name": "Github"
} |
/**
* Simple tool to create conversion table
*/
#include <gammu.h>
/*
 * Read a Unicode text file named on the command line and print its
 * contents as escaped byte pairs suitable for pasting into a C string
 * conversion table, terminated by \x00\x00.
 *
 * Returns 0 on success, 1 on usage error, 2 if the file cannot be opened.
 */
int main(int argc, char *argv[])
{
	unsigned char InputBuffer[10000], Buffer[10000];
	FILE *file;
	int size, i, j = 0;

	if (argc != 2) {
		printf("Usage: makeconverttable FILE\n");
		return 1;
	}

	file = fopen(argv[1], "rb");
	if (file == NULL) {
		printf("Failed to open file: %s\n", argv[1]);
		return 2;
	}
	/*
	 * Reserve two bytes for the UTF-16 terminator appended below.
	 * Reading sizeof(InputBuffer) - 1 bytes allowed the write to
	 * InputBuffer[size + 1] to land one byte past the end of the array.
	 */
	size = fread(InputBuffer, 1, sizeof(InputBuffer) - 2, file);
	fclose(file);
	InputBuffer[size] = 0;
	InputBuffer[size + 1] = 0;

	ReadUnicodeFile(Buffer, InputBuffer);

	for (i = 0; i < ((int)UnicodeLength(Buffer)); i++) {
		j++;
		/* Break the emitted string literal every 100 code units. */
		if (j == 100) {
			printf("\"\\\n\"");
			j = 0;
		}
		printf("\\x%02x\\x%02x", Buffer[i * 2], Buffer[i * 2 + 1]);
	}
	printf("\\x00\\x00");
	return 0;
}
| {
"pile_set_name": "Github"
} |
from .config import Config
| {
"pile_set_name": "Github"
} |
<script src="https://code.highcharts.com/highcharts.js"></script>
<div id="container" style="height: 400px; min-width: 300px; max-width: 700px; margin: 0 auto;"></div> | {
"pile_set_name": "Github"
} |
from unittest import TestCase
from pippi import dsp, multiband, fx
import numpy as np
import random
class TestMultiband(TestCase):
    """Render-based exercises for pippi.multiband splitting/spreading/smearing."""

    def test_split(self):
        # Split a one-second guitar recording into three frequency bands,
        # render each band, then verify by reconstructing the mix.
        source = dsp.read('tests/sounds/guitar1s.wav')
        split_bands = multiband.split(source, 3)
        for index, band in enumerate(split_bands):
            band.write('tests/renders/multiband_split-band%02d.wav' % index)
        reconstructed = fx.norm(dsp.mix(split_bands), 1)
        reconstructed.write('tests/renders/multiband_split-reconstruct.wav')

    def test_split_and_drift(self):
        # Same split, but with a hann window and a drift amount of 100.
        source = dsp.read('tests/sounds/guitar1s.wav')
        drift_bands = multiband.split(source, 3, 'hann', 100)
        for index, band in enumerate(drift_bands):
            band.write('tests/renders/multiband_split-drift-band%02d.wav' % index)
        reconstructed = fx.norm(dsp.mix(drift_bands), 1)
        reconstructed.write('tests/renders/multiband_split-drift-reconstruct.wav')

    def test_customsplit(self):
        # Split at explicit crossover frequencies instead of evenly.
        source = dsp.read('tests/sounds/guitar1s.wav')
        crossovers = [400, 3000, 3005, 10000]
        custom_bands = multiband.customsplit(source, crossovers)
        for index, band in enumerate(custom_bands):
            band.write('tests/renders/multiband_customsplit-band%02d.wav' % index)
        reconstructed = fx.norm(dsp.mix(custom_bands), 1)
        reconstructed.write('tests/renders/multiband_customsplit-reconstruct.wav')

    def test_spread(self):
        dsp.seed()
        source = dsp.read('tests/sounds/guitar1s.wav')
        spread_out = multiband.spread(source)
        spread_out.write('tests/renders/multiband_spread-guitar-0.5.wav')
        """
        dsp.seed()
        g = dsp.read('tests/sounds/living.wav')
        out = multiband.spread(g)
        out.write('tests/renders/multiband_spread-living-0.5.wav')
        """

    def test_smear(self):
        # Smear one second cut from a longer guitar sample...
        dsp.seed()
        guitar = dsp.read('tests/sounds/guitar10s.wav').rcut(1)
        smeared = multiband.smear(guitar)
        smeared.write('tests/renders/multiband_smear-guitar-0.01.wav')
        # ...and a room-tone recording.
        dsp.seed()
        room = dsp.read('tests/sounds/living.wav')
        smeared = multiband.smear(room)
        smeared.write('tests/renders/multiband_smear-living-0.01.wav')
"pile_set_name": "Github"
} |
/* ====================================================================
* Copyright (c) 1995-2002 Carnegie Mellon University. All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* This work was supported in part by funding from the Defense Advanced
* Research Projects Agency and the National Science Foundation of the
* United States of America, and the CMU Sphinx Speech Consortium.
*
* THIS SOFTWARE IS PROVIDED BY CARNEGIE MELLON UNIVERSITY ``AS IS'' AND
* ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY
* NOR ITS EMPLOYEES BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ====================================================================
*
*/
/*
* feat_1s_c_dd.c -- Cepstral feature stream; Sphinx3 version: single vector of
* 12 cep, 12 dcep, 3 pow, 12 ddcep.
*
* **********************************************
* CMU ARPA Speech Project
*
* Copyright (c) 1996 Carnegie Mellon University.
* ALL RIGHTS RESERVED.
* **********************************************
*
* HISTORY
*
* 10-Jan-96 M K Ravishankar ([email protected]) at Carnegie Mellon University
* Created.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <assert.h>
#include "feat_1s_c_dd.h"
#include <libutil/libutil.h>
#include <libio/libio.h>
#define N_FEAT 1
#define FEAT_DCEP_WIN 2
static int32 feat_size[1];
static int32 basefeatlen;
/*
 * Record the input cepstrum vector length and derive the width of the
 * single output feature stream (cep block + ddcep block => twice the
 * input length).  Returns the cepstrum size that was passed in.
 */
int32 feat_1s_c_dd_cepsize ( int32 veclen )
{
    feat_size[0] = 2 * veclen;
    basefeatlen = veclen;
    return veclen;
}
/*
 * Report the feature-stream layout: *size is pointed at the module's
 * feat_size array (one width per stream) and the number of streams
 * (N_FEAT == 1 for this front end) is returned.
 */
int32 feat_1s_c_dd_featsize (int32 **size)
{
    *size = feat_size;
    return N_FEAT;
}
/*
 * Feature vectors computed from input mfc vectors using this window (+/- window).
 * cep2feat below indexes mfc from -(FEAT_DCEP_WIN+1) to +(FEAT_DCEP_WIN+1),
 * so callers must provide FEAT_DCEP_WIN+1 frames of context on each side.
 */
int32 feat_1s_c_dd_window_size ( void )
{
    return (FEAT_DCEP_WIN+1);
}
/*
 * Build one feature vector from a window of cepstral frames: the base
 * cepstrum (basefeatlen values) followed by its second difference
 * (another basefeatlen values) in feat[0].  mfc must point at the
 * current frame inside a buffer indexable from -FEAT_DCEP_WIN-1 to
 * +FEAT_DCEP_WIN+1 (see feat_1s_c_dd_window_size).
 */
void feat_1s_c_dd_cep2feat (float32 **mfc, float32 **feat)
{
    float32 *f;
    float32 *w1, *w_1, *_w1, *_w_1;
    float32 d1, d2;
    int32 i, j;
    /* CEP */
    memcpy (feat[0], mfc[0], basefeatlen * sizeof(float32));
    /*
     * D2CEP: (mfc[w+1] - mfc[-w+1]) - (mfc[w-1] - mfc[-w-1]),
     * where w = FEAT_DCEP_WIN
     */
    f = feat[0] + basefeatlen;      /* ddcep goes right after the cep block */
    w1   = mfc[ FEAT_DCEP_WIN+1];
    _w1  = mfc[-FEAT_DCEP_WIN+1];
    w_1  = mfc[ FEAT_DCEP_WIN-1];
    _w_1 = mfc[-FEAT_DCEP_WIN-1];
    for (i = 0; i < basefeatlen; i++) {
        d1 = w1[i] - _w1[i];        /* dcep one frame ahead */
        d2 = w_1[i] - _w_1[i];      /* dcep one frame behind */
        f[i] = d1 - d2;
    }
}
| {
"pile_set_name": "Github"
} |
using System.Collections.Generic;
using System.Linq;
using UnityEngine;
using UnityEngine.Timeline;
namespace UnityEditor.Timeline
{
class TimelineClipGroup
{
readonly TimelineClip[] m_Clips;
readonly TimelineClip m_LeftMostClip;
readonly TimelineClip m_RightMostClip;
public TimelineClip[] clips
{
get { return m_Clips; }
}
public double start
{
get { return m_LeftMostClip.start; }
set
{
var offset = value - m_LeftMostClip.start;
foreach (var clip in m_Clips)
clip.start += offset;
}
}
public double end
{
get { return m_RightMostClip.end; }
}
public TimelineClipGroup(IEnumerable<TimelineClip> clips)
{
Debug.Assert(clips != null && clips.Any());
m_Clips = clips.ToArray();
m_LeftMostClip = null;
m_RightMostClip = null;
foreach (var clip in m_Clips)
{
if (m_LeftMostClip == null || clip.start < m_LeftMostClip.start)
m_LeftMostClip = clip;
if (m_RightMostClip == null || clip.end > m_RightMostClip.end)
m_RightMostClip = clip;
}
}
}
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<doc>
<assembly>
<name>System.Web.Http.WebHost</name>
</assembly>
<members>
<member name="T:System.Web.Http.GlobalConfiguration">
<summary> Provides a global <see cref="T:System.Web.Http.HttpConfiguration" /> for ASP.NET applications. </summary>
</member>
<member name="P:System.Web.Http.GlobalConfiguration.Configuration"></member>
<member name="P:System.Web.Http.GlobalConfiguration.DefaultHandler">
<summary> Gets the default message handler that will be called for all requests. </summary>
</member>
<member name="T:System.Web.Http.RouteCollectionExtensions">
<summary> Extension methods for <see cref="T:System.Web.Routing.RouteCollection" /></summary>
</member>
<member name="M:System.Web.Http.RouteCollectionExtensions.MapHttpRoute(System.Web.Routing.RouteCollection,System.String,System.String)">
<summary>Maps the specified route template.</summary>
<returns>A reference to the mapped route.</returns>
<param name="routes">A collection of routes for the application.</param>
<param name="name">The name of the route to map.</param>
<param name="routeTemplate">The route template for the route.</param>
</member>
<member name="M:System.Web.Http.RouteCollectionExtensions.MapHttpRoute(System.Web.Routing.RouteCollection,System.String,System.String,System.Object)">
<summary>Maps the specified route template and sets default route.</summary>
<returns>A reference to the mapped route.</returns>
<param name="routes">A collection of routes for the application.</param>
<param name="name">The name of the route to map.</param>
<param name="routeTemplate">The route template for the route.</param>
<param name="defaults">An object that contains default route values.</param>
</member>
<member name="M:System.Web.Http.RouteCollectionExtensions.MapHttpRoute(System.Web.Routing.RouteCollection,System.String,System.String,System.Object,System.Object)">
<summary>Maps the specified route template and sets default route values and constraints.</summary>
<returns>A reference to the mapped route.</returns>
<param name="routes">A collection of routes for the application.</param>
<param name="name">The name of the route to map.</param>
<param name="routeTemplate">The route template for the route.</param>
<param name="defaults">An object that contains default route values.</param>
<param name="constraints">A set of expressions that specify values for routeTemplate.</param>
</member>
<member name="M:System.Web.Http.RouteCollectionExtensions.MapHttpRoute(System.Web.Routing.RouteCollection,System.String,System.String,System.Object,System.Object,System.Net.Http.HttpMessageHandler)">
<summary>Maps the specified route template and sets default route values, constraints, and end-point message handler.</summary>
<returns>A reference to the mapped route.</returns>
<param name="routes">A collection of routes for the application.</param>
<param name="name">The name of the route to map.</param>
<param name="routeTemplate">The route template for the route.</param>
<param name="defaults">An object that contains default route values.</param>
<param name="constraints">A set of expressions that specify values for routeTemplate.</param>
<param name="handler">The handler to which the request will be dispatched.</param>
</member>
<member name="T:System.Web.Http.WebHost.HttpControllerHandler">
      <summary> A <see cref="T:System.Web.IHttpAsyncHandler" /> that passes ASP.NET requests into the <see cref="T:System.Web.Http.HttpServer" /> pipeline and writes the result back. </summary>
</member>
<member name="M:System.Web.Http.WebHost.HttpControllerHandler.#ctor(System.Web.Routing.RouteData)">
<summary> Initializes a new instance of the <see cref="T:System.Web.Http.WebHost.HttpControllerHandler" /> class. </summary>
<param name="routeData">The route data.</param>
</member>
<member name="M:System.Web.Http.WebHost.HttpControllerHandler.BeginProcessRequest(System.Web.HttpContextBase,System.AsyncCallback,System.Object)">
<summary> Begins the process request. </summary>
<returns>An <see cref="T:System.IAsyncResult" /> that contains information about the status of the process. </returns>
<param name="httpContextBase">The HTTP context base.</param>
<param name="callback">The callback.</param>
<param name="state">The state.</param>
</member>
<member name="M:System.Web.Http.WebHost.HttpControllerHandler.EndProcessRequest(System.IAsyncResult)">
<summary> Provides an asynchronous process End method when the process ends. </summary>
<param name="result">An <see cref="T:System.IAsyncResult" /> that contains information about the status of the process.</param>
</member>
<member name="P:System.Web.Http.WebHost.HttpControllerHandler.IsReusable">
<summary> Gets a value indicating whether another request can use the <see cref="T:System.Web.IHttpHandler" /> instance. </summary>
</member>
<member name="M:System.Web.Http.WebHost.HttpControllerHandler.ProcessRequest(System.Web.HttpContextBase)">
<summary> Processes the request. </summary>
<param name="httpContextBase">The HTTP context base.</param>
</member>
<member name="M:System.Web.Http.WebHost.HttpControllerHandler.System#Web#IHttpAsyncHandler#BeginProcessRequest(System.Web.HttpContext,System.AsyncCallback,System.Object)">
<summary> Begins processing the request. </summary>
<returns>An <see cref="T:System.IAsyncResult" /> that contains information about the status of the process. </returns>
<param name="httpContext">The HTTP context.</param>
<param name="callback">The callback.</param>
<param name="state">The state.</param>
</member>
<member name="M:System.Web.Http.WebHost.HttpControllerHandler.System#Web#IHttpAsyncHandler#EndProcessRequest(System.IAsyncResult)">
<summary> Provides an asynchronous process End method when the process ends. </summary>
<param name="result">An <see cref="T:System.IAsyncResult" /> that contains information about the status of the process.</param>
</member>
<member name="P:System.Web.Http.WebHost.HttpControllerHandler.System#Web#IHttpHandler#IsReusable">
<summary> Gets a value indicating whether another request can use the <see cref="T:System.Web.IHttpHandler" /> instance. </summary>
</member>
<member name="M:System.Web.Http.WebHost.HttpControllerHandler.System#Web#IHttpHandler#ProcessRequest(System.Web.HttpContext)">
<summary> Processes the request. </summary>
<param name="httpContext">The HTTP context base.</param>
</member>
<member name="T:System.Web.Http.WebHost.HttpControllerRouteHandler">
<summary> A <see cref="T:System.Web.Routing.IRouteHandler" /> that returns instances of <see cref="T:System.Web.Http.WebHost.HttpControllerHandler" /> that can pass requests to a given <see cref="T:System.Web.Http.HttpServer" /> instance. </summary>
</member>
<member name="M:System.Web.Http.WebHost.HttpControllerRouteHandler.#ctor">
<summary> Initializes a new instance of the <see cref="T:System.Web.Http.WebHost.HttpControllerRouteHandler" /> class. </summary>
</member>
<member name="M:System.Web.Http.WebHost.HttpControllerRouteHandler.GetHttpHandler(System.Web.Routing.RequestContext)">
<summary> Provides the object that processes the request. </summary>
<returns> An object that processes the request. </returns>
<param name="requestContext">An object that encapsulates information about the request.</param>
</member>
<member name="P:System.Web.Http.WebHost.HttpControllerRouteHandler.Instance">
<summary> Gets the singleton <see cref="T:System.Web.Http.WebHost.HttpControllerRouteHandler" /> instance. </summary>
</member>
<member name="M:System.Web.Http.WebHost.HttpControllerRouteHandler.System#Web#Routing#IRouteHandler#GetHttpHandler(System.Web.Routing.RequestContext)">
<summary> Provides the object that processes the request. </summary>
<returns> An object that processes the request. </returns>
<param name="requestContext">An object that encapsulates information about the request.</param>
</member>
<member name="T:System.Web.Http.WebHost.PreApplicationStartCode">
<summary>Provides a registration point for the simple membership pre-application start code.</summary>
</member>
<member name="M:System.Web.Http.WebHost.PreApplicationStartCode.Start">
<summary>Registers the simple membership pre-application start code.</summary>
</member>
<member name="T:System.Web.Http.WebHost.WebHostBufferPolicySelector">
<summary>Represents the web host buffer policy selector.</summary>
</member>
<member name="M:System.Web.Http.WebHost.WebHostBufferPolicySelector.#ctor">
<summary>Initializes a new instance of the <see cref="T:System.Web.Http.WebHost.WebHostBufferPolicySelector" /> class.</summary>
</member>
<member name="M:System.Web.Http.WebHost.WebHostBufferPolicySelector.UseBufferedInputStream(System.Object)">
<summary>Gets a value that indicates whether the host should buffer the entity body of the HTTP request.</summary>
<returns>true if buffering should be used; otherwise a streamed request should be used.</returns>
<param name="hostContext">The host context.</param>
</member>
<member name="M:System.Web.Http.WebHost.WebHostBufferPolicySelector.UseBufferedOutputStream(System.Net.Http.HttpResponseMessage)">
<summary>Uses a buffered output stream for the web host.</summary>
<returns>A buffered output stream.</returns>
<param name="response">The response.</param>
</member>
</members>
</doc> | {
"pile_set_name": "Github"
} |
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// Normalization table generator.
// Data read from the web.
// See forminfo.go for a description of the trie values associated with each rune.
package main
import (
"bytes"
"flag"
"fmt"
"io"
"log"
"sort"
"strconv"
"strings"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/triegen"
"golang.org/x/text/internal/ucd"
)
// main drives the full table-generation pipeline. The call order matters:
// the raw UCD data is loaded first, CCC values are compacted before any
// table is emitted, per-character fields are completed for both form
// families (canonical and compatibility), and only then are the derived
// counts verified and the output files written.
func main() {
	gen.Init()
	loadUnicodeData()
	compactCCC()
	loadCompositionExclusions()
	completeCharFields(FCanonical)
	completeCharFields(FCompatibility)
	computeNonStarterCounts()
	verifyComputed()
	printChars()
	testDerived()
	printTestdata()
	makeTables()
}
// Command-line flags controlling the generator's output and verbosity.
var (
	// tablelist selects which tables makeTables emits.
	tablelist = flag.String("tables",
		"all",
		"comma-separated list of which tables to generate; "+
			"can be 'decomp', 'recomp', 'info' and 'all'")
	// test: NOTE(review) — this flag is not referenced anywhere in this
	// file (testDerived and printTestdata run unconditionally from main);
	// confirm whether it is still needed.
	test = flag.Bool("test",
		false,
		"test existing tables against DerivedNormalizationProps and generate test data for regression testing")
	// verbose makes printChars dump every parsed character to stdout.
	verbose = flag.Bool("verbose",
		false,
		"write data to stdout as it is parsed")
)
const MaxChar = 0x10FFFF // anything above this shouldn't exist

// Quick Check properties of runes allow us to quickly
// determine whether a rune may occur in a normal form.
// For a given normal form, a rune may be guaranteed to occur
// verbatim (QC=Yes), may or may not combine with another
// rune (QC=Maybe), or may not occur (QC=No).
type QCResult int

const (
	QCUnknown QCResult = iota
	QCYes
	QCNo
	QCMaybe
)

// String returns the human-readable name of the quick-check result, or a
// sentinel for values outside the known range.
func (r QCResult) String() string {
	names := [...]string{QCYes: "Yes", QCNo: "No", QCMaybe: "Maybe"}
	if r > QCUnknown && int(r) < len(names) {
		return names[r]
	}
	return "***UNKNOWN***"
}
// Form-family indices into Char.forms.
const (
	FCanonical     = iota // NFC or NFD
	FCompatibility        // NFKC or NFKD
	FNumberOfFormTypes
)

// Mode indices into FormInfo.quickCheck.
const (
	MComposed   = iota // NFC or NFKC
	MDecomposed        // NFD or NFKD
	MNumberOfModes
)
// This contains only the properties we're interested in.
type Char struct {
	name      string
	codePoint rune  // if zero, this index is not a valid code point.
	ccc       uint8 // canonical combining class
	origCCC   uint8 // ccc value before compactCCC remapped it to a compact index

	excludeInComp bool // from CompositionExclusions.txt
	compatDecomp  bool // it has a compatibility expansion

	nTrailingNonStarters uint8
	nLeadingNonStarters  uint8 // must be equal to trailing if non-zero

	forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility

	state State
}

// chars holds one entry per possible code point, indexed by rune value.
var chars = make([]Char, MaxChar+1)

// cccMap maps the compacted CCC index back to the original CCC value;
// it is populated by compactCCC and consumed by makeTables.
var cccMap = make(map[uint8]uint8)
// String renders a multi-line, human-readable dump of c, including the
// per-form information for both the canonical (NFC) and compatibility
// (NFKC) families.
func (c Char) String() string {
	var sb strings.Builder
	fmt.Fprintf(&sb, "%U [%s]:\n", c.codePoint, c.name)
	fmt.Fprintf(&sb, "  ccc: %v\n", c.ccc)
	fmt.Fprintf(&sb, "  excludeInComp: %v\n", c.excludeInComp)
	fmt.Fprintf(&sb, "  compatDecomp: %v\n", c.compatDecomp)
	fmt.Fprintf(&sb, "  state: %v\n", c.state)
	sb.WriteString("  NFC:\n")
	fmt.Fprint(&sb, c.forms[FCanonical])
	sb.WriteString("  NFKC:\n")
	fmt.Fprint(&sb, c.forms[FCompatibility])
	return sb.String()
}
// In UnicodeData.txt, some ranges are marked like this:
//	3400;<CJK Ideograph Extension A, First>;Lo;0;L;;;;;N;;;;;
//	4DB5;<CJK Ideograph Extension A, Last>;Lo;0;L;;;;;N;;;;;
// parseCharacter keeps a state variable indicating the weirdness.
type State int

const (
	SNormal State = iota // known to be zero for the type
	SFirst
	SLast
	SMissing
)

// lastChar: NOTE(review) — this variable is not referenced anywhere in
// this file; it appears to be left over from an older line-based parser
// (the ucd package is used instead). Confirm before removing.
var lastChar = rune('\u0000')

// isValid reports whether c corresponds to an assigned, usable code point.
func (c Char) isValid() bool {
	return c.codePoint != 0 && c.state != SMissing
}
// FormInfo holds the per-normal-form (canonical or compatibility)
// properties of a character used to build the trie values.
type FormInfo struct {
	quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed
	verified   [MNumberOfModes]bool     // index: MComposed or MDecomposed

	combinesForward  bool // May combine with rune on the right
	combinesBackward bool // May combine with rune on the left
	isOneWay         bool // Never appears in result
	inDecomp         bool // Some decompositions result in this char.
	decomp           Decomposition
	expandedDecomp   Decomposition
}
// String renders a multi-line, human-readable dump of the form info,
// matching the indentation used by Char.String.
func (f FormInfo) String() string {
	var sb strings.Builder
	fmt.Fprintf(&sb, "    quickCheck[C]: %v\n", f.quickCheck[MComposed])
	fmt.Fprintf(&sb, "    quickCheck[D]: %v\n", f.quickCheck[MDecomposed])
	fmt.Fprintf(&sb, "    cmbForward: %v\n", f.combinesForward)
	fmt.Fprintf(&sb, "    cmbBackward: %v\n", f.combinesBackward)
	fmt.Fprintf(&sb, "    isOneWay: %v\n", f.isOneWay)
	fmt.Fprintf(&sb, "    inDecomp: %v\n", f.inDecomp)
	fmt.Fprintf(&sb, "    decomposition: %X\n", f.decomp)
	fmt.Fprintf(&sb, "    expandedDecomp: %X\n", f.expandedDecomp)
	return sb.String()
}
// Decomposition is a sequence of runes forming the (possibly expanded)
// decomposition of a character.
type Decomposition []rune

// parseDecomposition parses a space-separated list of hexadecimal code
// points, as found in the decomposition field of UnicodeData.txt. When
// skipfirst is set, the leading field (e.g. a "<compat>" tag) is dropped
// before parsing. It returns the runes parsed so far plus the first
// parse error encountered, if any.
func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
	fields := strings.Split(s, " ")
	if skipfirst && len(fields) > 0 {
		fields = fields[1:]
	}
	for _, field := range fields {
		cp, err := strconv.ParseUint(field, 16, 64)
		if err != nil {
			return a, err
		}
		a = append(a, rune(cp))
	}
	return a, nil
}
// loadUnicodeData reads UnicodeData.txt and fills in the name, code point,
// CCC and decomposition fields of the global chars table.
//
// The decomposition field is first parsed as a plain (canonical) list of
// code points; if that fails and the field is non-empty, it is re-parsed
// with the first token skipped, which drops a compatibility tag such as
// "<compat>" and marks the character as having a compatibility expansion.
func loadUnicodeData() {
	f := gen.OpenUCDFile("UnicodeData.txt")
	defer f.Close()
	p := ucd.New(f)
	for p.Next() {
		r := p.Rune(ucd.CodePoint)
		char := &chars[r]

		char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass))
		decmap := p.String(ucd.DecompMapping)

		exp, err := parseDecomposition(decmap, false)
		isCompat := false
		if err != nil {
			if len(decmap) > 0 {
				// Retry with the leading tag skipped; a second failure
				// means the field is genuinely malformed.
				exp, err = parseDecomposition(decmap, true)
				if err != nil {
					log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
				}
				isCompat = true
			}
		}

		char.name = p.String(ucd.Name)
		char.codePoint = r
		char.forms[FCompatibility].decomp = exp
		if !isCompat {
			// A canonical decomposition is also a compatibility one.
			char.forms[FCanonical].decomp = exp
		} else {
			char.compatDecomp = true
		}
		if len(decmap) > 0 {
			// NOTE(review): redundant with the unconditional assignment
			// above; kept as-is to preserve behavior.
			char.forms[FCompatibility].decomp = exp
		}
	}
	if err := p.Err(); err != nil {
		log.Fatal(err)
	}
}
// compactCCC converts the sparse set of CCC values to a contiguous one,
// reducing the number of bits needed from 8 to 6.
//
// The inverse mapping (compact index -> original CCC) is recorded in
// cccMap so the original values can be emitted later by makeTables; the
// original per-character value is preserved in Char.origCCC.
func compactCCC() {
	// Collect the set of CCC values that actually occur.
	m := make(map[uint8]uint8)
	for i := range chars {
		m[chars[i].ccc] = 0
	}
	// Sort the distinct values so compact indices are assigned in
	// increasing CCC order, preserving the relative ordering of classes.
	cccs := []int{}
	for v := range m {
		cccs = append(cccs, int(v))
	}
	sort.Ints(cccs)
	for i, c := range cccs {
		cccMap[uint8(i)] = uint8(c) // compact index -> original CCC
		m[uint8(c)] = uint8(i)      // original CCC -> compact index
	}
	// Rewrite each character's ccc with its compact index, keeping the
	// original value for later output.
	for i := range chars {
		c := &chars[i]
		c.origCCC = c.ccc
		c.ccc = m[c.ccc]
	}
	if len(m) >= 1<<6 {
		log.Fatalf("too many different CCC values: %d >= 64", len(m))
	}
}
// CompositionExclusions.txt has form:
// 0958    # ...
// See http://unicode.org/reports/tr44/ for full explanation
//
// loadCompositionExclusions marks every listed code point as excluded
// from composition; a duplicate entry is treated as a fatal data error.
func loadCompositionExclusions() {
	f := gen.OpenUCDFile("CompositionExclusions.txt")
	defer f.Close()
	p := ucd.New(f)
	for p.Next() {
		c := &chars[p.Rune(0)]
		if c.excludeInComp {
			log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
		}
		c.excludeInComp = true
	}
	if e := p.Err(); e != nil {
		log.Fatal(e)
	}
}
// hasCompatDecomp reports whether any level of the recursive
// decomposition of r contains a compatibility expansion.
// In this case, the character may not occur in NFK*.
func hasCompatDecomp(r rune) bool {
	// Iterative depth-first search over the decomposition graph.
	pending := []rune{r}
	for len(pending) > 0 {
		cur := pending[len(pending)-1]
		pending = pending[:len(pending)-1]
		c := &chars[cur]
		if c.compatDecomp {
			return true
		}
		pending = append(pending, c.forms[FCompatibility].decomp...)
	}
	return false
}
// Hangul related constants.
const (
	HangulBase = 0xAC00
	HangulEnd  = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28)

	JamoLBase = 0x1100
	JamoLEnd  = 0x1113
	JamoVBase = 0x1161
	JamoVEnd  = 0x1176
	JamoTBase = 0x11A8
	JamoTEnd  = 0x11C3

	JamoLVTCount = 19 * 21 * 28
	JamoTCount   = 28
)

// isHangul reports whether r is a precomposed Hangul syllable.
func isHangul(r rune) bool {
	return r >= HangulBase && r < HangulEnd
}

// isHangulWithoutJamoT reports whether r is a Hangul syllable with no
// trailing (T) jamo, i.e. an LV syllable.
func isHangulWithoutJamoT(r rune) bool {
	if !isHangul(r) {
		return false
	}
	offset := r - HangulBase
	return offset < JamoLVTCount && offset%JamoTCount == 0
}
// ccc returns the canonical combining class of r as currently stored in
// chars (after compactCCC has run, this is the compacted 6-bit index,
// not the original UCD value).
func ccc(r rune) uint8 {
	return chars[r].ccc
}
// insertOrdered appends r to b, keeping runes with non-zero CCC ordered
// by Canonical Combining Class. The insertion is stable: runes of equal
// class keep their insertion order, and a starter (ccc 0) always goes to
// the end without any comparisons.
func insertOrdered(b Decomposition, r rune) Decomposition {
	b = append(b, 0) // make room for the new element
	pos := len(b) - 1
	if cc := ccc(r); cc > 0 {
		// Shift entries with a strictly larger class one slot right.
		for pos > 0 && ccc(b[pos-1]) > cc {
			b[pos] = b[pos-1]
			pos--
		}
	}
	b[pos] = r
	return b
}
// Recursively decompose.
//
// decomposeRecursive appends the full (recursive) decomposition of r for
// the given form to d, inserting each leaf rune in CCC order via
// insertOrdered. A rune with no decomposition is itself a leaf.
func decomposeRecursive(form int, r rune, d Decomposition) Decomposition {
	dcomp := chars[r].forms[form].decomp
	if len(dcomp) == 0 {
		return insertOrdered(d, r)
	}
	for _, c := range dcomp {
		d = decomposeRecursive(form, c, d)
	}
	return d
}
// completeCharFields derives, for one form family (FCanonical or
// FCompatibility), all the per-character fields that follow from the raw
// UCD data: expanded decompositions, one-way (non-recomposing) flags,
// combining behavior, and quick-check values. The phases must run in
// order, as each one reads fields written by the previous.
func completeCharFields(form int) {
	// Phase 0: pre-expand decomposition.
	for i := range chars {
		f := &chars[i].forms[form]
		if len(f.decomp) == 0 {
			continue
		}
		exp := make(Decomposition, 0)
		for _, c := range f.decomp {
			exp = decomposeRecursive(form, c, exp)
		}
		f.expandedDecomp = exp
	}

	// Phase 1: composition exclusion, mark decomposition.
	for i := range chars {
		c := &chars[i]
		f := &c.forms[form]

		// Marks script-specific exclusions and version restricted.
		f.isOneWay = c.excludeInComp

		// Singletons
		f.isOneWay = f.isOneWay || len(f.decomp) == 1

		// Non-starter decompositions
		if len(f.decomp) > 1 {
			chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0
			f.isOneWay = f.isOneWay || chk
		}

		// Runes that decompose into more than two runes.
		f.isOneWay = f.isOneWay || len(f.decomp) > 2

		if form == FCompatibility {
			f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint)
		}

		// Everything this character decomposes into occurs in some
		// decomposition.
		for _, r := range f.decomp {
			chars[r].forms[form].inDecomp = true
		}
	}

	// Phase 2: forward and backward combining.
	for i := range chars {
		c := &chars[i]
		f := &c.forms[form]

		// A recomposing pair makes its first element combine forward and
		// its second combine backward (unless either is itself one-way).
		if !f.isOneWay && len(f.decomp) == 2 {
			f0 := &chars[f.decomp[0]].forms[form]
			f1 := &chars[f.decomp[1]].forms[form]
			if !f0.isOneWay {
				f0.combinesForward = true
			}
			if !f1.isOneWay {
				f1.combinesBackward = true
			}
		}
		if isHangulWithoutJamoT(rune(i)) {
			f.combinesForward = true
		}
	}

	// Phase 3: quick check values.
	for i := range chars {
		c := &chars[i]
		f := &c.forms[form]

		switch {
		case len(f.decomp) > 0:
			f.quickCheck[MDecomposed] = QCNo
		case isHangul(rune(i)):
			f.quickCheck[MDecomposed] = QCNo
		default:
			f.quickCheck[MDecomposed] = QCYes
		}
		switch {
		case f.isOneWay:
			f.quickCheck[MComposed] = QCNo
		// (i & 0xffff00) == JamoLBase selects the 0x1100..0x11FF block,
		// which contains all three conjoining jamo ranges (L, V, T).
		case (i & 0xffff00) == JamoLBase:
			f.quickCheck[MComposed] = QCYes
			if JamoLBase <= i && i < JamoLEnd {
				f.combinesForward = true
			}
			if JamoVBase <= i && i < JamoVEnd {
				f.quickCheck[MComposed] = QCMaybe
				f.combinesBackward = true
				f.combinesForward = true
			}
			if JamoTBase <= i && i < JamoTEnd {
				f.quickCheck[MComposed] = QCMaybe
				f.combinesBackward = true
			}
		case !f.combinesBackward:
			f.quickCheck[MComposed] = QCYes
		default:
			f.quickCheck[MComposed] = QCMaybe
		}
	}
}
// computeNonStarterCounts fills in nLeadingNonStarters and
// nTrailingNonStarters for every character, based on its (compatibility)
// expanded decomposition. Hangul syllables are special-cased, and several
// invariants assumed by the normalization code are checked fatally.
func computeNonStarterCounts() {
	// Phase 4: leading and trailing non-starter count
	for i := range chars {
		c := &chars[i]

		runes := []rune{rune(i)}
		// We always use FCompatibility so that the CGJ insertion points do not
		// change for repeated normalizations with different forms.
		if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 {
			runes = exp
		}
		// We consider runes that combine backwards to be non-starters for the
		// purpose of Stream-Safe Text Processing.
		for _, r := range runes {
			if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
				break
			}
			c.nLeadingNonStarters++
		}
		for i := len(runes) - 1; i >= 0; i-- {
			if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
				break
			}
			c.nTrailingNonStarters++
		}
		if c.nTrailingNonStarters > 3 {
			log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes)
		}

		if isHangul(rune(i)) {
			c.nTrailingNonStarters = 2
			if isHangulWithoutJamoT(rune(i)) {
				c.nTrailingNonStarters = 1
			}
		}

		if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t {
			log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t)
		}
		if t := c.nTrailingNonStarters; t > 3 {
			// FIX: the format string has two verbs (%U and %d) but only one
			// argument was passed; the rune index was missing.
			log.Fatalf("%U: number of trailing non-starters is %d > 3", i, t)
		}
	}
}
func printBytes(w io.Writer, b []byte, name string) {
fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b))
fmt.Fprintf(w, "var %s = [...]byte {", name)
for i, c := range b {
switch {
case i%64 == 0:
fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63)
case i%8 == 0:
fmt.Fprintf(w, "\n")
}
fmt.Fprintf(w, "0x%.2X, ", c)
}
fmt.Fprint(w, "\n}\n\n")
}
// See forminfo.go for format.
//
// makeEntry packs the trie value for a character without a decomposition:
//   0x40       set for precomposed Hangul syllables
//   0x20       set when the form combines forward
//   0x10/0x18  composed quick-check No / Maybe (Yes leaves both clear)
//   0x04       set when the decomposed quick-check is No
//   low bits   number of trailing non-starters
func makeEntry(f *FormInfo, c *Char) uint16 {
	e := uint16(0)
	if r := c.codePoint; HangulBase <= r && r < HangulEnd {
		e |= 0x40
	}
	if f.combinesForward {
		e |= 0x20
	}
	if f.quickCheck[MDecomposed] == QCNo {
		e |= 0x4
	}
	switch f.quickCheck[MComposed] {
	case QCYes:
	case QCNo:
		e |= 0x10
	case QCMaybe:
		e |= 0x18
	default:
		log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed])
	}
	e |= uint16(c.nTrailingNonStarters)
	return e
}
// decompSet keeps track of unique decompositions, grouped by whether
// the decomposition is followed by a trailing and/or leading CCC.
type decompSet [7]map[string]bool

// Category indices into decompSet; lastDecomp is one past the last set.
const (
	normalDecomp = iota
	firstMulti
	firstCCC
	endMulti
	firstLeadingCCC
	firstCCCZeroExcept
	firstStarterWithNLead
	lastDecomp
)

// cname[i] labels the byte offset emitted AFTER category i has been
// written, i.e. the start of category i+1 (see the output loop in
// printCharInfoTables); that is why the list starts at "firstMulti".
var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"}

// makeDecompSet returns a decompSet with every category map initialized.
func makeDecompSet() decompSet {
	m := decompSet{}
	for i := range m {
		m[i] = make(map[string]bool)
	}
	return m
}

// insert records the encoded decomposition s under category key.
func (m *decompSet) insert(key int, s string) {
	m[key][s] = true
}
// printCharInfoTables emits the per-character info tables: the uniqued,
// length-prefixed decomposition strings ("decomps") plus one trie per
// form family ("nfc", "nfkc") whose values either point into decomps or
// carry a packed makeEntry value (flagged with 0x8000). It returns the
// total byte size of the generated tables.
func printCharInfoTables(w io.Writer) int {
	// mkstr encodes one decomposition: a header byte (length plus QC and
	// combines-forward flags), the UTF-8 of the expanded decomposition,
	// and optional trailing/leading CCC bytes. It also classifies the
	// entry into a decompSet category.
	mkstr := func(r rune, f *FormInfo) (int, string) {
		d := f.expandedDecomp
		s := string([]rune(d))
		if max := 1 << 6; len(s) >= max {
			const msg = "%U: too many bytes in decomposition: %d >= %d"
			log.Fatalf(msg, r, len(s), max)
		}
		head := uint8(len(s))
		if f.quickCheck[MComposed] != QCYes {
			head |= 0x40
		}
		if f.combinesForward {
			head |= 0x80
		}
		s = string([]byte{head}) + s

		lccc := ccc(d[0])
		tccc := ccc(d[len(d)-1])
		cc := ccc(r)
		if cc != 0 && lccc == 0 && tccc == 0 {
			log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
		}
		if tccc < lccc && lccc != 0 {
			const msg = "%U: lccc (%d) must be <= tcc (%d)"
			log.Fatalf(msg, r, lccc, tccc)
		}
		index := normalDecomp
		nTrail := chars[r].nTrailingNonStarters
		nLead := chars[r].nLeadingNonStarters
		if tccc > 0 || lccc > 0 || nTrail > 0 {
			// tccc is stored shifted left by 2; the low bits carry the
			// trailing non-starter count (safe: compactCCC keeps ccc < 64).
			tccc <<= 2
			tccc |= nTrail
			s += string([]byte{tccc})
			index = endMulti
			for _, r := range d[1:] {
				if ccc(r) == 0 {
					index = firstCCC
				}
			}
			if lccc > 0 || nLead > 0 {
				s += string([]byte{lccc})
				if index == firstCCC {
					log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
				}
				index = firstLeadingCCC
			}
			if cc != lccc {
				if cc != 0 {
					log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
				}
				index = firstCCCZeroExcept
			}
		} else if len(d) > 1 {
			index = firstMulti
		}
		return index, s
	}

	decompSet := makeDecompSet()
	const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail.
	decompSet.insert(firstStarterWithNLead, nLeadStr)

	// Store the uniqued decompositions in a byte buffer,
	// preceded by their byte length.
	for _, c := range chars {
		for _, f := range c.forms {
			if len(f.expandedDecomp) == 0 {
				continue
			}
			if f.combinesBackward {
				log.Fatalf("%U: combinesBackward and decompose", c.codePoint)
			}
			index, s := mkstr(c.codePoint, &f)
			decompSet.insert(index, s)
		}
	}

	decompositions := bytes.NewBuffer(make([]byte, 0, 10000))
	size := 0
	positionMap := make(map[string]uint16)
	decompositions.WriteString("\000")
	fmt.Fprintln(w, "const (")
	// Emit categories in order; each constant records the offset at which
	// the NEXT category begins (see the cname comment above).
	for i, m := range decompSet {
		sa := []string{}
		for s := range m {
			sa = append(sa, s)
		}
		sort.Strings(sa)
		for _, s := range sa {
			p := decompositions.Len()
			decompositions.WriteString(s)
			positionMap[s] = uint16(p)
		}
		if cname[i] != "" {
			fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len())
		}
	}
	fmt.Fprintln(w, "maxDecomp = 0x8000")
	fmt.Fprintln(w, ")")
	b := decompositions.Bytes()
	printBytes(w, b, "decomps")
	size += len(b)

	varnames := []string{"nfc", "nfkc"}
	for i := 0; i < FNumberOfFormTypes; i++ {
		trie := triegen.NewTrie(varnames[i])

		for r, c := range chars {
			f := c.forms[i]
			d := f.expandedDecomp
			if len(d) != 0 {
				_, key := mkstr(c.codePoint, &f)
				trie.Insert(rune(r), uint64(positionMap[key]))
				if c.ccc != ccc(d[0]) {
					// We assume the lead ccc of a decomposition !=0 in this case.
					if ccc(d[0]) == 0 {
						log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
					}
				}
			} else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward {
				// Handle cases where it can't be detected that the nLead should be equal
				// to nTrail.
				trie.Insert(c.codePoint, uint64(positionMap[nLeadStr]))
			} else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 {
				// 0x8000 marks a packed info value rather than a decomps offset.
				trie.Insert(c.codePoint, uint64(0x8000|v))
			}
		}
		sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]}))
		if err != nil {
			log.Fatal(err)
		}
		size += sz
	}
	return size
}
// contains reports whether s occurs in sa.
func contains(sa []string, s string) bool {
	for i := range sa {
		if sa[i] == s {
			return true
		}
	}
	return false
}
// makeTables writes tables.go for package norm: version constants, the
// CCC remap table, and (depending on the -tables flag) the char-info
// tables and the recomposition map.
func makeTables() {
	w := &bytes.Buffer{}

	size := 0
	if *tablelist == "" {
		return
	}
	list := strings.Split(*tablelist, ",")
	if *tablelist == "all" {
		list = []string{"recomp", "info"}
	}

	// Compute maximum decomposition size.
	max := 0
	for _, c := range chars {
		if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max {
			max = n
		}
	}

	fmt.Fprintln(w, "const (")
	fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.")
	fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion())
	fmt.Fprintln(w)
	fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform")
	fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at")
	fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that")
	fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.")
	// NOTE(review): string(0x034F) is an int-to-string (rune) conversion;
	// modern go vet prefers string(rune(0x034F)), which is equivalent here.
	// 0x034F is COMBINING GRAPHEME JOINER (3 bytes in UTF-8).
	fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max)
	fmt.Fprintln(w, ")\n")

	// Print the CCC remap table.
	size += len(cccMap)
	fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap))
	for i := 0; i < len(cccMap); i++ {
		if i%8 == 0 {
			fmt.Fprintln(w)
		}
		fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)])
	}
	fmt.Fprintln(w, "\n}\n")

	if contains(list, "info") {
		size += printCharInfoTables(w)
	}

	if contains(list, "recomp") {
		// Note that we use 32 bit keys, instead of 64 bit.
		// This clips the bits of three entries, but we know
		// this won't cause a collision. The compiler will catch
		// any changes made to UnicodeData.txt that introduces
		// a collision.
		// Note that the recomposition map for NFC and NFKC
		// are identical.

		// Recomposition map
		nrentries := 0
		for _, c := range chars {
			f := c.forms[FCanonical]
			if !f.isOneWay && len(f.decomp) > 0 {
				nrentries++
			}
		}
		sz := nrentries * 8
		size += sz
		fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz)
		fmt.Fprintln(w, "var recompMap = map[uint32]rune{")
		for i, c := range chars {
			f := c.forms[FCanonical]
			d := f.decomp
			// Only recomposing pairs appear; Phase 1 guarantees such a
			// decomposition has exactly two elements (d[0], d[1]).
			if !f.isOneWay && len(d) > 0 {
				key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1]))
				fmt.Fprintf(w, "0x%.8X: 0x%.4X,\n", key, i)
			}
		}
		fmt.Fprintf(w, "}\n\n")
	}

	fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
	gen.WriteGoFile("tables.go", "norm", w.Bytes())
}
// printChars dumps every valid character to stdout when -verbose is set.
func printChars() {
	if !*verbose {
		return
	}
	for _, c := range chars {
		// isValid() already excludes SMissing; the explicit check keeps
		// the original condition intact.
		if c.isValid() && c.state != SMissing {
			fmt.Println(c)
		}
	}
}
// verifyComputed does various consistency tests.
//
// It checks, fatally, the invariants the normalization code relies on:
// NF*D quick-check is No exactly for decomposing (non-Hangul) runes,
// NF*C Maybe matches combinesBackward, and leading non-starter counts
// agree with ccc/combinesBackward except for a small known exception list.
func verifyComputed() {
	for i, c := range chars {
		for _, f := range c.forms {
			isNo := (f.quickCheck[MDecomposed] == QCNo)
			if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) {
				log.Fatalf("%U: NF*D QC must be No if rune decomposes", i)
			}

			isMaybe := f.quickCheck[MComposed] == QCMaybe
			if f.combinesBackward != isMaybe {
				log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i)
			}
			if len(f.decomp) > 0 && f.combinesForward && isMaybe {
				log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i)
			}

			if len(f.expandedDecomp) != 0 {
				continue
			}
			if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b {
				// We accept these runes to be treated differently (it only affects
				// segment breaking in iteration, most likely on improper use), but
				// reconsider if more characters are added.
				// U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L;<narrow> 3099;;;;N;;;;;
				// U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L;<narrow> 309A;;;;N;;;;;
				// U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<compat> 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;;
				// U+318E HANGUL LETTER ARAEAE;Lo;0;L;<compat> 11A1;;;;N;HANGUL LETTER ALAE AE;;;;
				// U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<narrow> 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;;
				// U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L;<narrow> 3163;;;;N;;;;;
				if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) {
					log.Fatalf("%U: nLead was %v; want %v", i, a, b)
				}
			}
		}
		nfc := c.forms[FCanonical]
		nfkc := c.forms[FCompatibility]
		if nfc.combinesBackward != nfkc.combinesBackward {
			log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint)
		}
	}
}
// Use values in DerivedNormalizationProps.txt to compare against the
// values we computed.
// DerivedNormalizationProps.txt has form:
// 00C0..00C5    ; NFD_QC; N # ...
// 0374          ; NFD_QC; N # ...
// See http://unicode.org/reports/tr44/ for full explanation
//
// testDerived logs (non-fatally) every quick-check mismatch, marks each
// property it saw as verified, and finally checks that every unverified
// quick-check value defaults to Yes.
func testDerived() {
	f := gen.OpenUCDFile("DerivedNormalizationProps.txt")
	defer f.Close()
	p := ucd.New(f)
	for p.Next() {
		r := p.Rune(0)
		c := &chars[r]

		var ftype, mode int
		qt := p.String(1)
		switch qt {
		case "NFC_QC":
			ftype, mode = FCanonical, MComposed
		case "NFD_QC":
			ftype, mode = FCanonical, MDecomposed
		case "NFKC_QC":
			ftype, mode = FCompatibility, MComposed
		case "NFKD_QC":
			ftype, mode = FCompatibility, MDecomposed
		default:
			// Not a quick-check property; skip the line.
			continue
		}
		var qr QCResult
		switch p.String(2) {
		case "Y":
			qr = QCYes
		case "N":
			qr = QCNo
		case "M":
			qr = QCMaybe
		default:
			log.Fatalf(`Unexpected quick check value "%s"`, p.String(2))
		}
		if got := c.forms[ftype].quickCheck[mode]; got != qr {
			log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
		}
		c.forms[ftype].verified[mode] = true
	}
	if err := p.Err(); err != nil {
		log.Fatal(err)
	}
	// Any unspecified value must be QCYes. Verify this.
	for i, c := range chars {
		for j, fd := range c.forms {
			for k, qr := range fd.quickCheck {
				if !fd.verified[k] && qr != QCYes {
					m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n"
					log.Printf(m, i, j, k, qr, c.name)
				}
			}
		}
	}
}
// testHeader is the literal prefix of the generated data_test.go file:
// the quick-check constants, the formData/runeData types, and the two
// constructors (f for forms that agree, g for forms that differ) used by
// the testData entries that printTestdata appends.
var testHeader = `const (
	Yes = iota
	No
	Maybe
)

type formData struct {
	qc              uint8
	combinesForward bool
	decomposition   string
}

type runeData struct {
	r      rune
	ccc    uint8
	nLead  uint8
	nTrail uint8
	f      [2]formData // 0: canonical; 1: compatibility
}

func f(qc uint8, cf bool, dec string) [2]formData {
	return [2]formData{{qc, cf, dec}, {qc, cf, dec}}
}

func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData {
	return [2]formData{{qc, cf, d}, {qck, cfk, dk}}
}

var testData = []runeData{
	`
// printTestdata writes data_test.go for package norm: testHeader followed
// by one runeData entry per character, run-length compressed by emitting
// an entry only when it differs from the previous one.
func printTestdata() {
	type lastInfo struct {
		ccc    uint8
		nLead  uint8
		nTrail uint8
		f      string
	}

	last := lastInfo{}
	w := &bytes.Buffer{}
	fmt.Fprintf(w, testHeader)
	for r, c := range chars {
		f := c.forms[FCanonical]
		qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
		f = c.forms[FCompatibility]
		qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
		s := ""
		if d == dk && qc == qck && cf == cfk {
			s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d)
		} else {
			s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk)
		}
		// NOTE(review): the dedup key uses the compacted c.ccc while the
		// emitted entry prints c.origCCC — equivalent for deduplication
		// since compactCCC's remapping is injective.
		current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s}
		if last != current {
			fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s)
			last = current
		}
	}
	fmt.Fprintln(w, "}")
	gen.WriteGoFile("data_test.go", "norm", w.Bytes())
}
| {
"pile_set_name": "Github"
} |
import { extend } from '../../core/util';
import PROJ4326 from './Projection.EPSG4326';
/**
 * Projection used by CGCS2000 (China Geodetic Coordinate System 2000),
 * identified by EPSG code 4490. It reuses the EPSG:4326 implementation,
 * overriding only the code string.
 *
 * @class
 * @category geo
 * @protected
 * @memberOf projection
 * @name EPSG4490
 * @mixes projection.EPSG4326
 * @mixes measurer.WGS84Sphere
 */
export default extend({}, PROJ4326, /** @lends projection.EPSG4490 */ {
    /**
     * "EPSG:4490", Code of the projection
     * @type {String}
     * @constant
     */
    code: 'EPSG:4490'
});
| {
"pile_set_name": "Github"
} |
/* Generated by RuntimeBrowser.
 */

// JSExport bridge protocol exposing device settings to JavaScript.
// NOTE(review): the CGSize struct is emitted with generated field names
// (x1, x2) by the class-dump tool — presumably width and height; confirm
// against the original framework headers before relying on them.
@protocol _IKJSDeviceSettings <IKJSDeviceSettings, JSExport>

@required

- (IKJSRestrictions *)Restrictions;
- (struct CGSize { double x1; double x2; })Screen;
- (NSString *)name;
- (NSString *)preferredVideoFormat;
- (NSString *)preferredVideoPreviewFormat;
- (struct CGSize { double x1; double x2; })screen;

@end
| {
"pile_set_name": "Github"
} |
{
"comments": [],
"story": {
"body": null,
"comments": [],
"commentsCount": 0,
"id": "9052187",
"points": 5,
"source": "caltech.edu",
"submitted": "19 minutes ago",
"submitter": "ascertain",
"tag": null,
"title": "A Visual Approach to Calculus Problems (2000)",
"url": "http://www.its.caltech.edu/~mamikon/VisualCalc.html"
}
} | {
"pile_set_name": "Github"
} |
*[34mbar[0m[34m*[0m[34m[1][0m
[34m[1]: [0m[34m/url[0m
| {
"pile_set_name": "Github"
} |
/*
* (c) Copyright Christian P. Fries, Germany. Contact: [email protected].
*
* Created on 15.07.2012
*/
package net.finmath.timeseries.models.parametric;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optimization.GoalType;
import org.apache.commons.math3.optimization.PointValuePair;
import net.finmath.timeseries.HistoricalSimulationModel;
/**
 * Displaced log-normal process with constant volatility.
*
* This class estimate the process
* \[
* \mathrm{d} \log(X + a) = \frac{\sigma}{b + a} \mathrm{d}W(t)
* \]
 * where \( a > -\min(X(t_{i})) \) and thus \( X + a > 0 \), and \( b = 1 - \min(X(t_{i})) \), and
* \( \sigma \) is a constant.
*
* The choice of b ensures that b+a ≥ 1.
* For a=0 we have a log-normal process with volatility σ/(b + a).
* For a=infinity we have a normal process with volatility σ.
*
* @author Christian Fries
* @version 1.0
*/
public class DisplacedLognormal implements HistoricalSimulationModel {
private final double[] values;               // historical time series X(t_i); window indices refer to this array
private final double lowerBoundDisplacement; // smallest admissible displacement a, chosen so values[i]+a >= 1 on the window
private final double upperBoundDisplacement = 10000000;
private final int windowIndexStart;          // first index (inclusive) of the estimation window
private final int windowIndexEnd;            // last index (inclusive) of the estimation window
// NOTE(review): maxIterations is not referenced in the visible code —
// presumably intended for the optimizer configuration; verify.
private final int maxIterations = 1000000;
/**
 * Creates the model from a full time series, using the whole series as the
 * estimation window and the smallest admissible displacement.
 *
 * @param values The historical values X(t_i).
 */
public DisplacedLognormal(final double[] values) {
	// Delegate to the windowed constructor, which performs the identical
	// minimum search and displacement-bound computation (removes the
	// duplicated loop previously copied across all four constructors).
	this(values, 0, values.length-1);
}
/**
 * Creates the model from a full time series with a caller-supplied lower
 * bound for the displacement, using the whole series as the estimation
 * window. The effective bound is never below the admissible minimum.
 *
 * @param values The historical values X(t_i).
 * @param lowerBoundDisplacement Requested lower bound for the displacement a.
 */
public DisplacedLognormal(final double[] values, final double lowerBoundDisplacement) {
	// Delegate to the windowed constructor, which performs the identical
	// minimum search and bound clamping (removes duplicated logic).
	this(values, lowerBoundDisplacement, 0, values.length-1);
}
/**
 * Creates the model on a sub-window [windowIndexStart, windowIndexEnd] of
 * the given series, using the smallest admissible displacement for that
 * window (ensuring values[i] + displacement >= 1).
 *
 * @param values The historical values X(t_i).
 * @param windowIndexStart First index (inclusive) of the estimation window.
 * @param windowIndexEnd Last index (inclusive) of the estimation window.
 */
public DisplacedLognormal(final double[] values, final int windowIndexStart, final int windowIndexEnd) {
	this.values = values;
	this.windowIndexStart = windowIndexStart;
	this.windowIndexEnd = windowIndexEnd;
	double minimumValue = Double.MAX_VALUE;
	for(int index = windowIndexStart; index <= windowIndexEnd; index++) {
		minimumValue = Math.min(minimumValue, values[index]);
	}
	this.lowerBoundDisplacement = -minimumValue+1;
}
/**
 * Creates the model on a sub-window [windowIndexStart, windowIndexEnd] of
 * the given series with a caller-supplied lower bound for the displacement.
 * The effective bound is clamped so that values[i] + displacement >= 1 on
 * the window.
 *
 * @param values The historical values X(t_i).
 * @param lowerBoundDisplacement Requested lower bound for the displacement a.
 * @param windowIndexStart First index (inclusive) of the estimation window.
 * @param windowIndexEnd Last index (inclusive) of the estimation window.
 */
public DisplacedLognormal(final double[] values, final double lowerBoundDisplacement, final int windowIndexStart, final int windowIndexEnd) {
	this.values = values;
	this.windowIndexStart = windowIndexStart;
	this.windowIndexEnd = windowIndexEnd;
	double minimumValue = Double.MAX_VALUE;
	for(int index = windowIndexStart; index <= windowIndexEnd; index++) {
		minimumValue = Math.min(minimumValue, values[index]);
	}
	this.lowerBoundDisplacement = Math.max(-minimumValue+1, lowerBoundDisplacement);
}
/**
 * Returns a new model on the same data restricted to the given window;
 * the displacement lower bound is recomputed from that window.
 *
 * @param windowIndexStart First index (inclusive) of the estimation window.
 * @param windowIndexEnd Last index (inclusive) of the estimation window.
 * @return A new {@code DisplacedLognormal} on the given window.
 */
@Override
public HistoricalSimulationModel getCloneWithWindow(final int windowIndexStart, final int windowIndexEnd) {
	return new DisplacedLognormal(values, windowIndexStart, windowIndexEnd);
}
/**
 * Returns a new model on the same data restricted to the given window,
 * with a caller-supplied lower bound for the displacement.
 *
 * @param lowerBoundDisplacement Requested lower bound for the displacement a.
 * @param windowIndexStart First index (inclusive) of the estimation window.
 * @param windowIndexEnd Last index (inclusive) of the estimation window.
 * @return A new {@code DisplacedLognormal} on the given window.
 */
public HistoricalSimulationModel getCloneWithWindow(final double lowerBoundDisplacement, final int windowIndexStart, final int windowIndexEnd) {
	return new DisplacedLognormal(values, lowerBoundDisplacement, windowIndexStart, windowIndexEnd);
}
/**
 * Computes the Gaussian log-likelihood of the displaced log-normal model
 * on the estimation window, using a single volatility estimated directly
 * from the sample of scaled log-returns of X + displacement.
 *
 * Note: omega, alpha and beta are not used by this constant-volatility
 * likelihood; they are kept in the signature for compatibility with the
 * GARCH-style callers (NOTE(review): confirm callers rely on this
 * signature before changing it).
 *
 * @param omega Unused (see note above).
 * @param alpha Unused (see note above).
 * @param beta Unused (see note above).
 * @param displacement The displacement a applied to the values.
 * @return The log-likelihood of the observed window under the model.
 */
public double getLogLikelihoodForParameters(final double omega, final double alpha, final double beta, final double displacement)
{
	double logLikelihood = 0.0;
	final double volScaling = (1+Math.abs(displacement));
	// Sample estimate of the (scaled) squared volatility of the log-returns.
	double volSquaredEstimate = 0.0;
	for (int i = windowIndexStart+1; i <= windowIndexEnd-1; i++) {
		final double eval = volScaling * (Math.log((values[i]+displacement)/(values[i-1]+displacement)));
		volSquaredEstimate += eval*eval;
	}
	volSquaredEstimate /= windowIndexEnd-windowIndexStart;
	// Accumulate the log-density of each scaled log-return. (The dead
	// locals "eval" and "volSquared" of the original were removed; they
	// were assigned but never read.)
	for (int i = windowIndexStart+1; i <= windowIndexEnd-1; i++) {
		final double evalNext = volScaling * (Math.log((values[i+1]+displacement)/(values[i]+displacement)));
		logLikelihood += - Math.log(volSquaredEstimate) - 2 * Math.log((values[i+1]+displacement)/volScaling) - evalNext*evalNext / volSquaredEstimate;
	}
	logLikelihood += - Math.log(2 * Math.PI) * (windowIndexEnd-windowIndexStart);
	logLikelihood *= 0.5;
	return logLikelihood;
}
/**
 * Runs a GARCH(1,1)-style variance recursion h = omega + alpha*e^2 + beta*h
 * over the scaled log-returns of X + displacement and returns the last
 * conditional variance h.
 *
 * @param omega GARCH constant term.
 * @param alpha Weight of the squared innovation.
 * @param beta Weight of the previous variance.
 * @param displacement The displacement a applied to the values.
 * @return The conditional variance after the last observation in the window.
 */
public double getLastResidualForParameters(final double omega, final double alpha, final double beta, final double displacement) {
	final double volScaling = (1+Math.abs(displacement));
	// Start from omega/(1-alpha-beta), the stationary level of the
	// recursion when alpha+beta < 1.
	double h = omega / (1.0 - alpha - beta);
	for (int i = windowIndexStart+1; i <= windowIndexEnd; i++) {
		final double eval = volScaling * (Math.log((values[i]+displacement)/(values[i-1]+displacement)));
		//			double eval = volScaling * (values[i]-values[i-1])/(values[i-1]+displacement);
		h = omega + alpha * eval * eval + beta * h;
	}
	return h;
}
/**
 * Returns quantiles of the one-period-ahead predictive distribution of X,
 * obtained by applying the empirical residuals (normalized historical
 * log-returns) to the last value of the window.
 *
 * Note: omega, alpha and beta are not used by this constant-volatility
 * prediction; they are kept for interface compatibility.
 *
 * @param omega Unused.
 * @param alpha Unused.
 * @param beta Unused.
 * @param displacement The displacement a applied to the values.
 * @param quantiles Quantile levels in (0,1), e.g. {0.05, 0.5, 0.95}.
 * @return One predicted value of X per requested quantile level.
 */
public double[] getQuantilPredictionsForParameters(final double omega, final double alpha, final double beta, final double displacement, final double[] quantiles) {
	final double[] szenarios = new double[windowIndexEnd-windowIndexStart+1-1];
	final double volScaling = (1+Math.abs(displacement));
	// Estimate the (scaled) squared volatility from the sample of scaled log-returns.
	double volSquaredEstimate = 0.0;
	for (int i = windowIndexStart+1; i <= windowIndexEnd-1; i++) {
		final double eval = volScaling * (Math.log((values[i]+displacement)/(values[i-1]+displacement)));
		volSquaredEstimate += eval*eval;
	}
	volSquaredEstimate /= windowIndexEnd-windowIndexStart;
	// vol is constant over the loop; the original recomputed the same
	// value on every iteration and kept an unused local "eval".
	final double vol = Math.sqrt(volSquaredEstimate) / volScaling;
	// Collect the normalized historical log-returns (empirical residuals).
	for (int i = windowIndexStart+1; i <= windowIndexEnd; i++) {
		final double y = Math.log((values[i]+displacement)/(values[i-1]+displacement));
		szenarios[i-windowIndexStart-1] = y / vol;
	}
	java.util.Arrays.sort(szenarios);
	final double[] quantileValues = new double[quantiles.length];
	for(int i=0; i<quantiles.length; i++) {
		final double quantile = quantiles[i];
		final double quantileIndex = szenarios.length * quantile - 1;
		final int quantileIndexLo = (int)quantileIndex;
		final int quantileIndexHi = quantileIndexLo+1;
		// Linear interpolation between neighboring order statistics.
		// FIX: the upper clamp was Math.min(quantileIndexHi, szenarios.length),
		// which allows an index equal to the array length and throws
		// ArrayIndexOutOfBoundsException for quantile levels near 1.
		final double szenarioRelativeChange =
				(quantileIndexHi-quantileIndex) * Math.exp(szenarios[Math.max(quantileIndexLo,0)] * vol)
				+ (quantileIndex-quantileIndexLo) * Math.exp(szenarios[Math.min(quantileIndexHi,szenarios.length-1)] * vol);
		final double quantileValue = (values[windowIndexEnd]+displacement) * szenarioRelativeChange - displacement;
		quantileValues[i] = quantileValue;
	}
	return quantileValues;
}
/* (non-Javadoc)
* @see net.finmath.timeseries.HistoricalSimulationModel#getBestParameters()
*/
@Override
public Map<String, Object> getBestParameters() {
return getBestParameters(null);
}
/**
 * Calibrates the (displaced) GARCH(1,1) parameters by maximum likelihood using a
 * CMA-ES optimizer and returns them together with derived diagnostics
 * (likelihood, last conditional volatility, selected quantile predictions).
 *
 * @param guess Optional initial guess with keys "Omega", "Alpha", "Beta",
 *        "Displacement"; may be null to use built-in defaults.
 *        NOTE(review): a non-null map missing one of these keys throws a
 *        NullPointerException on unboxing — confirm callers always supply all four.
 * @return Map with keys "Omega", "Alpha", "Beta", "Displacement", "Likelihood",
 *         "Vol", "Quantile=1%", "Quantile=5%", "Quantile=50%".
 */
@Override
public Map<String, Object> getBestParameters(final Map<String, Object> guess) {
	// Create the objective function for the solver
	class GARCHMaxLikelihoodFunction implements MultivariateFunction, Serializable {
		private static final long serialVersionUID = 7072187082052755854L;
		@Override
		public double value(final double[] variables) {
			/*
			 * Transform variables: The solver variables are in (-\infty, \infty).
			 * We transform the variable to the admissible domain for GARCH, that is
			 * omega > 0, 0 < alpha < 1, 0 < beta < (1-alpha), displacement > lowerBoundDisplacement ??????
			 * ???? usually for GARCH the restrictions are written like omega > 0, alpha > 0, beta > 0, and alpha + beta < 1
			 */
			// mucorr = alpha+beta in (0,1); muema = beta/(alpha+beta) in (0,1) — the
			// double-exponential maps the unconstrained solver variables into (0,1).
			final double omega = Math.exp(variables[0]);
			final double mucorr = Math.exp(-Math.exp(-variables[1]));
			final double muema = Math.exp(-Math.exp(-variables[2]));
			final double beta = mucorr * muema;
			final double alpha = mucorr - beta;
			// double alpha = 1.0/(1.0+Math.exp(-variables[1]));
			// double beta = (1.0-alpha)*1.0/(1.0+Math.exp(-variables[2]));
			final double displacementNormed = 1.0/(1.0+Math.exp(-variables[3]));
			final double displacement = (upperBoundDisplacement-lowerBoundDisplacement)*displacementNormed+lowerBoundDisplacement;
			double logLikelihood = getLogLikelihoodForParameters(omega,alpha,beta,displacement);
			// Penalty to prevent solver from hitting the bounds
			logLikelihood -= Math.max(1E-30-omega,0)/1E-30;
			logLikelihood -= Math.max(1E-30-alpha,0)/1E-30;
			logLikelihood -= Math.max((alpha-1)+1E-30,0)/1E-30;
			logLikelihood -= Math.max(1E-30-beta,0)/1E-30;
			logLikelihood -= Math.max((beta-1)+1E-30,0)/1E-30;
			logLikelihood -= Math.max(1E-30-displacementNormed,0)/1E-30;
			logLikelihood -= Math.max((displacementNormed-1)+1E-30,0)/1E-30;
			return logLikelihood;
		}
	}
	final GARCHMaxLikelihoodFunction objectiveFunction = new GARCHMaxLikelihoodFunction();
	// Create a guess for the solver
	double guessOmega = 1.0;
	double guessAlpha = 0.2;
	double guessBeta = 0.2;
	double guessDisplacement = (lowerBoundDisplacement + upperBoundDisplacement) / 2.0;
	if(guess != null) {
		// A guess was provided, use that one
		guessOmega = (Double)guess.get("Omega");
		guessAlpha = (Double)guess.get("Alpha");
		guessBeta = (Double)guess.get("Beta");
		guessDisplacement = (Double)guess.get("Displacement");
	}
	// Constrain guess to admissible range
	guessOmega = restrictToOpenSet(guessOmega, 0.0, Double.MAX_VALUE);
	guessAlpha = restrictToOpenSet(guessAlpha, 0.0, 1.0);
	guessBeta = restrictToOpenSet(guessBeta, 0.0, 1.0-guessAlpha);
	guessDisplacement = restrictToOpenSet(guessDisplacement, lowerBoundDisplacement, upperBoundDisplacement);
	final double guessMucorr = guessAlpha + guessBeta;
	final double guessMuema = guessBeta / (guessAlpha+guessBeta);
	// Transform guess to solver coordinates (inverse of the mapping in value() above)
	final double[] guessParameters = new double[4];
	guessParameters[0] = Math.log(guessOmega);
	guessParameters[1] = -Math.log(-Math.log(guessMucorr));
	guessParameters[2] = -Math.log(-Math.log(guessMuema));
	guessParameters[3] = -Math.log(1.0/((guessDisplacement-lowerBoundDisplacement)/(upperBoundDisplacement-lowerBoundDisplacement))-1.0);
	// Seek optimal parameter configuration
	// org.apache.commons.math3.optimization.direct.BOBYQAOptimizer optimizer2 = new org.apache.commons.math3.optimization.direct.BOBYQAOptimizer(6);
	final org.apache.commons.math3.optimization.direct.CMAESOptimizer optimizer2 = new org.apache.commons.math3.optimization.direct.CMAESOptimizer();
	double[] bestParameters = null;
	try {
		final PointValuePair result = optimizer2.optimize(
				maxIterations,
				objectiveFunction,
				GoalType.MAXIMIZE,
				guessParameters /* start point */
				);
		bestParameters = result.getPoint();
	} catch(final org.apache.commons.math3.exception.MathIllegalStateException e) {
		// Retry with new guess. This guess corresponds to omaga=1, alpha=0.5; beta=0.25; displacement=1+lowerBoundDisplacement;
		final double[] guessParameters2 = {0.0, 0.0, 0.0, 10.0};
		/* PointValuePair result = optimizer2.optimize(
				maxIterations,
				objectiveFunction,
				GoalType.MAXIMIZE,
				guessParameters2
				);*/
		// NOTE(review): failure is only reported on stdout and the fallback point is
		// returned un-optimized — consider proper logging / rethrowing upstream.
		System.out.println("Solver failed");
		bestParameters = guessParameters2;//result.getPoint();
	}
	// Transform parameters to GARCH parameters (same mapping as in value() above)
	final double omega = Math.exp(bestParameters[0]);
	final double mucorr = Math.exp(-Math.exp(-bestParameters[1]));
	final double muema = Math.exp(-Math.exp(-bestParameters[2]));
	final double beta = mucorr * muema;
	final double alpha = mucorr - beta;
	final double displacementNormed = 1.0/(1.0+Math.exp(-bestParameters[3]));
	final double displacement = (upperBoundDisplacement-lowerBoundDisplacement)*displacementNormed+lowerBoundDisplacement;
	final double[] quantiles = {0.01, 0.05, 0.5};
	final double[] quantileValues = this.getQuantilPredictionsForParameters(omega, alpha, beta, displacement, quantiles);
	final Map<String, Object> results = new HashMap<>();
	results.put("Omega", omega);
	results.put("Alpha", alpha);
	results.put("Beta", beta);
	results.put("Displacement", displacement);
	results.put("Likelihood", this.getLogLikelihoodForParameters(omega, alpha, beta, displacement));
	results.put("Vol", Math.sqrt(this.getLastResidualForParameters(omega, alpha, beta, displacement)));
	results.put("Quantile=1%", quantileValues[0]);
	results.put("Quantile=5%", quantileValues[1]);
	results.put("Quantile=50%", quantileValues[2]);
	return results;
}
/**
 * Nudges a value into the open interval (lowerBond, upperBound) by clamping it
 * strictly inside the bounds with a small relative-plus-absolute margin (1E-15).
 *
 * @param value The value to restrict.
 * @param lowerBond Exclusive lower bound.
 * @param upperBound Exclusive upper bound.
 * @return The value, moved just inside the open interval if it was outside.
 */
private static double restrictToOpenSet(final double value, final double lowerBond, final double upperBound) {
	final double openLower = lowerBond * (1.0+Math.signum(lowerBond)*1E-15) + 1E-15;
	final double openUpper = upperBound * (1.0-Math.signum(upperBound)*1E-15) - 1E-15;
	return Math.min(Math.max(value, openLower), openUpper);
}
}
| {
"pile_set_name": "Github"
} |
// Build a random 2x3 system matrix and a random 2x2 right-hand side.
Matrix<float,2,3> m = Matrix<float,2,3>::Random();
Matrix2f y = Matrix2f::Random();
cout << "Here is the matrix m:" << endl << m << endl;
cout << "Here is the matrix y:" << endl << y << endl;
// Solve m*x = y via full-pivoting LU; the unknown x is 3x2 (underdetermined system).
Matrix<float,3,2> x = m.fullPivLu().solve(y);
// solve() does not itself signal inconsistency, so verify the residual explicitly.
if((m*x).isApprox(y))
{
  cout << "Here is a solution x to the equation mx=y:" << endl << x << endl;
}
else
  cout << "The equation mx=y does not have any solution." << endl;
| {
"pile_set_name": "Github"
} |
<?php
namespace Aws\ResourceGroups;
use Aws\AwsClient;
/**
 * This client is used to interact with the **AWS Resource Groups** service.
 *
 * The class body is intentionally empty: every operation is dispatched
 * dynamically through the parent {@see AwsClient}; the @method tags below
 * exist only for IDE completion and static analysis.
 *
 * @method \Aws\Result createGroup(array $args = [])
 * @method \GuzzleHttp\Promise\Promise createGroupAsync(array $args = [])
 * @method \Aws\Result deleteGroup(array $args = [])
 * @method \GuzzleHttp\Promise\Promise deleteGroupAsync(array $args = [])
 * @method \Aws\Result getGroup(array $args = [])
 * @method \GuzzleHttp\Promise\Promise getGroupAsync(array $args = [])
 * @method \Aws\Result getGroupQuery(array $args = [])
 * @method \GuzzleHttp\Promise\Promise getGroupQueryAsync(array $args = [])
 * @method \Aws\Result getTags(array $args = [])
 * @method \GuzzleHttp\Promise\Promise getTagsAsync(array $args = [])
 * @method \Aws\Result listGroupResources(array $args = [])
 * @method \GuzzleHttp\Promise\Promise listGroupResourcesAsync(array $args = [])
 * @method \Aws\Result listGroups(array $args = [])
 * @method \GuzzleHttp\Promise\Promise listGroupsAsync(array $args = [])
 * @method \Aws\Result searchResources(array $args = [])
 * @method \GuzzleHttp\Promise\Promise searchResourcesAsync(array $args = [])
 * @method \Aws\Result tag(array $args = [])
 * @method \GuzzleHttp\Promise\Promise tagAsync(array $args = [])
 * @method \Aws\Result untag(array $args = [])
 * @method \GuzzleHttp\Promise\Promise untagAsync(array $args = [])
 * @method \Aws\Result updateGroup(array $args = [])
 * @method \GuzzleHttp\Promise\Promise updateGroupAsync(array $args = [])
 * @method \Aws\Result updateGroupQuery(array $args = [])
 * @method \GuzzleHttp\Promise\Promise updateGroupQueryAsync(array $args = [])
 */
class ResourceGroupsClient extends AwsClient {}
| {
"pile_set_name": "Github"
} |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/amba/mmci.h
*/
#ifndef AMBA_MMCI_H
#define AMBA_MMCI_H
#include <linux/mmc/host.h>
/**
 * struct mmci_platform_data - platform configuration for the MMCI
 * (also known as PL180) block.
 * @ocr_mask: available voltages on the 4 pins from the block, this
 * is ignored if a regulator is used, see the MMC_VDD_* masks in
 * mmc/host.h
 * @ios_handler: a callback function to act on specific ios changes,
 * used for example to control a levelshifter
 * mask into a value to be binary (or set some other custom bits
 * in MMCIPWR) or:ed and written into the MMCIPWR register of the
 * block. May also control external power based on the power_mode.
 * @status: if no GPIO read function was given to the block in
 * gpio_cd (below) this function will be called to determine
 * whether a card is present in the MMC slot or not
 * @gpio_wp: read this GPIO pin to see if the card is write protected
 * @gpio_cd: read this GPIO pin to detect card insertion
 * @cd_invert: true if the gpio_cd pin value is active low
 */
struct mmci_platform_data {
	unsigned int ocr_mask;
	int (*ios_handler)(struct device *, struct mmc_ios *);
	unsigned int (*status)(struct device *);
	int gpio_wp;
	int gpio_cd;
	bool cd_invert;
};
#endif
| {
"pile_set_name": "Github"
} |
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_STUN_FIELD_TRIAL_H_
#define CONTENT_RENDERER_MEDIA_WEBRTC_STUN_FIELD_TRIAL_H_
#include <memory>
#include <string>
#include <vector>
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/threading/thread_checker.h"
#include "base/timer/timer.h"
#include "content/common/content_export.h"
#include "content/renderer/p2p/network_list_manager.h"
#include "content/renderer/p2p/network_list_observer.h"
#include "third_party/webrtc/base/network.h"
#include "third_party/webrtc/base/sigslot.h"
#include "third_party/webrtc/p2p/stunprober/stunprober.h"
namespace rtc {
class PacketSocketFactory;
class SocketAddress;
} // namespace rtc
namespace content {
// Wait for 30 seconds to avoid high CPU usage during browser start-up which
// might affect the accuracy of the trial. The trial wakes up the browser every
// 1 ms for no more than 3 seconds to see if time has passed for sending next
// stun probe.
static const int kExperimentStartDelayMs = 30000;
// Runs the STUN probing field trial: once the network list is known it creates
// a set of StunProber instances (batched per OnTimer ticks), observes their
// preparation/completion callbacks, and records the results via
// SaveHistogramData(). Trial configuration is parsed from a '/'-separated
// parameter string (see ParseParameters).
class StunProberTrial : public stunprober::StunProber::Observer,
                        public sigslot::has_slots<> {
 public:
  // Decoded trial configuration; filled in by ParseParameters().
  struct CONTENT_EXPORT Param {
    Param();
    ~Param();
    int requests_per_ip = 0;
    int interval_ms = 0;
    int shared_socket_mode = 0;
    int batch_size = 0;
    int total_batches = 0;
    std::vector<rtc::SocketAddress> servers;
  };
  StunProberTrial(rtc::NetworkManager* network_manager,
                  const std::string& params,
                  rtc::PacketSocketFactory* factory);
  ~StunProberTrial() override;
 private:
  // This will use |factory_| to create sockets, send stun binding requests with
  // various intervals as determined by |params|, observed the success rate and
  // latency of the stun responses and report through UMA.
  void OnNetworksChanged();
  // Parsing function to decode the '/' separated parameter string |params|.
  static CONTENT_EXPORT bool ParseParameters(const std::string& param_line,
                                             Param* params);
  // stunprober::StunProber::Observer:
  void OnPrepared(stunprober::StunProber* prober,
                  stunprober::StunProber::Status status) override;
  // OnFinished is invoked when the StunProber receives all the responses or
  // times out.
  void OnFinished(stunprober::StunProber* prober,
                  stunprober::StunProber::Status status) override;
  // This will be invoked repeatedly for |total_probers_| times with the
  // interval equal to the estimated run time of a prober.
  void OnTimer();
  void SaveHistogramData();
  rtc::NetworkManager* network_manager_;
  std::string param_line_;
  rtc::PacketSocketFactory* factory_ = nullptr;
  // Bookkeeping of the prober fleet; probers advance from ready -> started ->
  // finished as the trial progresses.
  int total_probers_ = 0;
  int batch_size_ = 0;
  int ready_probers_ = 0;
  int started_probers_ = 0;
  int finished_probers_ = 0;
  std::vector<stunprober::StunProber*> probers_;
  base::ThreadChecker thread_checker_;
  // The reason we use a timer instead of depending on the OnFinished callback
  // of each prober is that the OnFinished is not fired at the last of STUN
  // request of each prober, instead, it includes a timeout period which waits
  // the server response to come back. Having a timer guarantees the
  // inter-prober intervals is the same as the STUN interval inside a prober.
  base::RepeatingTimer timer_;
  FRIEND_TEST_ALL_PREFIXES(StunProbeTrial, VerifyParameterParsing);
  DISALLOW_COPY_AND_ASSIGN(StunProberTrial);
};
} // namespace content
#endif // CONTENT_RENDERER_MEDIA_WEBRTC_STUN_FIELD_TRIAL_H_
| {
"pile_set_name": "Github"
} |
/***************************************************************************
* V4L2 driver for ET61X[12]51 PC Camera Controllers *
* *
* Copyright (C) 2006 by Luca Risolia <[email protected]> *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the Free Software *
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
***************************************************************************/
#ifndef _ET61X251_H_
#define _ET61X251_H_
#include <linux/version.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/param.h>
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/kref.h>
#include "et61x251_sensor.h"
/*****************************************************************************/
#define ET61X251_DEBUG
#define ET61X251_DEBUG_LEVEL 2
#define ET61X251_MAX_DEVICES 64
#define ET61X251_PRESERVE_IMGSCALE 0
#define ET61X251_FORCE_MUNMAP 0
#define ET61X251_MAX_FRAMES 32
#define ET61X251_COMPRESSION_QUALITY 0
#define ET61X251_URBS 2
#define ET61X251_ISO_PACKETS 7
#define ET61X251_ALTERNATE_SETTING 13
#define ET61X251_URB_TIMEOUT msecs_to_jiffies(2 * ET61X251_ISO_PACKETS)
#define ET61X251_CTRL_TIMEOUT 100
#define ET61X251_FRAME_TIMEOUT 2
/*****************************************************************************/
/* USB devices handled by this driver: Etoms ET61X[12]51-based cameras,
   all under vendor id 0x102c. Terminated by the empty sentinel entry. */
static const struct usb_device_id et61x251_id_table[] = {
	{ USB_DEVICE(0x102c, 0x6151), },
	{ USB_DEVICE(0x102c, 0x6251), },
	{ USB_DEVICE(0x102c, 0x6253), },
	{ USB_DEVICE(0x102c, 0x6254), },
	{ USB_DEVICE(0x102c, 0x6255), },
	{ USB_DEVICE(0x102c, 0x6256), },
	{ USB_DEVICE(0x102c, 0x6257), },
	{ USB_DEVICE(0x102c, 0x6258), },
	{ USB_DEVICE(0x102c, 0x6259), },
	{ USB_DEVICE(0x102c, 0x625a), },
	{ USB_DEVICE(0x102c, 0x625b), },
	{ USB_DEVICE(0x102c, 0x625c), },
	{ USB_DEVICE(0x102c, 0x625d), },
	{ USB_DEVICE(0x102c, 0x625e), },
	{ USB_DEVICE(0x102c, 0x625f), },
	{ USB_DEVICE(0x102c, 0x6260), },
	{ USB_DEVICE(0x102c, 0x6261), },
	{ USB_DEVICE(0x102c, 0x6262), },
	{ USB_DEVICE(0x102c, 0x6263), },
	{ USB_DEVICE(0x102c, 0x6264), },
	{ USB_DEVICE(0x102c, 0x6265), },
	{ USB_DEVICE(0x102c, 0x6266), },
	{ USB_DEVICE(0x102c, 0x6267), },
	{ USB_DEVICE(0x102c, 0x6268), },
	{ USB_DEVICE(0x102c, 0x6269), },
	{ }
};
ET61X251_SENSOR_TABLE
/*****************************************************************************/
/* Life-cycle of a video frame buffer. */
enum et61x251_frame_state {
	F_UNUSED,
	F_QUEUED,
	F_GRABBING,
	F_DONE,
	F_ERROR,
};
/* One frame buffer, its V4L2 metadata and queue linkage. */
struct et61x251_frame_t {
	void* bufmem;
	struct v4l2_buffer buf;
	enum et61x251_frame_state state;
	struct list_head frame;
	unsigned long vma_use_count;	/* nonzero while mmap()ed into user space */
};
/* Device status flags (bitmask). */
enum et61x251_dev_state {
	DEV_INITIALIZED = 0x01,
	DEV_DISCONNECTED = 0x02,
	DEV_MISCONFIGURED = 0x04,
};
/* How user space currently accesses frames. */
enum et61x251_io_method {
	IO_NONE,
	IO_READ,
	IO_MMAP,
};
enum et61x251_stream_state {
	STREAM_OFF,
	STREAM_INTERRUPT,
	STREAM_ON,
};
/* Last register / I2C register touched through sysfs. */
struct et61x251_sysfs_attr {
	u8 reg, i2c_reg;
};
/* Values of the driver's module parameters for this device. */
struct et61x251_module_param {
	u8 force_munmap;
	u16 frame_timeout;
};
static DEFINE_MUTEX(et61x251_sysfs_lock);
static DECLARE_RWSEM(et61x251_dev_lock);
/* Per-camera state: V4L2 device, sensor, USB transfer machinery,
   frame queues and synchronization primitives. */
struct et61x251_device {
	struct video_device* v4ldev;
	struct et61x251_sensor sensor;
	struct usb_device* usbdev;
	struct urb* urb[ET61X251_URBS];
	void* transfer_buffer[ET61X251_URBS];
	u8* control_buffer;
	struct et61x251_frame_t *frame_current, frame[ET61X251_MAX_FRAMES];
	struct list_head inqueue, outqueue;
	u32 frame_count, nbuffers, nreadbuffers;
	enum et61x251_io_method io;
	enum et61x251_stream_state stream;
	struct v4l2_jpegcompression compression;
	struct et61x251_sysfs_attr sysfs;
	struct et61x251_module_param module_param;
	struct kref kref;
	enum et61x251_dev_state state;
	u8 users;
	struct completion probe;
	struct mutex open_mutex, fileop_mutex;
	spinlock_t queue_lock;
	wait_queue_head_t wait_open, wait_frame, wait_stream;
};
/*****************************************************************************/
/*
 * Match the camera's USB interface 0 against @id.
 * Returns @cam on a match, NULL otherwise.
 *
 * Marked static inline: this function is *defined* in a header, so without
 * internal linkage every translation unit including the header would emit a
 * duplicate external definition and break the final link.
 */
static inline struct et61x251_device*
et61x251_match_id(struct et61x251_device* cam, const struct usb_device_id *id)
{
	return usb_match_id(usb_ifnum_to_if(cam->usbdev, 0), id) ? cam : NULL;
}
/*
 * Copy the sensor descriptor into the camera state, attaching @sensor to @cam.
 *
 * Marked static inline for the same reason as et61x251_match_id(): a
 * non-static definition in a header produces multiple-definition linker
 * errors when the header is included by more than one translation unit.
 */
static inline void
et61x251_attach_sensor(struct et61x251_device* cam,
		       const struct et61x251_sensor* sensor)
{
	memcpy(&cam->sensor, sensor, sizeof(struct et61x251_sensor));
}
/*****************************************************************************/
#undef DBG
#undef KDBG
#ifdef ET61X251_DEBUG
# define DBG(level, fmt, args...) \
do { \
if (debug >= (level)) { \
if ((level) == 1) \
dev_err(&cam->usbdev->dev, fmt "\n", ## args); \
else if ((level) == 2) \
dev_info(&cam->usbdev->dev, fmt "\n", ## args); \
else if ((level) >= 3) \
dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n", \
__FILE__, __func__, __LINE__ , ## args); \
} \
} while (0)
/*
 * Kernel-log debug printout without a device context.
 * Fix: the level-3 branch printed the prefix "sn9c102: " — a leftover from
 * the sn9c102 driver this code was adapted from — which mis-attributed this
 * driver's messages in the kernel log. Use "et61x251: " consistently.
 */
# define KDBG(level, fmt, args...) \
do { \
	if (debug >= (level)) { \
		if ((level) == 1 || (level) == 2) \
			pr_info("et61x251: " fmt "\n", ## args); \
		else if ((level) == 3) \
			pr_debug("et61x251: [%s:%s:%d] " fmt "\n", __FILE__, \
				 __func__, __LINE__ , ## args); \
	} \
} while (0)
# define V4LDBG(level, name, cmd) \
do { \
if (debug >= (level)) \
v4l_print_ioctl(name, cmd); \
} while (0)
#else
# define DBG(level, fmt, args...) do {;} while(0)
# define KDBG(level, fmt, args...) do {;} while(0)
# define V4LDBG(level, name, cmd) do {;} while(0)
#endif
#undef PDBG
#define PDBG(fmt, args...) \
dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n", __FILE__, __func__, \
__LINE__ , ## args)
#undef PDBGG
#define PDBGG(fmt, args...) do {;} while(0) /* placeholder */
#endif /* _ET61X251_H_ */
| {
"pile_set_name": "Github"
} |
// IA64.h
#ifndef __IA64_H
#define __IA64_H
#include "BranchCoder.h"
// Declares the IA-64 branch-converter coder BC_IA64 with method id 0x04 and
// argument 1 via the MyClassA macro from BranchCoder.h.
// NOTE(review): exact macro semantics are defined in BranchCoder.h — confirm.
// NOTE(review): the guard name __IA64_H uses a double-underscore prefix,
// which is reserved for the implementation; kept unchanged here.
MyClassA(BC_IA64, 0x04, 1)
#endif
| {
"pile_set_name": "Github"
} |
/*
* The MIT License (MIT)
*
* Copyright © 2018 Franklin "Snaipe" Mathieu <http://snai.pe/>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef CRITERION_INTERNAL_STREAM_H_
#define CRITERION_INTERNAL_STREAM_H_
#define CRI_ASSERT_TEST_TAG_stream ,
#define CRI_ASSERT_TEST_TAGC_stream() ,
#define CRI_ASSERT_TYPE_TAG_stream struct cr_stream,
#define CRI_ASSERT_TYPE_TAG_ID_stream stream,
#ifdef __cplusplus
# include <string>
# include <ostream>
/* Equality of two criterion streams, delegating to the user comparator. */
inline bool operator==(criterion::stream &s1, criterion::stream &s2)
{
    return cr_user_stream_eq(&s1, &s2);
}
/* Ordering of two criterion streams, delegating to the user comparator. */
inline bool operator<(criterion::stream &s1, criterion::stream &s2)
{
    return cr_user_stream_lt(&s1, &s2);
}
/* Stream formatter: converts via cr_user_stream_tostr(), which returns a
   malloc'd C string that must be freed after copying into the ostream. */
inline std::ostream &operator<<(std::ostream &os, const criterion::stream &s)
{
    char *str = cr_user_stream_tostr(&s);
    os << std::string(str);
    free(str);
    return os;
}
#endif /* !__cplusplus */
#endif /* !CRITERION_INTERNAL_STREAM_H_ */
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" ?>
<annotation>
<folder>widerface</folder>
<filename>39--Ice_Skating_39_Ice_Skating_Ice_Skating_39_13.jpg</filename>
<source>
<database>wider face Database</database>
<annotation>PASCAL VOC2007</annotation>
<image>flickr</image>
<flickrid>-1</flickrid>
</source>
<owner>
<flickrid>yanyu</flickrid>
<name>yanyu</name>
</owner>
<size>
<width>1024</width>
<height>768</height>
<depth>3</depth>
</size>
<segmented>0</segmented>
<object>
<name>face</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>109</xmin>
<ymin>389</ymin>
<xmax>129</xmax>
<ymax>418</ymax>
</bndbox>
<lm>
<x1>121.688</x1>
<y1>400.75</y1>
<x2>128.062</x2>
<y2>400.188</y2>
<x3>127.125</x3>
<y3>406.0</y3>
<x4>121.875</x4>
<y4>411.062</y4>
<x5>126.188</x5>
<y5>410.5</y5>
<visible>1</visible>
<blur>0.46</blur>
</lm>
<has_lm>1</has_lm>
</object>
<object>
<name>face</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>141</xmin>
<ymin>405</ymin>
<xmax>168</xmax>
<ymax>434</ymax>
</bndbox>
<lm>
<x1>146.938</x1>
<y1>416.75</y1>
<x2>160.438</x2>
<y2>415.625</y2>
<x3>153.875</x3>
<y3>422.375</y3>
<x4>151.25</x4>
<y4>428.375</y4>
<x5>158.375</x5>
<y5>428.938</y5>
<visible>0</visible>
<blur>0.5</blur>
</lm>
<has_lm>1</has_lm>
</object>
<object>
<name>face</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>111</xmin>
<ymin>313</ymin>
<xmax>123</xmax>
<ymax>329</ymax>
</bndbox>
<has_lm>0</has_lm>
</object>
<object>
<name>face</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>34</xmin>
<ymin>327</ymin>
<xmax>44</xmax>
<ymax>339</ymax>
</bndbox>
<has_lm>0</has_lm>
</object>
<object>
<name>face</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>193</xmin>
<ymin>294</ymin>
<xmax>209</xmax>
<ymax>318</ymax>
</bndbox>
<lm>
<x1>201.375</x1>
<y1>302.844</y1>
<x2>206.844</x2>
<y2>303.0</y2>
<x3>207.156</x3>
<y3>306.75</y3>
<x4>202.312</x4>
<y4>311.438</y4>
<x5>206.375</x5>
<y5>311.594</y5>
<visible>1</visible>
<blur>0.46</blur>
</lm>
<has_lm>1</has_lm>
</object>
<object>
<name>face</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>228</xmin>
<ymin>321</ymin>
<xmax>240</xmax>
<ymax>336</ymax>
</bndbox>
<has_lm>0</has_lm>
</object>
<object>
<name>face</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>355</xmin>
<ymin>306</ymin>
<xmax>376</xmax>
<ymax>327</ymax>
</bndbox>
<has_lm>0</has_lm>
</object>
<object>
<name>face</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>286</xmin>
<ymin>306</ymin>
<xmax>296</xmax>
<ymax>321</ymax>
</bndbox>
<has_lm>0</has_lm>
</object>
<object>
<name>face</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>433</xmin>
<ymin>353</ymin>
<xmax>445</xmax>
<ymax>372</ymax>
</bndbox>
<lm>
<x1>440.29</x1>
<y1>359.571</y1>
<x2>443.424</x2>
<y2>360.054</y2>
<x3>443.786</x3>
<y3>363.79</y3>
<x4>441.013</x4>
<y4>366.08</y4>
<x5>442.821</x5>
<y5>366.562</y5>
<visible>1</visible>
<blur>0.26</blur>
</lm>
<has_lm>1</has_lm>
</object>
<object>
<name>face</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>435</xmin>
<ymin>312</ymin>
<xmax>450</xmax>
<ymax>329</ymax>
</bndbox>
<lm>
<x1>439.21</x1>
<y1>319.719</y1>
<x2>444.79</x2>
<y2>319.272</y2>
<x3>442.112</x3>
<y3>322.286</y3>
<x4>439.433</x4>
<y4>324.741</y4>
<x5>444.902</x5>
<y5>324.295</y5>
<visible>1</visible>
<blur>0.39</blur>
</lm>
<has_lm>1</has_lm>
</object>
<object>
<name>face</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>503</xmin>
<ymin>303</ymin>
<xmax>518</xmax>
<ymax>325</ymax>
</bndbox>
<lm>
<x1>510.643</x1>
<y1>312.286</y1>
<x2>517.5</x2>
<y2>310.714</y2>
<x3>514.5</x3>
<y3>317.143</y3>
<x4>509.786</x4>
<y4>319.571</y4>
<x5>516.643</x5>
<y5>318.143</y5>
<visible>1</visible>
<blur>0.33</blur>
</lm>
<has_lm>1</has_lm>
</object>
<object>
<name>face</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>454</xmin>
<ymin>312</ymin>
<xmax>467</xmax>
<ymax>325</ymax>
</bndbox>
<has_lm>0</has_lm>
</object>
<object>
<name>face</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>847</xmin>
<ymin>289</ymin>
<xmax>857</xmax>
<ymax>305</ymax>
</bndbox>
<has_lm>0</has_lm>
</object>
<object>
<name>face</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>372</xmin>
<ymin>390</ymin>
<xmax>390</xmax>
<ymax>421</ymax>
</bndbox>
<has_lm>0</has_lm>
</object>
<object>
<name>face</name>
<pose>Unspecified</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>507</xmin>
<ymin>263</ymin>
<xmax>518</xmax>
<ymax>273</ymax>
</bndbox>
<has_lm>0</has_lm>
</object>
</annotation>
| {
"pile_set_name": "Github"
} |
// This is a part of the Microsoft Foundation Classes C++ library.
// Copyright (C) Microsoft Corporation
// All rights reserved.
//
// This source code is only intended as a supplement to the
// Microsoft Foundation Classes Reference and related
// electronic documentation provided with the library.
// See these sources for detailed information regarding the
// Microsoft Foundation Classes product.
#include "stdafx.h"
#include "IEDemo.h"
#include "IEDemoDoc.h"
#ifdef _DEBUG
#define new DEBUG_NEW
#undef THIS_FILE
static char THIS_FILE[] = __FILE__;
#endif
/////////////////////////////////////////////////////////////////////////////
// CIEDemoDoc
IMPLEMENT_DYNCREATE(CIEDemoDoc, CDocument)
BEGIN_MESSAGE_MAP(CIEDemoDoc, CDocument)
//{{AFX_MSG_MAP(CIEDemoDoc)
// NOTE - the ClassWizard will add and remove mapping macros here.
// DO NOT EDIT what you see in these blocks of generated code!
//}}AFX_MSG_MAP
END_MESSAGE_MAP()
/////////////////////////////////////////////////////////////////////////////
// CIEDemoDoc construction/destruction
// Construct an empty document: no history entries, offset at the newest entry.
CIEDemoDoc::CIEDemoDoc()
{
	m_iHistoryOffset = 0;
	m_arHistory.SetSize (0, 1);	// empty array, grow-by of 1
}
// Destructor: the document owns every CHistoryObj in m_arHistory, so release
// them all here.
CIEDemoDoc::~CIEDemoDoc()
{
	const int nCount = (int) m_arHistory.GetSize ();
	for (int nIndex = 0; nIndex < nCount; nIndex++)
	{
		ASSERT (m_arHistory [nIndex] != NULL);
		delete m_arHistory [nIndex];
	}
}
// Standard MFC new-document hook; returns FALSE if the base class fails.
BOOL CIEDemoDoc::OnNewDocument()
{
	if (!CDocument::OnNewDocument())
		return FALSE;
	// TODO: add reinitialization code here
	// (SDI documents will reuse this document)
	return TRUE;
}
/////////////////////////////////////////////////////////////////////////////
// CIEDemoDoc serialization
// MFC serialization hook; currently a no-op placeholder in both directions.
void CIEDemoDoc::Serialize(CArchive& ar)
{
	if (ar.IsStoring())
	{
		// TODO: add storing code here
	}
	else
	{
		// TODO: add loading code here
	}
}
/////////////////////////////////////////////////////////////////////////////
// CIEDemoDoc diagnostics
#ifdef _DEBUG
// Debug-build validity check; delegates to the base class.
void CIEDemoDoc::AssertValid() const
{
	CDocument::AssertValid();
}
// Debug-build state dump; delegates to the base class.
void CIEDemoDoc::Dump(CDumpContext& dc) const
{
	CDocument::Dump(dc);
}
#endif //_DEBUG
/////////////////////////////////////////////////////////////////////////////
// CIEDemoDoc commands
// Adds (title, URL) to the navigation history, returning the (possibly
// pre-existing) entry. The newest entry lives at index 0; when the list is
// full the oldest entry (last index) would be expected to go — note below.
CHistoryObj* CIEDemoDoc::AddURLToHistory (const CString& strTitle, const CString& strURL)
{
	ASSERT (m_arHistory.GetSize () <= HISTORY_LEN);
	// Already recorded? Return the existing entry unchanged.
	for (int i = 0; i < m_arHistory.GetSize (); i ++)
	{
		CHistoryObj* pObj = m_arHistory [i];
		ASSERT (pObj != NULL);
		if (pObj && pObj->GetTitle () == strTitle && pObj->GetURL () == strURL)
		{
			return pObj;
		}
	}
	// Full: evict the entry at index 0 to make room.
	// NOTE(review): index 0 is the *newest* entry, not the oldest — confirm
	// whether eviction should use the last index instead.
	if (m_arHistory.GetSize () == HISTORY_LEN)
	{
		delete m_arHistory [0];
		m_arHistory.RemoveAt (0);
	}
	// NOTE(review): the command id FIRST_HISTORY_COMMAND + size can collide
	// with ids held by surviving entries after evictions — verify menu-command
	// uniqueness is guaranteed elsewhere.
	CHistoryObj* pObj = new CHistoryObj (strTitle, strURL,
		FIRST_HISTORY_COMMAND + (UINT)m_arHistory.GetSize ());
	m_arHistory.InsertAt (0, pObj);
	m_iHistoryOffset = 0;	// jump to the newest entry
	return pObj;
}
//****************************************************************************************
//****************************************************************************************
// Fills |lst| with the history entries behind the current position
// (indices above m_iHistoryOffset), nearest first.
void CIEDemoDoc::GetBackList (_T_HistotyList& lst) const
{
	lst.RemoveAll ();
	const int nCount = (int) m_arHistory.GetSize ();
	for (int nIndex = m_iHistoryOffset + 1; nIndex < nCount; nIndex++)
	{
		lst.AddTail (m_arHistory [nIndex]);
	}
}
//****************************************************************************************
//****************************************************************************************
// Fills |lst| with the history entries ahead of the current position
// (indices below m_iHistoryOffset), nearest first.
void CIEDemoDoc::GetFrwdList (_T_HistotyList& lst) const
{
	lst.RemoveAll ();
	int nIndex = m_iHistoryOffset - 1;
	while (nIndex >= 0)
	{
		ASSERT (nIndex < m_arHistory.GetSize ());
		lst.AddTail (m_arHistory [nIndex]);
		nIndex--;
	}
}
//****************************************************************************************
//****************************************************************************************
// Navigates to the history entry whose menu-command id is |uiCmd|.
// On a hit the entry is moved to the end of the array, the offset is reset,
// and the entry is returned; NULL if no entry carries that command id.
CHistoryObj* CIEDemoDoc::Go (UINT uiCmd)
{
	for (int i = 0; i < m_arHistory.GetSize (); i ++)
	{
		CHistoryObj* pObj = m_arHistory [i];
		ASSERT (pObj != NULL);
		if (pObj && pObj->GetCommand () == uiCmd)
		{
			// Move the entry to the array's tail and make it current.
			m_arHistory.RemoveAt (i);
			m_arHistory.Add (pObj);
			m_iHistoryOffset = 0;
			return pObj;
		}
	}
	return NULL;
}
| {
"pile_set_name": "Github"
} |
/*
* Postcopy migration for RAM
*
* Copyright 2013 Red Hat, Inc. and/or its affiliates
*
* Authors:
* Dave Gilbert <[email protected]>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef QEMU_POSTCOPY_RAM_H
#define QEMU_POSTCOPY_RAM_H
/* Return true if the host supports everything we need to do postcopy-ram */
bool postcopy_ram_supported_by_host(void);
/*
* Make all of RAM sensitive to accesses to areas that haven't yet been written
* and wire up anything necessary to deal with it.
*/
int postcopy_ram_enable_notify(MigrationIncomingState *mis);
/*
* Initialise postcopy-ram, setting the RAM to a state where we can go into
* postcopy later; must be called prior to any precopy.
* called from ram.c's similarly named ram_postcopy_incoming_init
*/
int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages);
/*
 * At the end of a migration where postcopy_ram_incoming_init was called;
 * tears down the state that call set up.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis);
/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard,
 * however leaving it until after precopy means that most of the precopy
 * data is still THPd (backed by transparent huge pages).
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis);
/*
 * Called at the start of each RAMBlock by the bitmap code.
 * 'offset' is the bitmap offset of the named RAMBlock in the migration
 * bitmap.
 * Returns a new PDS (PostcopyDiscardState); release it with
 * postcopy_discard_send_finish() when the block has been processed.
 */
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
unsigned long offset,
const char *name);
/*
 * Called by the bitmap code for each chunk to discard.
 * May send a discard message, may just leave it queued to
 * be sent later.
 * @start,@length: a range of pages in the migration bitmap in the
 * RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
unsigned long start, unsigned long length);
/*
 * Called at the end of each RAMBlock by the bitmap code.
 * Sends any outstanding (queued) discard messages, then frees the PDS.
 */
void postcopy_discard_send_finish(MigrationState *ms,
PostcopyDiscardState *pds);
/*
 * Place a page (from) at (host) efficiently.
 * There are restrictions on how 'from' must be mapped, in general best
 * to use other postcopy_ routines to allocate (e.g. postcopy_get_tmp_page).
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
size_t pagesize);
/*
 * Place a zero page at (host) atomically.
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
size_t pagesize);
/*
 * Allocate a page of memory that can be mapped at a later point in time
 * using postcopy_place_page.
 * Returns: Pointer to allocated page
 */
void *postcopy_get_tmp_page(MigrationIncomingState *mis);
#endif
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectName>wxWidgets_net</ProjectName>
<ProjectGuid>{69F2EDE4-7D21-5738-9BC0-F66F61C9AE00}</ProjectGuid>
<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<PlatformToolset>v142</PlatformToolset>
<UseOfMfc>false</UseOfMfc>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<PlatformToolset>v142</PlatformToolset>
<UseOfMfc>false</UseOfMfc>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup>
<_ProjectFileVersion>11.0.60315.1</_ProjectFileVersion>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<OutDir>..\wx\lib\vc_lib\</OutDir>
<IntDir>vc_mswud\net\</IntDir>
<TargetName>wxbase29ud_net</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<OutDir>..\wx\lib\vc_lib\</OutDir>
<IntDir>vc_mswu\net\</IntDir>
<TargetName>wxbase29u_net</TargetName>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Midl>
<PreprocessorDefinitions>WIN32;_LIB;_DEBUG;_CRT_SECURE_NO_DEPRECATE=1;_CRT_NON_CONFORMING_SWPRINTFS=1;_SCL_SECURE_NO_WARNINGS=1;__WXMSW__;_UNICODE;WXBUILDING;wxUSE_GUI=0;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>config;..\wx\include;..\wx\src\tiff\libtiff;..\wx\src\jpeg;..\wx\src\png;..\zlib;..\wx\src\regex;..\wx\src\expat\lib;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</Midl>
<ClCompile>
<AdditionalOptions>/MP %(AdditionalOptions)</AdditionalOptions>
<Optimization>Disabled</Optimization>
<AdditionalIncludeDirectories>config;..\wx\include;..\wx\src\tiff\libtiff;..\wx\src\jpeg;..\wx\src\png;..\zlib;..\wx\src\regex;..\wx\src\expat\lib;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>WIN32;_LIB;_DEBUG;_CRT_SECURE_NO_DEPRECATE=1;_CRT_NON_CONFORMING_SWPRINTFS=1;_SCL_SECURE_NO_WARNINGS=1;__WXMSW__;_UNICODE;WXBUILDING;wxUSE_GUI=0;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<ExceptionHandling>Sync</ExceptionHandling>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
<BufferSecurityCheck>true</BufferSecurityCheck>
<RuntimeTypeInfo>true</RuntimeTypeInfo>
<PrecompiledHeader>Use</PrecompiledHeader>
<PrecompiledHeaderFile>wx/wxprec.h</PrecompiledHeaderFile>
<PrecompiledHeaderOutputFile>vc_mswud\wxprec_netlib.pch</PrecompiledHeaderOutputFile>
<ObjectFileName>vc_mswud\net\</ObjectFileName>
<ProgramDataBaseFileName>..\wx\lib\vc_lib\wxbase29ud_net.pdb</ProgramDataBaseFileName>
<WarningLevel>Level4</WarningLevel>
<SuppressStartupBanner>true</SuppressStartupBanner>
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
</ClCompile>
<ResourceCompile>
<PreprocessorDefinitions>_DEBUG;_CRT_SECURE_NO_DEPRECATE=1;_CRT_NON_CONFORMING_SWPRINTFS=1;_SCL_SECURE_NO_WARNINGS=1;__WXMSW__;_UNICODE;WXBUILDING;wxUSE_GUI=0;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<Culture>0x0409</Culture>
<AdditionalIncludeDirectories>config;..\wx\include;..\wx\src\tiff\libtiff;..\wx\src\jpeg;..\wx\src\png;..\zlib;..\wx\src\regex;..\wx\src\expat\lib;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ResourceCompile>
<Lib>
<OutputFile>..\wx\lib\vc_lib\wxbase29ud_net.lib</OutputFile>
<SuppressStartupBanner>true</SuppressStartupBanner>
</Lib>
<Bscmake>
<OutputFile>..\wx\lib\vc_lib\wx_net.bsc</OutputFile>
<SuppressStartupBanner>true</SuppressStartupBanner>
</Bscmake>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Midl>
<PreprocessorDefinitions>WIN32;_LIB;_CRT_SECURE_NO_DEPRECATE=1;_CRT_NON_CONFORMING_SWPRINTFS=1;_SCL_SECURE_NO_WARNINGS=1;__WXMSW__;NDEBUG;_UNICODE;WXBUILDING;wxUSE_GUI=0;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>config;..\wx\include;..\wx\src\tiff\libtiff;..\wx\src\jpeg;..\wx\src\png;..\zlib;..\wx\src\regex;..\wx\src\expat\lib;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</Midl>
<ClCompile>
<AdditionalOptions>/MP %(AdditionalOptions)</AdditionalOptions>
<Optimization>MaxSpeed</Optimization>
<AdditionalIncludeDirectories>config;..\wx\include;..\wx\src\tiff\libtiff;..\wx\src\jpeg;..\wx\src\png;..\zlib;..\wx\src\regex;..\wx\src\expat\lib;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>WIN32;_LIB;_CRT_SECURE_NO_DEPRECATE=1;_CRT_NON_CONFORMING_SWPRINTFS=1;_SCL_SECURE_NO_WARNINGS=1;__WXMSW__;NDEBUG;_UNICODE;WXBUILDING;wxUSE_GUI=0;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<ExceptionHandling>Sync</ExceptionHandling>
<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
<RuntimeTypeInfo>true</RuntimeTypeInfo>
<PrecompiledHeader>Use</PrecompiledHeader>
<PrecompiledHeaderFile>wx/wxprec.h</PrecompiledHeaderFile>
<PrecompiledHeaderOutputFile>vc_mswu\wxprec_netlib.pch</PrecompiledHeaderOutputFile>
<ObjectFileName>vc_mswu\net\</ObjectFileName>
<ProgramDataBaseFileName>..\wx\lib\vc_lib\wxbase29u_net.pdb</ProgramDataBaseFileName>
<WarningLevel>Level4</WarningLevel>
<SuppressStartupBanner>true</SuppressStartupBanner>
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
</ClCompile>
<ResourceCompile>
<PreprocessorDefinitions>_CRT_SECURE_NO_DEPRECATE=1;_CRT_NON_CONFORMING_SWPRINTFS=1;_SCL_SECURE_NO_WARNINGS=1;__WXMSW__;NDEBUG;_UNICODE;WXBUILDING;wxUSE_GUI=0;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<Culture>0x0409</Culture>
<AdditionalIncludeDirectories>config;..\wx\include;..\wx\src\tiff\libtiff;..\wx\src\jpeg;..\wx\src\png;..\zlib;..\wx\src\regex;..\wx\src\expat\lib;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ResourceCompile>
<Lib>
<OutputFile>..\wx\lib\vc_lib\wxbase29u_net.lib</OutputFile>
<SuppressStartupBanner>true</SuppressStartupBanner>
</Lib>
<Bscmake>
<OutputFile>..\wx\lib\vc_lib\wx_net.bsc</OutputFile>
<SuppressStartupBanner>true</SuppressStartupBanner>
</Bscmake>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="..\wx\src\common\dummy.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\wx\src\common\sckaddr.cpp" />
<ClCompile Include="..\wx\src\common\sckipc.cpp" />
<ClCompile Include="..\wx\src\common\sckstrm.cpp" />
<ClCompile Include="..\wx\src\common\socket.cpp" />
<ClCompile Include="..\wx\src\msw\sockmsw.cpp" />
<ClCompile Include="..\wx\src\msw\urlmsw.cpp" />
</ItemGroup>
<ItemGroup>
<ResourceCompile Include="..\wx\src\msw\version.rc">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
</ResourceCompile>
</ItemGroup>
<ItemGroup>
<CustomBuild Include="..\wx\include\wx\msw\genrcdefs.h">
<Message Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Creating ..\wx\lib\vc_lib\mswud\wx\msw\rcdefs.h</Message>
<Command Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">cl /EP /nologo "%(FullPath)" > "..\wx\lib\vc_lib\mswud\wx\msw\rcdefs.h"</Command>
<Outputs Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">..\wx\lib\vc_lib\mswud\wx\msw\rcdefs.h;%(Outputs)</Outputs>
<Message Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Creating ..\wx\lib\vc_lib\mswu\wx\msw\rcdefs.h</Message>
<Command Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">cl /EP /nologo "%(FullPath)" > "..\wx\lib\vc_lib\mswu\wx\msw\rcdefs.h"</Command>
<Outputs Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">..\wx\lib\vc_lib\mswu\wx\msw\rcdefs.h;%(Outputs)</Outputs>
</CustomBuild>
<ClInclude Include="..\wx\include\wx\protocol\file.h" />
<ClInclude Include="..\wx\include\wx\fs_inet.h" />
<ClInclude Include="..\wx\include\wx\protocol\ftp.h" />
<ClInclude Include="..\wx\include\wx\protocol\http.h" />
<ClInclude Include="..\wx\include\wx\protocol\log.h" />
<ClInclude Include="..\wx\include\wx\protocol\protocol.h" />
<ClInclude Include="..\wx\include\wx\sckaddr.h" />
<ClInclude Include="..\wx\include\wx\sckipc.h" />
<ClInclude Include="..\wx\include\wx\sckstrm.h" />
<ClInclude Include="..\wx\include\wx\socket.h" />
<ClInclude Include="..\wx\include\wx\url.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
| {
"pile_set_name": "Github"
} |
defmodule Example.ConnCase do
@moduledoc """
This module defines the test case to be used by
tests that require setting up a connection.
Such tests rely on `Phoenix.ConnTest` and also
import other functionality to make it easier
to build and query models.
Finally, if the test case interacts with the database,
it cannot be async. For this reason, every test runs
inside a transaction which is reset at the beginning
of the test unless the test case is marked as async.
"""
use ExUnit.CaseTemplate
# Code injected into every module that does `use Example.ConnCase`.
using do
quote do
# Import conveniences for testing with connections
use Phoenix.ConnTest
alias Example.Repo
import Ecto
import Ecto.Changeset
import Ecto.Query
import Example.Router.Helpers
# The default endpoint for testing
@endpoint Example.Endpoint
end
end
# Runs before each test: check out a sandboxed DB connection and,
# for non-async tests, share it with processes spawned by the test.
setup tags do
:ok = Ecto.Adapters.SQL.Sandbox.checkout(Example.Repo)
unless tags[:async] do
# Shared mode lets collaborating processes reuse this connection.
Ecto.Adapters.SQL.Sandbox.mode(Example.Repo, {:shared, self()})
end
# Each test receives a freshly built Plug connection in its context.
{:ok, conn: Phoenix.ConnTest.build_conn()}
end
end
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!--
***************************************************************************
Copyright (c) 2010 Qcadoo Limited
Project: Qcadoo MES
Version: 1.4
This file is part of Qcadoo.
Qcadoo is free software; you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
***************************************************************************
-->
<model name="workPlan" activable="true"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schema.qcadoo.org/model"
xsi:schemaLocation="http://schema.qcadoo.org/model http://schema.qcadoo.org/model.xsd">
<fields>
<string name="name" required="true" unique="true">
<validatesLength max="1024"/>
</string>
<datetime name="date"/>
<string name="worker"/>
<boolean name="generated"/>
<string name="fileName">
<validatesLength max="1024"/>
</string>
<boolean name="dontPrintOrdersInWorkPlans"/>
<enum name="type"
values="01noDistinction,02byEndProduct,03byWorkstationType,04byDivision,05byWorkstation,06byStaff,07byProductionLine"
default="01noDistinction" required="true"/>
<hasMany name="workPlanOrderColumns" plugin="workPlans"
model="workPlanOrderColumn" joinField="workPlan" cascade="delete"
copyable="true"/>
<manyToMany name="orders" joinField="workPlans" model="order"
plugin="orders" copyable="true"/>
<belongsTo name="inputProductColumnToSortBy" model="columnForInputProducts"/>
<enum name="orderSorting" values="01asc,02desc"/>
</fields>
<hooks>
<onCreate class="com.qcadoo.mes.workPlans.hooks.WorkPlanHooks"
method="onCreate"/>
<onCopy class="com.qcadoo.mes.workPlans.hooks.WorkPlanHooks"
method="onCopy"/>
</hooks>
<identifier expression="#name"/>
</model> | {
"pile_set_name": "Github"
} |
Class: Tips {#Tips}
===================
Display a tip on any element with a title and/or href.
### Credits
- The idea behind Tips.js is based on [Bubble Tooltips](http://web-graphics.com/mtarchive/001717.php) by [Alessandro Fulcitiniti](http://web-graphics.com/)
### Note
- Tips requires the page to be in [Standards Mode](http://hsivonen.iki.fi/doctype/).
### Implements
- [Events][], [Options][]
Tips Method: constructor
------------------------
### Arguments
* elements - (*mixed*: optional) A collection of elements, a string Selector, or an Element to apply the tooltips to.
* options - (*object*) An object to customize this Tips instance.
### Options
* showDelay - (*number*: defaults to 100) The delay before the show event is fired.
* hideDelay - (*number*: defaults to 100) The delay before the hide event is fired.
* hideEmpty - (*boolean*: defaults to *false*) If set to true, the empty tooltip will not be shown.
* title - (*string|function*: defaults to title) The property of the element to be used for the tip-title. If this option is a function it will execute it on every element with it passed as the first argument. It uses the return value of this function as the tip-title
* text - (*string|function*) Behaves the same as the `title` option but for tip-text. By default it either uses the `rel` or the `href` attribute as tip-text.
* className - (*string*: defaults to *null*) The className your tooltip container will get. Useful for styling.
* The tooltip element inside the tooltip container above will have 'tip' as classname.
* The title will have as classname: tip-title
* The text will have as classname: tip-text
* offset - (*object*: defaults to {x: 16, y: 16}) The distance of your tooltip from the mouse.
* fixed - (*boolean*: defaults to *false*) If set to true, the tooltip will not follow the mouse.
* windowPadding - (*object*: defaults to {x: 0, y: 0}) Allows you to reduce or expand the virtual size of the window for tip positioning. The tips will not be allowed to approach the edge of the window on any side based on this offset.
* id - (*string*: defaults to *null*) Add an `id` to the tooltip element, required for WAI-ARIA support.
* waiAria - (*boolean*: defaults to *true*) Requires the `id` option to be set. Enable [WAI-ARIA](http://www.w3.org/WAI/intro/aria.php) support. Adds aria-attributes to the tooltip.
### Events
* show - (*function*: defaults to `function(tip, hovered){ tip.setStyle('display', 'block'); }`) The default function for the show event, passes the tip element and the currently hovered element.
* hide - (*function*: defaults to `function(tip, hovered){ tip.setStyle('display', 'none'); }`) The default function for the hide event, passes the currently hovered element.
* attach - (*function*) Fires when an element gets added to the tips instance. Passes the element as argument.
* detach - (*function*) Fires when the event listeners get removed from an element. Passes the element as argument.
### Example
#### HTML
<a href="http://mootools.net" title="mootools homepage" class="thisisatooltip" />
#### JavaScript
var myTips = new Tips('.thisisatooltip');
Tips Event: show {#Tips:show}
---------------------------------
* (*function*) Fires when the Tip is starting to show and by default sets the tip visible.
### Signature
onShow(tip)
### Arguments
1. tip - (*element*) The tip element. Useful if you want to apply effects to it.
2. el - (*element*) The element on which the tip is based on.
### Example
myTips.addEvent('show', function(tip, el){
tip.addClass('someCustomClassBecauseTheTipIsVisible');
});
### Note
To override the default tip show behavior, you must either declare the onShow event in the options on initialization or remove the onShow event from the class yourself. Example:
var myTips = new Tips('.thisisatooltip', {
onShow: function(tip, el){
tip.setStyles({
visibility: 'hidden',
display: 'block'
}).fade('in');
}
});
//if you want to add this after init
myTips.removeEvents('show').addEvent('show', function(tip, el){
tip.setStyles({
visibility: 'hidden',
display: 'block'
}).fade('in');
});
Tips Event: hide {#Tips:hide}
---------------------------------
* (*function*) Fires when the Tip is starting to hide and by default sets the tip hidden.
### Signature
onHide(tip)
### Arguments
1. tip - (*element*) The tip element. Useful if you want to apply effects to it.
2. el - (*element*) The element on which the tip is based on.
### Example
myTips.addEvent('hide', function(tip, el){
tip.removeClass('someCustomClassBecauseTheTipIsVisible');
});
### Note
To override the default tip hide behavior, you must either declare the onHide event in the options on initialization or remove the onHide event from the class yourself. Example:
var myTips = new Tips('.thisisatooltip', {
onHide: function(tip, el){
tip.fade('out').get('tween').chain(function(){
tip.setStyle('display', 'none');
});
}
});
//if you want to add this after init
myTips.removeEvents('hide').addEvent('hide', function(tip, el){
tip.fade('out').get('tween').chain(function(){
tip.setStyle('display', 'none');
});
});
Tips Method: setTitle {#Tips:setTitle}
----------------------------------
Updates the tip title. Note that the title is re-assigned when the tip is hidden and displayed again; this method allows you to change it after it's visible.
### Syntax
myTips.setTitle(title);
### Arguments
1. title - (*mixed*) A collection of elements, a single Element, or a string of text. The former two being adopted into the tip the latter being set as its HTML.
### Returns
* (*object*) This Tips instance.
### Example
myTips.setTitle("I'm the new title!");
Tips Method: setText {#Tips:setText}
----------------------------------
Updates the tip text. Note that the text is re-assigned when the tip is hidden and displayed again; this method allows you to change it after it's visible.
### Syntax
myTips.setText(text);
### Arguments
1. text - (*mixed*) A collection of elements, a single Element, or a string of text. The former two being adopted into the tip the latter being set as its HTML.
### Returns
* (*object*) This Tips instance.
### Example
myTips.setText("I'm the new body text!");
Tips Method: attach {#Tips:attach}
----------------------------------
Attaches tooltips to elements. Useful to add more elements to a tips instance.
### Syntax
myTips.attach(elements);
### Arguments
1. elements - (*mixed*) A collection of elements, a string Selector, or an Element to apply the tooltips to.
### Returns
* (*object*) This Tips instance.
### Example
myTips.attach('a.thisisatip');
Tips Method: detach {#Tips:detach}
----------------------------------
Detaches tooltips from elements. Useful to remove elements from a tips instance.
### Syntax
myTips.detach(elements);
### Arguments
1. elements - (*mixed*) A collection of elements, a string Selector, or an Element to apply the tooltips to.
### Returns
* (*object*) This Tips instance.
### Example
myTips.detach('a.thisisatip');
Tips HTML Structure {#Tips:HTML}
--------------------------------
<div class="options.className"> //the className you pass in options will be assigned here.
<div class="tip-top"></div> //useful for styling
<div class="tip">
<div class="tip-title"></div>
<div class="tip-text"></div>
</div>
<div class="tip-bottom"></div> //useful for styling
</div>
Tips with storage {#Tips:Storage}
---------------------------------
You can also assign tips titles and contents via [Element Storage](/Element/Element/#ElementStorage).
### Example
#### HTML
<a id="tip1" href="http://mootools.net" title="mootools homepage" class="thisisatooltip" />
#### JavaScript
$('tip1').store('tip:title', 'custom title for tip 1');
$('tip1').store('tip:text', 'custom text for tip 1');
### Note
If you use tips storage you can use elements and / or html as tips title and text.
[Events]: /core/Class/Class.Extras#Events
[Options]: /core/Class/Class.Extras#Options
| {
"pile_set_name": "Github"
} |
package com.nihaojewelry.admin.shiro.JwtToken;
import org.apache.shiro.authc.AuthenticationToken;
/**
 * Shiro {@link AuthenticationToken} implementation that wraps a JSON Web
 * Token (JWT) string. The raw token serves as both the principal and the
 * credentials, allowing a JWT-aware realm to validate it directly.
 *
 * @author aodeng (https://aodeng.cc)
 * @since 19-4-29
 */
public class JWTToken implements AuthenticationToken {

    /*
     * AuthenticationToken extends Serializable; declare an explicit serial
     * version so serialized forms stay stable across future edits.
     */
    private static final long serialVersionUID = 1L;

    /** The raw JWT string; immutable once constructed. */
    private final String token;

    public JWTToken(String token) {
        this.token = token;
    }

    /** @return the JWT string, used as the principal (identity). */
    @Override
    public Object getPrincipal() {
        return token;
    }

    /** @return the JWT string, used as the credentials (proof of identity). */
    @Override
    public Object getCredentials() {
        return token;
    }
}
| {
"pile_set_name": "Github"
} |
<ui>
<menubar>
<menu name="Edit" action="EditAction">
<placeholder name="EditSelectedPlaceholder">
<menuitem name="EditEditInvoice" action="EditEditInvoiceAction"/>
<menuitem name="EditDuplicateInvoice" action="EditDuplicateInvoiceAction"/>
<menuitem name="EditPostInvoice" action="EditPostInvoiceAction"/>
<menuitem name="EditUnpostInvoice" action="EditUnpostInvoiceAction"/>
</placeholder>
</menu>
<menu name="View" action="ViewAction">
<placeholder name="ViewPlaceholder">
<separator name="ViewSep44"/>
<menuitem name="ViewSaveLayout" action="ViewSaveLayoutAction"/>
<menuitem name="ViewResetLayout" action="ViewResetLayoutAction"/>
<separator name="ViewSep45"/>
<menu name="SortOrder" action="SortOrderAction">
<menuitem name="SortStandard" action="SortStandardAction"/>
<separator name="ViewSep43"/>
<menuitem name="SortDate" action="SortDateAction"/>
<menuitem name="SortDateEntry" action="SortDateEntryAction"/>
<menuitem name="SortQuantity" action="SortQuantityAction"/>
<menuitem name="SortPrice" action="SortPriceAction"/>
<menuitem name="SortDescription" action="SortDescriptionAction"/>
</menu>
</placeholder>
</menu>
<menu name="Actions" action="ActionsAction">
<placeholder name="ActionsPlaceholder">
<menuitem name="RecordEntry" action="RecordEntryAction"/>
<menuitem name="CancelEntry" action="CancelEntryAction"/>
<menuitem name="DeleteEntry" action="DeleteEntryAction"/>
<menuitem name="BlankEntry" action="BlankEntryAction"/>
<separator name="ActionsSep4"/>
<menuitem name="DuplicateEntry" action="DuplicateEntryAction"/>
<menuitem name="UpEntry" action="EntryUpAction"/>
<menuitem name="DownEntry" action="EntryDownAction"/>
</placeholder>
</menu>
<placeholder name="AdditionalMenusPlaceholder">
<menu name="Business" action="BusinessAction">
<placeholder name="BusinessPlaceholderMiddle">
<separator name="BusinessSep1"/>
<menuitem name="BusinessLink" action="BusinessLinkAction"/>
<menuitem name="BusinessLinkOpen" action="BusinessLinkOpenAction"/>
<separator name="BusinessSep2"/>
<menuitem name="ToolsProcessPayment" action="ToolsProcessPaymentAction"/>
</placeholder>
</menu>
</placeholder>
<menu name="Reports" action="ReportsAction">
<placeholder name="ReportsPlaceholder">
<menuitem name="ReportsCompanyReport" action="ReportsCompanyReportAction"/>
</placeholder>
</menu>
</menubar>
<toolbar name="DefaultToolbar">
<placeholder name="ToolbarSavePlaceholder">
<toolitem name="ToolbarPrintInvoice" action="FilePrintAction"/>
</placeholder>
<placeholder name="DefaultToolbarPlaceholder">
<toolitem name="ToolbarNewInvoice" action="BusinessNewInvoiceAction"/>
<toolitem name="ToolbarEditInvoice" action="EditEditInvoiceAction"/>
<toolitem name="ToolbarDuplicateInvoice" action="EditDuplicateInvoiceAction"/>
<separator name="ToolbarSep65"/>
<toolitem name="ToolbarRecordEntry" action="RecordEntryAction"/>
<toolitem name="ToolbarCancelEntry" action="CancelEntryAction"/>
<toolitem name="ToolbarDeleteEntry" action="DeleteEntryAction"/>
<toolitem name="ToolbarDuplicateEntry" action="DuplicateEntryAction"/>
<toolitem name="ToolbarUpEntry" action="EntryUpAction"/>
<toolitem name="ToolbarDownEntry" action="EntryDownAction"/>
<toolitem name="ToolbarBlankEntry" action="BlankEntryAction"/>
<separator name="ToolbarSep68"/>
<toolitem name="ToolbarPostInvoice" action="EditPostInvoiceAction"/>
<toolitem name="ToolbarUnPostInvoice" action="EditUnpostInvoiceAction"/>
<toolitem name="ToolbarProcessPayment" action="ToolsProcessPaymentAction"/>
</placeholder>
</toolbar>
<popup name="MainPopup" action="FakeToplevel">
<placeholder name="PopupPlaceholder2">
<menuitem name="RecordEntry" action="RecordEntryAction"/>
<menuitem name="CancelEntry" action="CancelEntryAction"/>
<menuitem name="DeleteEntry" action="DeleteEntryAction"/>
<separator name="PopupSep1"/>
<menuitem name="DuplicateEntry" action="DuplicateEntryAction"/>
<menuitem name="UpEntry" action="EntryUpAction"/>
<menuitem name="DownEntry" action="EntryDownAction"/>
<menuitem name="BlankEntry" action="BlankEntryAction"/>
</placeholder>
</popup>
</ui>
| {
"pile_set_name": "Github"
} |
@import 'theme-neptune-all-rtl_1.css';
@import 'theme-neptune-all-rtl_2.css';
| {
"pile_set_name": "Github"
} |
PREHOOK: query: CREATE TABLE part_table_n1(key string, value string) PARTITIONED BY (partitionId int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@part_table_n1
POSTHOOK: query: CREATE TABLE part_table_n1(key string, value string) PARTITIONED BY (partitionId int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@part_table_n1
PREHOOK: query: INSERT OVERWRITE TABLE part_table_n1 PARTITION (partitionId=1)
SELECT key, value FROM src ORDER BY key, value LIMIT 100
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@part_table_n1@partitionid=1
POSTHOOK: query: INSERT OVERWRITE TABLE part_table_n1 PARTITION (partitionId=1)
SELECT key, value FROM src ORDER BY key, value LIMIT 100
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@part_table_n1@partitionid=1
POSTHOOK: Lineage: part_table_n1 PARTITION(partitionid=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: part_table_n1 PARTITION(partitionid=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: INSERT OVERWRITE TABLE part_table_n1 PARTITION (partitionId=2)
SELECT key, value FROM src1 ORDER BY key, value
PREHOOK: type: QUERY
PREHOOK: Input: default@src1
PREHOOK: Output: default@part_table_n1@partitionid=2
POSTHOOK: query: INSERT OVERWRITE TABLE part_table_n1 PARTITION (partitionId=2)
SELECT key, value FROM src1 ORDER BY key, value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src1
POSTHOOK: Output: default@part_table_n1@partitionid=2
POSTHOOK: Lineage: part_table_n1 PARTITION(partitionid=2).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: part_table_n1 PARTITION(partitionid=2).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: EXPLAIN
SELECT x.key AS key, count(1) AS cnt
FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
WHERE x.partitionId = 1 AND
y.partitionId = 2
GROUP BY x.key
PREHOOK: type: QUERY
PREHOOK: Input: default@part_table_n1
PREHOOK: Input: default@part_table_n1@partitionid=1
PREHOOK: Input: default@part_table_n1@partitionid=2
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN
SELECT x.key AS key, count(1) AS cnt
FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
WHERE x.partitionId = 1 AND
y.partitionId = 2
GROUP BY x.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@part_table_n1
POSTHOOK: Input: default@part_table_n1@partitionid=1
POSTHOOK: Input: default@part_table_n1@partitionid=2
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: x
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 100 Data size: 8700 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 100 Data size: 8700 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
Statistics: Num rows: 100 Data size: 8700 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 100 Data size: 8700 Basic stats: COMPLETE Column stats: COMPLETE
Execution mode: vectorized, llap
LLAP IO: all inputs
Map 4
Map Operator Tree:
TableScan
alias: y
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Execution mode: vectorized, llap
LLAP IO: all inputs
Reducer 2
Execution mode: llap
Reduce Operator Tree:
Merge Join Operator
condition map:
Inner Join 0 to 1
keys:
0 _col0 (type: string)
1 _col0 (type: string)
outputColumnNames: _col0
Statistics: Num rows: 40 Data size: 3480 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
keys: _col0 (type: string)
minReductionHashAggr: 0.825
mode: hash
outputColumnNames: _col0, _col1
Statistics: Num rows: 7 Data size: 665 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 7 Data size: 665 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: bigint)
Reducer 3
Execution mode: vectorized, llap
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: string)
mode: mergepartial
outputColumnNames: _col0, _col1
Statistics: Num rows: 7 Data size: 665 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
Statistics: Num rows: 7 Data size: 665 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: SELECT x.key AS key, count(1) AS cnt
FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
WHERE x.partitionId = 1 AND
y.partitionId = 2
GROUP BY x.key
PREHOOK: type: QUERY
PREHOOK: Input: default@part_table_n1
PREHOOK: Input: default@part_table_n1@partitionid=1
PREHOOK: Input: default@part_table_n1@partitionid=2
#### A masked pattern was here ####
POSTHOOK: query: SELECT x.key AS key, count(1) AS cnt
FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
WHERE x.partitionId = 1 AND
y.partitionId = 2
GROUP BY x.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@part_table_n1
POSTHOOK: Input: default@part_table_n1@partitionid=1
POSTHOOK: Input: default@part_table_n1@partitionid=2
#### A masked pattern was here ####
146 2
128 3
150 1
PREHOOK: query: EXPLAIN
SELECT x.key AS key, count(1) AS cnt
FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
WHERE x.partitionId = 1 AND
y.partitionId = 2
GROUP BY x.key
PREHOOK: type: QUERY
PREHOOK: Input: default@part_table_n1
PREHOOK: Input: default@part_table_n1@partitionid=1
PREHOOK: Input: default@part_table_n1@partitionid=2
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN
SELECT x.key AS key, count(1) AS cnt
FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
WHERE x.partitionId = 1 AND
y.partitionId = 2
GROUP BY x.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@part_table_n1
POSTHOOK: Input: default@part_table_n1@partitionid=1
POSTHOOK: Input: default@part_table_n1@partitionid=2
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: x
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 100 Data size: 8700 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 100 Data size: 8700 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
Statistics: Num rows: 100 Data size: 8700 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 100 Data size: 8700 Basic stats: COMPLETE Column stats: COMPLETE
Execution mode: vectorized, llap
LLAP IO: all inputs
Map 4
Map Operator Tree:
TableScan
alias: y
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Execution mode: vectorized, llap
LLAP IO: all inputs
Reducer 2
Execution mode: llap
Reduce Operator Tree:
Merge Join Operator
condition map:
Inner Join 0 to 1
keys:
0 _col0 (type: string)
1 _col0 (type: string)
outputColumnNames: _col0
Statistics: Num rows: 40 Data size: 3480 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
keys: _col0 (type: string)
minReductionHashAggr: 0.825
mode: hash
outputColumnNames: _col0, _col1
Statistics: Num rows: 7 Data size: 665 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 7 Data size: 665 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: bigint)
Reducer 3
Execution mode: vectorized, llap
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: string)
mode: mergepartial
outputColumnNames: _col0, _col1
Statistics: Num rows: 7 Data size: 665 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
Statistics: Num rows: 7 Data size: 665 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: SELECT x.key AS key, count(1) AS cnt
FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
WHERE x.partitionId = 1 AND
y.partitionId = 2
GROUP BY x.key
PREHOOK: type: QUERY
PREHOOK: Input: default@part_table_n1
PREHOOK: Input: default@part_table_n1@partitionid=1
PREHOOK: Input: default@part_table_n1@partitionid=2
#### A masked pattern was here ####
POSTHOOK: query: SELECT x.key AS key, count(1) AS cnt
FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
WHERE x.partitionId = 1 AND
y.partitionId = 2
GROUP BY x.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@part_table_n1
POSTHOOK: Input: default@part_table_n1@partitionid=1
POSTHOOK: Input: default@part_table_n1@partitionid=2
#### A masked pattern was here ####
146 2
128 3
150 1
PREHOOK: query: EXPLAIN
SELECT x.key AS key, count(1) AS cnt
FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
WHERE x.partitionId = 2 AND
y.partitionId = 2
GROUP BY x.key
PREHOOK: type: QUERY
PREHOOK: Input: default@part_table_n1
PREHOOK: Input: default@part_table_n1@partitionid=2
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN
SELECT x.key AS key, count(1) AS cnt
FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
WHERE x.partitionId = 2 AND
y.partitionId = 2
GROUP BY x.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@part_table_n1
POSTHOOK: Input: default@part_table_n1@partitionid=2
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: x
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Execution mode: vectorized, llap
LLAP IO: all inputs
Map 4
Map Operator Tree:
TableScan
alias: y
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Execution mode: vectorized, llap
LLAP IO: all inputs
Reducer 2
Execution mode: llap
Reduce Operator Tree:
Merge Join Operator
condition map:
Inner Join 0 to 1
keys:
0 _col0 (type: string)
1 _col0 (type: string)
outputColumnNames: _col0
Statistics: Num rows: 39 Data size: 3354 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
keys: _col0 (type: string)
minReductionHashAggr: 0.5897436
mode: hash
outputColumnNames: _col0, _col1
Statistics: Num rows: 16 Data size: 1504 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 16 Data size: 1504 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: bigint)
Reducer 3
Execution mode: vectorized, llap
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: string)
mode: mergepartial
outputColumnNames: _col0, _col1
Statistics: Num rows: 16 Data size: 1504 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
Statistics: Num rows: 16 Data size: 1504 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: SELECT x.key AS key, count(1) AS cnt
FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
WHERE x.partitionId = 2 AND
y.partitionId = 2
GROUP BY x.key
PREHOOK: type: QUERY
PREHOOK: Input: default@part_table_n1
PREHOOK: Input: default@part_table_n1@partitionid=2
#### A masked pattern was here ####
POSTHOOK: query: SELECT x.key AS key, count(1) AS cnt
FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
WHERE x.partitionId = 2 AND
y.partitionId = 2
GROUP BY x.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@part_table_n1
POSTHOOK: Input: default@part_table_n1@partitionid=2
#### A masked pattern was here ####
100
146 1
213 1
273 1
311 1
369 1
406 1
66 1
98 1
128 1
150 1
224 1
238 1
255 1
278 1
401 1
PREHOOK: query: EXPLAIN
SELECT x.key AS key, count(1) AS cnt
FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
WHERE x.partitionId = 2 AND
y.partitionId = 2
GROUP BY x.key
PREHOOK: type: QUERY
PREHOOK: Input: default@part_table_n1
PREHOOK: Input: default@part_table_n1@partitionid=2
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN
SELECT x.key AS key, count(1) AS cnt
FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
WHERE x.partitionId = 2 AND
y.partitionId = 2
GROUP BY x.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@part_table_n1
POSTHOOK: Input: default@part_table_n1@partitionid=2
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: x
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Execution mode: vectorized, llap
LLAP IO: all inputs
Map 4
Map Operator Tree:
TableScan
alias: y
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
Execution mode: vectorized, llap
LLAP IO: all inputs
Reducer 2
Execution mode: llap
Reduce Operator Tree:
Merge Join Operator
condition map:
Inner Join 0 to 1
keys:
0 _col0 (type: string)
1 _col0 (type: string)
outputColumnNames: _col0
Statistics: Num rows: 39 Data size: 3354 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
keys: _col0 (type: string)
minReductionHashAggr: 0.5897436
mode: hash
outputColumnNames: _col0, _col1
Statistics: Num rows: 16 Data size: 1504 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 16 Data size: 1504 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: bigint)
Reducer 3
Execution mode: vectorized, llap
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: string)
mode: mergepartial
outputColumnNames: _col0, _col1
Statistics: Num rows: 16 Data size: 1504 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
Statistics: Num rows: 16 Data size: 1504 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: SELECT x.key AS key, count(1) AS cnt
FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
WHERE x.partitionId = 2 AND
y.partitionId = 2
GROUP BY x.key
PREHOOK: type: QUERY
PREHOOK: Input: default@part_table_n1
PREHOOK: Input: default@part_table_n1@partitionid=2
#### A masked pattern was here ####
POSTHOOK: query: SELECT x.key AS key, count(1) AS cnt
FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
WHERE x.partitionId = 2 AND
y.partitionId = 2
GROUP BY x.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@part_table_n1
POSTHOOK: Input: default@part_table_n1@partitionid=2
#### A masked pattern was here ####
100
146 1
213 1
273 1
311 1
369 1
406 1
66 1
98 1
128 1
150 1
224 1
238 1
255 1
278 1
401 1
| {
"pile_set_name": "Github"
} |
# makefile include fragment for ported device systems
#
# Copyright 2016 Codethink Ltd
include $(srctree)/compat26/Makefile-pdk.inc
ccflags-y += -include $(srctree)/compat26/include/compat26.h
ccflags-y += -include $(srctree)/compat26/include/compat26_clk.h
| {
"pile_set_name": "Github"
} |
//--------------------------------------------------------------------------
// Copyright (C) 2014-2020 Cisco and/or its affiliates. All rights reserved.
// Copyright (C) 2005-2013 Sourcefire, Inc.
//
// This program is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License Version 2 as published
// by the Free Software Foundation. You may not use, modify or distribute
// this program under any other version of the GNU General Public License.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
//--------------------------------------------------------------------------
/*
** bnfa_search.c
**
** Basic multi-pattern search engine using Aho-Corasick NFA construction.
**
** Version 3.0 (based on acsmx.c and acsmx2.c)
**
** author: marc norton
** date: started 12/21/05
**
** General Design
** Aho-Corasick based NFA state machine.
** Compacted sparse storage mode for better performance.
** Up to 16 Million states + transitions (combined) in compacted sparse mode.
**
** ** Compacted sparse array storage **
**
** The primary data is held in one array.
** The patterns themselves are stored separately.
** The matching lists of patterns for each state are stored separately as well.
** The compacted sparse format improves caching/performance.
**
** word 1 : state ( only low 24 bits are used )
** word 2 : control word = cb << 24 | fs
** cb: control byte
** cb = mb | fb | nt
** mb : 8th bit - if set state has matching patterns bit
** fb : 7th bit - if set full storage array bit (256 entries used),
else sparse
** nt : 0-63= number of transitions (more than 63 requires full storage)
** fs: 24 bits for failure state transition index.
** word 3+ : transition word = input<<24 | next-state-index
** input : 8 bit character, input to state machine from search text
** next-state-index: 24 bits for index of next state
** (if we really need 16M states, we can add a state->index lookup array)
** ...repeat for each state ...
**
** * if a state is empty it has words 1 and 2, but no transition words.
**
** Construction:
**
** Patterns are added to a list based trie.
** The list based trie is compiled into a list based NFA with failure states.
** The list based NFA is converted to full or sparse format NFA.
** The Zero'th state sparse transitions may be stored in full format for
** performance.
** Sparse transition arrays are searched using linear and binary search
** strategies depending on the number of entries to search through in
** each state.
** The state machine in sparse mode is compacted into a single vector for
** better performance.
**
** Notes:
**
** The NFA can require twice the state transitions that a DFA uses. However,
** the construction of a DFA generates many additional transitions in each
** state which consumes significant additional memory. This particular
** implementation is best suited to environments where the very large memory
** requirements of a full state table implementation is not possible and/or
** the speed trade off is warranted to maintain a small memory footprint.
**
** Each state of an NFA usually has very few transitions but can have up to
** 256. It is important to not degenerate into a linear search so we utilize
** a binary search if there are more than 5 elements in the state to test for
** a match. This allows us to use a simple sparse memory design with an
** acceptable worst case search scenario. The binary search over 256 elements
** is limited to a max of 8 tests. The zero'th state may use a full 256 state
** array, so a quick index lookup provides the next state transition. The
** zero'th state is generally visited much more than other states.
**
** Compiling : gcc, Intel C/C++, Microsoft C/C++, each optimize differently.
** My studies have shown Intel C/C++ 9,8,7 to be the fastest, Microsoft 8,7,6
** is next fastest, and gcc 4.x,3.x,2.x is the slowest of the three. My
** testing has been mainly on x86. In general gcc does a poor job with
** optimizing this state machine for performance, compared to other less cache
** and prefetch sensitive algorithms. I've documented this behavior in a
** paper 'Optimizing Pattern Matching for IDS' (www.sourcefire.com,
** www.idsresearch.org).
**
** The code is sensitive to cache optimization and prefetching, as well as
** instruction pipelining. Aren't we all. To this end, the number of
** patterns, length of search text, and cpu cache L1,L2,L3 all affect
** performance. The relative performance of the sparse and full format NFA and
** DFA varies as you vary the pattern characteristics, and search text length,
** but strong performance trends are present and stable.
**
**
** BNFA API SUMMARY
**
** bnfa=bnfaNew(); create a state machine
** bnfaAddPattern(bnfa,..); add a pattern to the state machine
** bnfaCompile (bnfa,..) compile the state machine
** bnfaPrintInfo(bnfa); print memory usage and state info
** bnfaPrint(bnfa); print the state machine in total
** state=bnfaSearch(bnfa, ...,state); search a data buffer for a pattern match
** bnfaFree (bnfa); free the bnfa
**
**
** Reference - Efficient String matching: An Aid to Bibliographic Search
** Alfred V Aho and Margaret J Corasick
** Bell Laboratories
** Copyright (C) 1975 Association for Computing Machinery,Inc
**
** 12/4/06 - man - modified summary
** 6/26/07 - man - Added last_match tracking, and accounted for nocase/case by
** presetting the last match state, and reverting if we fail the
** case memcmp test for any rule in the states matching rule
** list. The states in the default matcher represent either
** case or nocase states, so they are dual mode, that makes
**               this a bit tricky. When we use the pure exact match, or
** pure don't care matching routines, we just track the last
** state, and never need to revert. This only tracks the
** single repeated states and repeated data.
** 01/2008 - man - added 2 phase pattern matcher using a pattern match queue.
** Text is scanned and matching states are queued, duplicate
** matches are dropped, and after the complete buffer scan the
** queued matches are processed. This improves caching
** performance, and reduces duplicate rule processing. The
** queue is limited in size and is flushed if it becomes full
** during the scan. This allows simple insertions. Tracking
** queue ops is optional, as this can impose a modest
** performance hit of a few percent.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "bnfa_search.h"
#include <list>
#include "log/messages.h"
#include "utils/stats.h"
#include "utils/util.h"
using namespace snort;
/*
* Used to initialize last state, states are limited to 0-16M
* so this will not conflict.
*/
#define LAST_STATE_INIT 0xffffffff
#define printf LogMessage
/*
* Case Translation Table - this guarantees we use
* indexed lookups for case conversion
*/
static unsigned xlatinit = 1;
static uint8_t xlatcase[BNFA_MAX_ALPHABET_SIZE];

/* Build the one-time uppercase translation table used for indexed
 * case conversion during matching; subsequent calls are no-ops. */
void bnfa_init_xlatcase()
{
    if ( xlatinit )
    {
        for ( int c = 0; c < BNFA_MAX_ALPHABET_SIZE; ++c )
            xlatcase[c] = (uint8_t)toupper(c);

        xlatinit = 0;
    }
}
/*
* Custom memory allocator
*/
/* Zero-initialized allocation of n bytes; when m is non-null the
 * running byte count it points at is increased by n. Returns nullptr
 * for a zero-size request. */
static void* bnfa_alloc(int n, int* m)
{
    if ( n == 0 )
        return nullptr;

    void* block = snort_calloc(n);

    if ( m != nullptr )
        *m += n;

    return block;
}
/* Release a bnfa_alloc'd block and, when m is non-null, deduct its
 * size n from the running byte count. Null p is a no-op (counter is
 * intentionally left untouched in that case). */
static void bnfa_free(void* p, int n, int* m)
{
    if ( !p )
        return;

    snort_free(p);

    if ( m != nullptr )
        *m -= n;
}
#define BNFA_MALLOC(n,memory) (bnfa_state_t*)bnfa_alloc(n,&(memory))
#define BNFA_FREE(p,n,memory) bnfa_free(p,n,&(memory))
/*
* Get next state from transition list
*/
/*
 * Look up the transition for 'input' out of 'state' in the list-form NFA.
 * State 0 keeps a full per-character vector (missing table reads as 0);
 * all other states keep a sparse linked list and report BNFA_FAIL_STATE
 * when no entry matches.
 */
static int _bnfa_list_get_next_state(bnfa_struct_t* bnfa, int state, int input)
{
    if ( state == 0 )
    {
        const bnfa_state_t* row = (bnfa_state_t*)bnfa->bnfaTransTable[0];
        return row ? row[input] : 0;
    }

    for ( bnfa_trans_node_t* t = bnfa->bnfaTransTable[state]; t; t = t->next )
    {
        if ( t->key == (unsigned)input )
            return t->next_state;
    }

    return BNFA_FAIL_STATE;
}
/*
* Put next state - head insertion, and transition updates
*/
/*
 * Install (or overwrite) the transition state --input--> next_state.
 * State 0 lazily allocates a full per-character vector; other states
 * use head-inserted list nodes. Returns 0 on success, -1 on bad
 * arguments or allocation failure. bnfaNumTrans is bumped only when a
 * brand-new transition is created, not when an existing one is retargeted.
 */
static int _bnfa_list_put_next_state(bnfa_struct_t* bnfa, int state, int input, int next_state)
{
    if ( state >= bnfa->bnfaMaxStates || input >= bnfa->bnfaAlphabetSize )
        return -1;

    if ( state == 0 )
    {
        bnfa_state_t* row = (bnfa_state_t*)bnfa->bnfaTransTable[0];

        if ( !row )
        {
            row = (bnfa_state_t*)BNFA_MALLOC(sizeof(bnfa_state_t)*bnfa->bnfaAlphabetSize,
                bnfa->list_memory);

            if ( !row )
                return -1;

            bnfa->bnfaTransTable[0] = (bnfa_trans_node_t*)row;
        }

        if ( row[input] )
        {
            /* transition already present - retarget it, count unchanged */
            row[input] = next_state;
            return 0;
        }

        row[input] = next_state;
    }
    else
    {
        /* overwrite an existing list entry if one matches this input */
        for ( bnfa_trans_node_t* t = bnfa->bnfaTransTable[state]; t; t = t->next )
        {
            if ( t->key == (unsigned)input )
            {
                t->next_state = next_state;
                return 0;
            }
        }

        /* definitely a new transition - head insert a fresh node */
        bnfa_trans_node_t* node =
            (bnfa_trans_node_t*)BNFA_MALLOC(sizeof(bnfa_trans_node_t),bnfa->list_memory);

        if ( !node )
            return -1;

        node->key = input;
        node->next_state = next_state;
        node->next = bnfa->bnfaTransTable[state];
        bnfa->bnfaTransTable[state] = node;
    }

    bnfa->bnfaNumTrans++;
    return 0;
}
/*
* Free the entire transition list table
*/
/*
 * Free the entire list-form transition table: the full vector stored
 * for state 0, each remaining state's transition node list, and the
 * table array itself. Always returns 0.
 */
static int _bnfa_list_free_table(bnfa_struct_t* bnfa)
{
    if ( !bnfa->bnfaTransTable )
        return 0;

    /* state 0 stores a full per-character vector rather than a node list */
    if ( bnfa->bnfaTransTable[0] )
    {
        BNFA_FREE(bnfa->bnfaTransTable[0],sizeof(bnfa_state_t)*bnfa->bnfaAlphabetSize,
            bnfa->list_memory);
    }

    for ( int i = 1; i < bnfa->bnfaMaxStates; i++ )
    {
        bnfa_trans_node_t* t = bnfa->bnfaTransTable[i];

        while ( t )
        {
            bnfa_trans_node_t* victim = t;
            t = t->next;
            BNFA_FREE(victim,sizeof(bnfa_trans_node_t),bnfa->list_memory);
        }
    }

    /* the early return above guarantees the table is non-null here, so no
     * second null check is needed (the original re-checked redundantly) */
    BNFA_FREE(bnfa->bnfaTransTable,sizeof(bnfa_trans_node_t*)*bnfa->bnfaMaxStates,
        bnfa->list_memory);
    bnfa->bnfaTransTable = nullptr;

    return 0;
}
/*
 * For every state with matching patterns, hand each pattern's userdata
 * to the agent: negated patterns go into the state's neg_list, the rest
 * into its rule_option_tree. A final build_tree(nullptr) call finalizes
 * the tree for any state that had matches.
 */
static void bnfaBuildMatchStateTrees(SnortConfig* sc, bnfa_struct_t* bnfa)
{
    bnfa_match_node_t** match_lists = bnfa->bnfaMatchList;

    for ( int state = 0; state < bnfa->bnfaNumStates; state++ )
    {
        for ( bnfa_match_node_t* mn = match_lists[state]; mn; mn = mn->next )
        {
            bnfa_pattern_t* pat = (bnfa_pattern_t*)mn->data;

            if ( !pat->userdata )
                continue;

            if ( pat->negative )
                bnfa->agent->negate_list(pat->userdata, &match_lists[state]->neg_list);
            else
                bnfa->agent->build_tree(sc, pat->userdata, &match_lists[state]->rule_option_tree);
        }

        /* last call finalizes the tree for this state */
        if ( match_lists[state] )
            bnfa->agent->build_tree(sc, nullptr, &match_lists[state]->rule_option_tree);
    }
}
#ifdef ALLOW_LIST_PRINT
/*
 * Print the transition list table to stdout
 *
 * Debug-only dump (compiled under ALLOW_LIST_PRINT).  State 0 is stored
 * as a full next-state vector; states 1..N as transition-node lists.
 * For each state it prints the transitions and any terminating patterns.
 */
static int _bnfa_list_print_table(bnfa_struct_t* bnfa)
{
    int i;
    bnfa_trans_node_t* t;
    bnfa_match_node_t* mn;
    bnfa_pattern_t* patrn;

    if ( !bnfa->bnfaTransTable )
    {
        return 0;
    }

    printf("Print Transition Table- %d active states\n",bnfa->bnfaNumStates);

    for (i=0; i< bnfa->bnfaNumStates; i++)
    {
        printf("state %3d: ",i);

        if ( i == 0 )
        {
            /* state 0: full row vector of next states */
            int k;
            bnfa_state_t* p = (bnfa_state_t*)bnfa->bnfaTransTable[0];

            if (!p)
                continue;

            for (k=0; k<bnfa->bnfaAlphabetSize; k++)
            {
                if ( p[k] == 0 )
                    continue;

                /* printable byte values are shown as characters */
                if ( isascii((int)p[k]) && isprint((int)p[k]) )
                    printf("%3c->%-5d\t",k,p[k]);
                else
                    printf("%3d->%-5d\t",k,p[k]);
            }
        }
        else
        {
            /* other states: walk the sparse transition node list */
            t = bnfa->bnfaTransTable[i];

            while ( t )
            {
                if ( isascii((int)t->key) && isprint((int)t->key) )
                    printf("%3c->%-5d\t",t->key,t->next_state);
                else
                    printf("%3d->%-5d\t",t->key,t->next_state);

                t = t->next;
            }
        }

        /* print patterns that terminate at this state */
        mn =bnfa->bnfaMatchList[i];

        while ( mn )
        {
            patrn =(bnfa_pattern_t*)mn->data;
            printf("%.*s ",patrn->n,patrn->casepatrn);
            mn = mn->next;
        }

        printf("\n");
    }
    return 0;
}
#endif
/*
 * Converts a single row of states from list format to a full format
 *
 * 'full' receives one next-state entry per alphabet symbol.  Returns the
 * number of entries written (the full alphabet size for state 0), or -1
 * if 'state' is out of range.
 */
static int _bnfa_list_conv_row_to_full(bnfa_struct_t* bnfa, bnfa_state_t state, bnfa_state_t* full)
{
    const size_t row_bytes = sizeof(bnfa_state_t) * bnfa->bnfaAlphabetSize;

    /* protects 'full' against overflow */
    if ( (int)state >= bnfa->bnfaMaxStates )
        return -1;

    if ( state == 0 )
    {
        /* state 0 already holds a full row vector */
        if ( bnfa->bnfaTransTable[0] )
            memcpy(full, bnfa->bnfaTransTable[0], row_bytes);
        else
            memset(full, 0, row_bytes);

        return bnfa->bnfaAlphabetSize;
    }

    /* other states: expand the sparse node list into the full row */
    memset(full, 0, row_bytes);

    int count = 0;

    for ( bnfa_trans_node_t* t = bnfa->bnfaTransTable[state];
          t && t->key < BNFA_MAX_ALPHABET_SIZE; t = t->next )
    {
        full[t->key] = t->next_state;
        count++;
    }

    return count;
}
/*
 * Add pattern characters to the initial upper case trie
 * unless Exact has been specified, in which case all patterns
 * are assumed to be case specific.
 *
 * Walks the existing trie as far as the pattern matches, then appends one
 * new state per remaining pattern byte, and finally links the pattern onto
 * the MatchList of the terminal state.  Returns 0 on success, -1 on
 * allocation failure or state overflow.
 */
static int _bnfa_add_pattern_states(bnfa_struct_t* bnfa, bnfa_pattern_t* p)
{
    int state, next, n;
    uint8_t* pattern;
    bnfa_match_node_t* pmn;

    n = p->n;
    pattern = p->casepatrn;
    state = 0;

    /*
     * Match up pattern with existing states
     */
    for (; n > 0; pattern++, n--)
    {
        /* bytes are translated through xlatcase unless full-case mode */
        if ( bnfa->bnfaCaseMode == BNFA_CASE )
            next = _bnfa_list_get_next_state(bnfa,state,*pattern);
        else
            next = _bnfa_list_get_next_state(bnfa,state,xlatcase[*pattern]);

        if ( next == (int)BNFA_FAIL_STATE || next == 0 )
        {
            break;
        }
        state = next;
    }

    /*
     * Add new states for the rest of the pattern bytes, 1 state per byte, uppercase
     */
    for (; n > 0; pattern++, n--)
    {
        /* bnfaNumStates is both the counter and the id of the new state */
        bnfa->bnfaNumStates++;

        if ( bnfa->bnfaCaseMode == BNFA_CASE )
        {
            if ( _bnfa_list_put_next_state(bnfa,state,*pattern,bnfa->bnfaNumStates) < 0 )
                return -1;
        }
        else
        {
            if ( _bnfa_list_put_next_state(bnfa,state,xlatcase[*pattern],bnfa->bnfaNumStates) <
                0 )
                return -1;
        }

        state = bnfa->bnfaNumStates;

        /* guard against exceeding the pre-sized tables */
        if ( bnfa->bnfaNumStates >= bnfa->bnfaMaxStates )
        {
            return -1;
        }
    }

    /* Add a pattern to the list of patterns terminated at this state */
    pmn = (bnfa_match_node_t*)BNFA_MALLOC(sizeof(bnfa_match_node_t),bnfa->matchlist_memory);

    if ( !pmn )
    {
        return -1;
    }

    pmn->data = p;
    pmn->next = bnfa->bnfaMatchList[state];  /* insert at head */

    bnfa->bnfaMatchList[state] = pmn;

    return 0;
}
#ifdef XXXXX
/*
 * NOTE(review): dead code -- XXXXX does not appear to be defined anywhere;
 * a live definition of _bnfa_list_get_next_state must exist elsewhere,
 * since it is called above.  Kept for reference only.
 *
 * Look up the next state for 'input' from 'state'.  State 0 is stored as
 * a full row vector; other states as sparse node lists.
 */
int _bnfa_list_get_next_state(bnfa_struct_t* bnfa, int state, int input)
{
    if ( state == 0 ) /* Full set of states always */
    {
        bnfa_state_t* p = (bnfa_state_t*)bnfa->bnfaTransTable[0];
        if (!p)
        {
            return 0;
        }
        return p[input];
    }
    else
    {
        bnfa_trans_node_t* t = bnfa->bnfaTransTable[state];
        while ( t )
        {
            if ( t->key == (unsigned)input )
            {
                return t->next_state;
            }
            t=t->next;
        }
        return BNFA_FAIL_STATE; /* Fail state */
    }
}
#endif
#ifdef ENABLE_BNFA_FAIL_STATE_OPT
/* used only by KcontainsJ() */
/* Expand a sparse transition node list into a full per-symbol row.
 * Returns the number of transitions found (0 for an empty list). */
static int _bnfa_conv_node_to_full(bnfa_trans_node_t* t, bnfa_state_t* full)
{
    memset(full, 0, sizeof(bnfa_state_t) * BNFA_MAX_ALPHABET_SIZE);

    int count = 0;

    for ( ; t && t->key < BNFA_MAX_ALPHABET_SIZE; t = t->next )
    {
        full[t->key] = t->next_state;
        count++;
    }

    return count;
}
#ifdef XXXX
// containment test - test if all of tj transitions are in tk
// NOTE(review): dead code (XXXX undefined); O(|tk|*|tj|) variant superseded
// by the table-based KcontainsJ() below.  Kept for reference only.
static int KcontainsJx(bnfa_trans_node_t* tk, bnfa_trans_node_t* tj)
{
    while ( tj )
    {
        int found=0;

        /* linear scan of tk for tj's key */
        for ( bnfa_trans_node_t* t=tk; t; t=t->next )
        {
            if ( tj->key == t->key )
            {
                found=1;
                break;
            }
        }
        if ( !found )
            return 0;

        tj=tj->next; /* get next tj key */
    }
    return 1;
}
#endif
/* Containment test: non-zero if every transition key of 'tj' is also
 * present in 'tk'.  Expands tk once into a lookup table, so each tj key
 * is checked in O(1). */
static int KcontainsJ(bnfa_trans_node_t* tk, bnfa_trans_node_t* tj)
{
    bnfa_state_t full[BNFA_MAX_ALPHABET_SIZE];

    if ( !_bnfa_conv_node_to_full(tk,full) )
        return 1; /* empty state */

    for ( ; tj; tj = tj->next )
    {
        if ( !full[tj->key] )
            return 0;
    }

    return 1;
}
/*
 * 1st optimization - eliminate duplicate fail states
 *
 * check if a fail state is a subset of the current state,
 * if so recurse to the next fail state, and so on.
 *
 * (Dead "#if 0" debug-counter scaffolding removed for clarity.)
 */
static int _bnfa_opt_nfa(bnfa_struct_t* bnfa)
{
    bnfa_state_t* FailState = bnfa->bnfaFailState;

    /* start at state 2: states 0 and 1 are skipped by the original loop */
    for (int k=2; k<bnfa->bnfaNumStates; k++)
    {
        int fs = FailState[k];
        int fr = fs;

        /* walk the fail chain while each fail state's transitions are
         * fully covered by the current state's transitions */
        while ( fs && KcontainsJ(bnfa->bnfaTransTable[k],bnfa->bnfaTransTable[fs]) )
        {
            fs = FailState[fs];
        }

        if ( fr != fs )
        {
            FailState[ k ] = fs;
        }
    }

    return 0;
}
#endif // ENABLE_BNFA_FAIL_STATE_OPT
/*
 * Build a non-deterministic finite automata using Aho-Corasick construction
 * The keyword trie must already be built via _bnfa_add_pattern_states()
 *
 * Classic BFS over the trie: assigns each state's failure state and merges
 * the failure target's MatchList into the state's own list.
 */
static int _bnfa_build_nfa(bnfa_struct_t* bnfa)
{
    bnfa_state_t* FailState = bnfa->bnfaFailState;
    bnfa_match_node_t** MatchList = bnfa->bnfaMatchList;
    bnfa_match_node_t* mlist;
    bnfa_match_node_t* px;

    /* BFS work list.  Elements are appended while the range-for below is
     * iterating; std::list keeps iterators valid across push_back, so the
     * appended states are visited in FIFO order. */
    std::list<int> queue;

    /* Add the state 0 transitions 1st,
     * the states at depth 1, fail to state 0
     */
    for (int i = 0; i < bnfa->bnfaAlphabetSize; i++)
    {
        /* note that state zero does not fail,
         * it just returns 0..nstates-1
         */
        int s = _bnfa_list_get_next_state(bnfa,0,i);
        if ( s ) /* don't bother adding state zero */
        {
            queue.emplace_back(s);
            FailState[s] = 0;
        }
    }

    /* Build the fail state successive layer of transitions */
    for ( auto r : queue )
    {
        /* Find Final States for any Failure */
        for (int i = 0; i<bnfa->bnfaAlphabetSize; i++)
        {
            int fs, next;

            int s = _bnfa_list_get_next_state(bnfa,r,i);
            if ( s == (int)BNFA_FAIL_STATE )
                continue;

            queue.emplace_back(s);
            fs = FailState[r];

            /*
             * Locate the next valid state for 'i' starting at fs
             */
            while ( (next=_bnfa_list_get_next_state(bnfa,fs,i)) == (int)BNFA_FAIL_STATE )
            {
                fs = FailState[fs];
            }

            /*
             * Update 's' state failure state to point to the next valid state
             */
            FailState[s] = next;

            /*
             * Copy 'next' states MatchList into 's' states MatchList,
             * we just create a new list nodes, the patterns are not copied.
             */
            for ( mlist = MatchList[next]; mlist; mlist = mlist->next)
            {
                /* Dup the node, don't copy the data */
                px = (bnfa_match_node_t*)BNFA_MALLOC(sizeof(bnfa_match_node_t),
                    bnfa->matchlist_memory);
                if ( !px )
                {
                    /* NOTE(review): returns 0 (apparent success) on allocation
                     * failure -- confirm this is intentional */
                    return 0;
                }
                px->data = mlist->data;
                px->next = MatchList[s]; /* insert at head */
                MatchList[s] = px;
            }
        }
    }

#ifdef ENABLE_BNFA_FAIL_STATE_OPT
    // FIXIT-L low priority performance issue: bnfa fail state reduction
    // optimize the failure states
    if ( bnfa->bnfaOpt )
        _bnfa_opt_nfa(bnfa);
#endif

    return 0;
}
#ifdef ALLOW_NFA_FULL
/*
 * Convert state machine to full format
 *
 * Allocates one full next-state row per state and expands each list-format
 * row into it.  Returns 0 on success, -1 on allocation failure.
 */
static int _bnfa_conv_list_to_full(bnfa_struct_t* bnfa)
{
    int k;
    bnfa_state_t* p;
    bnfa_state_t** NextState = bnfa->bnfaNextState;

    for (k=0; k<bnfa->bnfaNumStates; k++)
    {
        p = BNFA_MALLOC(sizeof(bnfa_state_t)*bnfa->bnfaAlphabetSize,bnfa->nextstate_memory);
        if (!p)
        {
            return -1;
        }

        _bnfa_list_conv_row_to_full(bnfa, (bnfa_state_t)k, p);

        NextState[k] = p; /* now we have a full format row vector */
    }
    return 0;
}
#endif
/*
 * Convert state machine to csparse format
 *
 * Merges state/transition/failure arrays into one.
 *
 * For each state we use a state-word followed by the transition list for
 * the state sw(state 0 )...tl(state 0) sw(state 1)...tl(state1) sw(state2)...
 * tl(state2) ....
 *
 * The transition and failure states are replaced with the start index of
 * transition state, this eliminates the NextState[] lookup....
 *
 * The compaction of multiple arrays into a single array reduces the total
 * number of states that can be handled, since the max index is 2^24-1
 * but the merged array now holds state words and control words as well
 * as transitions.
 *
 * Three passes: (1) size the merged array, (2) emit state/control words
 * and transitions, (3) rewrite stored state ids as array indices.
 */
static int _bnfa_conv_list_to_csparse_array(bnfa_struct_t* bnfa)
{
    int m, k, i, nc;
    bnfa_state_t state;
    bnfa_state_t* FailState = (bnfa_state_t*)bnfa->bnfaFailState;
    bnfa_state_t* ps; /* transition list */
    bnfa_state_t* pi; /* state indexes into ps */
    bnfa_state_t ps_index=0;
    unsigned nps;
    bnfa_state_t full[BNFA_MAX_ALPHABET_SIZE];

    /* Pass 1: count total state transitions, account for state and control words */
    nps = 0;
    for (k=0; k<bnfa->bnfaNumStates; k++)
    {
        nps++; /* state word */
        nps++; /* control word */

        /* count transitions */
        nc = 0;
        _bnfa_list_conv_row_to_full(bnfa, (bnfa_state_t)k, full);
        for ( i=0; i<bnfa->bnfaAlphabetSize; i++ )
        {
            state = full[i] & BNFA_SPARSE_MAX_STATE;
            if ( state != 0 )
            {
                nc++;
            }
        }

        /* add in transition count: state 0 (if forced) and dense rows use
         * full-format storage, everything else stores only live transitions */
        if ( (k == 0 && bnfa->bnfaForceFullZeroState) || nc > BNFA_SPARSE_MAX_ROW_TRANSITIONS )
        {
            nps += BNFA_MAX_ALPHABET_SIZE;
        }
        else
        {
            for ( i=0; i<bnfa->bnfaAlphabetSize; i++ )
            {
                state = full[i] & BNFA_SPARSE_MAX_STATE;
                if ( state != 0 )
                {
                    nps++;
                }
            }
        }
    }

    /* check if we have too many states + transitions */
    if ( nps > BNFA_SPARSE_MAX_STATE )
    {
        /* Fatal */
        return -1;
    }

    /*
      Alloc The Transition List - we need an array of bnfa_state_t items of size 'nps'
    */
    ps = BNFA_MALLOC(nps*sizeof(bnfa_state_t),bnfa->nextstate_memory);
    if ( !ps )
    {
        /* Fatal */
        return -1;
    }
    /* ownership of 'ps' passes to the struct; bnfaFree releases it */
    bnfa->bnfaTransList = ps;

    /*
      State Index list for pi - we need an array of bnfa_state_t items of size 'NumStates'
      (temporary; freed before returning)
    */
    pi = BNFA_MALLOC(bnfa->bnfaNumStates*sizeof(bnfa_state_t),bnfa->nextstate_memory);
    if ( !pi )
    {
        /* Fatal */
        return -1;
    }

    /*
      Pass 2: Build the Transition List Array
    */
    for (k=0; k<bnfa->bnfaNumStates; k++)
    {
        pi[k] = ps_index; /* save index of start of state 'k' */

        ps[ ps_index ] = k; /* save the state we're in as the 1st word */

        ps_index++;     /* skip past state word */

        /* convert state 'k' to full format */
        _bnfa_list_conv_row_to_full(bnfa, (bnfa_state_t)k, full);

        /* count transitions */
        nc = 0;
        for ( i=0; i<bnfa->bnfaAlphabetSize; i++ )
        {
            state = full[i] & BNFA_SPARSE_MAX_STATE;
            if ( state != 0 )
            {
                nc++;
            }
        }

        /* add a full state or a sparse state */
        if ( (k == 0 && bnfa->bnfaForceFullZeroState) ||
            nc > BNFA_SPARSE_MAX_ROW_TRANSITIONS )
        {
            /* set the control word: full bit | fail state | match bit */
            ps[ps_index] = BNFA_SPARSE_FULL_BIT;
            ps[ps_index] |= FailState[k] & BNFA_SPARSE_MAX_STATE;
            if ( bnfa->bnfaMatchList[k] )
            {
                ps[ps_index] |= BNFA_SPARSE_MATCH_BIT;
            }
            ps_index++;

            /* copy the transitions */
            _bnfa_list_conv_row_to_full(bnfa, (bnfa_state_t)k, &ps[ps_index]);

            ps_index += BNFA_MAX_ALPHABET_SIZE;  /* add in 256 transitions */
        }
        else
        {
            /* set the control word: transition count | fail state | match bit */
            ps[ps_index] = nc<<BNFA_SPARSE_COUNT_SHIFT;
            ps[ps_index] |= FailState[k]&BNFA_SPARSE_MAX_STATE;
            if ( bnfa->bnfaMatchList[k] )
            {
                ps[ps_index] |= BNFA_SPARSE_MATCH_BIT;
            }
            ps_index++;

            /* add in the transitions: each word is input<<shift | next state */
            for ( m=0, i=0; i<bnfa->bnfaAlphabetSize && m<nc; i++ )
            {
                state = full[i] & BNFA_SPARSE_MAX_STATE;
                if ( state != 0 )
                {
                    ps[ps_index++] = (i<<BNFA_SPARSE_VALUE_SHIFT) | state;
                    m++;
                }
            }
        }
    }

    /* sanity check we have not overflowed our buffer */
    if ( ps_index > nps )
    {
        /* Fatal */
        BNFA_FREE(pi,bnfa->bnfaNumStates*sizeof(bnfa_state_t),bnfa->nextstate_memory);
        return -1;
    }

    /*
      Pass 3: Replace Transition states with Transition Indices.
      This allows us to skip using NextState[] to locate the next state
      This limits us to <16M transitions due to 24 bit state sizes, and the fact
      we have now converted next-state fields to next-index fields in this array,
      and we have merged the next-state and state arrays.
    */
    ps_index=0;
    for (k=0; k< bnfa->bnfaNumStates; k++ )
    {
        if ( pi[k] >= nps )
        {
            /* Fatal */
            BNFA_FREE(pi,bnfa->bnfaNumStates*sizeof(bnfa_state_t),bnfa->nextstate_memory);
            return -1;
        }

        //ps_index = pi[k];  /* get index of next state */
        ps_index++;        /* skip state id */

        /* Full Format */
        if ( ps[ps_index] & BNFA_SPARSE_FULL_BIT )
        {
            /* Do the fail-state: keep the high control byte, remap the state */
            ps[ps_index] = ( ps[ps_index] & 0xff000000 ) |
                ( pi[ ps[ps_index] & BNFA_SPARSE_MAX_STATE ] );
            ps_index++;

            /* Do the transition-states */
            for (i=0; i<BNFA_MAX_ALPHABET_SIZE; i++)
            {
                ps[ps_index] = ( ps[ps_index] & 0xff000000 ) |
                    ( pi[ ps[ps_index] & BNFA_SPARSE_MAX_STATE ] );
                ps_index++;
            }
        }
        /* Sparse Format */
        else
        {
            nc = (ps[ps_index] & BNFA_SPARSE_COUNT_BITS)>>BNFA_SPARSE_COUNT_SHIFT;

            /* Do the cw = [cb | fail-state] */
            ps[ps_index] = ( ps[ps_index] & 0xff000000 ) |
                ( pi[ ps[ps_index] & BNFA_SPARSE_MAX_STATE ] );
            ps_index++;

            /* Do the transition-states */
            for (i=0; i<nc; i++)
            {
                ps[ps_index] = ( ps[ps_index] & 0xff000000 ) |
                    ( pi[ ps[ps_index] & BNFA_SPARSE_MAX_STATE ] );
                ps_index++;
            }
        }

        /* check for buffer overflow again */
        if ( ps_index > nps )
        {
            /* Fatal */
            BNFA_FREE(pi,bnfa->bnfaNumStates*sizeof(bnfa_state_t),bnfa->nextstate_memory);
            return -1;
        }
    }

    BNFA_FREE(pi,bnfa->bnfaNumStates*sizeof(bnfa_state_t),bnfa->nextstate_memory);

    return 0;
}
/*
 * Print the state machine - rather verbose
 *
 * Supports the SPARSE format (and FULL format when ALLOW_NFA_FULL is
 * compiled in).  For each state, prints control flags, fail state,
 * transitions, and any matching patterns.
 */
void bnfaPrint(bnfa_struct_t* bnfa)
{
    int k;
    bnfa_match_node_t** MatchList;
    bnfa_match_node_t* mlist;
    int ps_index=0;
    bnfa_state_t* ps=nullptr;

    if ( !bnfa )
        return;

    MatchList = bnfa->bnfaMatchList;

    if ( !bnfa->bnfaNumStates )
        return;

    if ( bnfa->bnfaFormat ==BNFA_SPARSE )
    {
        printf("Print NFA-SPARSE state machine : %d active states\n", bnfa->bnfaNumStates);
        ps = bnfa->bnfaTransList;
        if ( !ps )
            return;
    }

#ifdef ALLOW_NFA_FULL
    else if ( bnfa->bnfaFormat ==BNFA_FULL )
    {
        printf("Print NFA-FULL state machine : %d active states\n", bnfa->bnfaNumStates);
    }
#endif

    for (k=0; k<bnfa->bnfaNumStates; k++)
    {
        printf(" state %-4d fmt=%d ",k,bnfa->bnfaFormat);

        if ( bnfa->bnfaFormat == BNFA_SPARSE )
        {
            unsigned i,cw,fs,nt,fb,mb;

            ps_index++; /* skip state number */

            cw = ps[ps_index]; /* control word */
            /* NOTE(review): fb/mb/nt all shift by BNFA_SPARSE_VALUE_SHIFT here,
             * while the search path uses BNFA_SPARSE_COUNT_SHIFT for the
             * transition count -- confirm the printed values are as intended */
            fb = (cw & BNFA_SPARSE_FULL_BIT)>>BNFA_SPARSE_VALUE_SHIFT;   /* full storage bit */
            mb = (cw & BNFA_SPARSE_MATCH_BIT)>>BNFA_SPARSE_VALUE_SHIFT;  /* matching state bit */
            nt = (cw & BNFA_SPARSE_COUNT_BITS)>>BNFA_SPARSE_VALUE_SHIFT; /* number of transitions 0-63 */
            fs = (cw & BNFA_SPARSE_MAX_STATE);                           /* fail state */

            ps_index++;  /* skip control word */

            printf("mb=%3u fb=%3u fs=%-4u ",mb,fb,fs);

            if ( fb )
            {
                /* full row: one entry per alphabet symbol */
                printf(" nt=%-3d : ",bnfa->bnfaAlphabetSize);

                for ( i=0; i<(unsigned)bnfa->bnfaAlphabetSize; i++, ps_index++ )
                {
                    if ( ps[ps_index] == 0 )
                        continue;

                    if ( isascii((int)i) && isprint((int)i) )
                        printf("%3c->%-6d\t",i,ps[ps_index]);
                    else
                        printf("%3d->%-6d\t",i,ps[ps_index]);
                }
            }
            else
            {
                /* sparse row: nt packed (input | next-state) words */
                printf(" nt=%-3d : ",nt);

                for ( i=0; i<nt; i++, ps_index++ )
                {
                    if ( isascii(ps[ps_index]>>BNFA_SPARSE_VALUE_SHIFT) &&
                        isprint(ps[ps_index]>>BNFA_SPARSE_VALUE_SHIFT) )
                        printf("%3c->%-6d\t",ps[ps_index]>>BNFA_SPARSE_VALUE_SHIFT,ps[ps_index] &
                            BNFA_SPARSE_MAX_STATE);
                    else
                        printf("%3d->%-6d\t",ps[ps_index]>>BNFA_SPARSE_VALUE_SHIFT,ps[ps_index] &
                            BNFA_SPARSE_MAX_STATE);
                }
            }
        }

#ifdef ALLOW_NFA_FULL
        else if ( bnfa->bnfaFormat == BNFA_FULL )
        {
            bnfa_state_t** NextState = (bnfa_state_t**)bnfa->bnfaNextState;

            if ( !NextState )
                continue;

            bnfa_state_t* p = NextState[k];

            printf("fs=%-4d nc=256 ",bnfa->bnfaFailState[k]);

            for ( int i=0; i<bnfa->bnfaAlphabetSize; i++ )
            {
                bnfa_state_t state = p[i];

                if ( state != 0 && state != BNFA_FAIL_STATE )
                {
                    if ( isascii(i) && isprint(i) )
                        printf("%3c->%-5d\t",i,state);
                    else
                        printf("%3d->%-5d\t",i,state);
                }
            }
        }
#endif

        printf("\n");

        /* dump patterns that terminate at this state */
        if ( MatchList[k] )
            printf("---MatchList For State %d\n",k);

        for ( mlist = MatchList[k];
            mlist!= nullptr;
            mlist = mlist->next )
        {
            bnfa_pattern_t* pat;
            pat = (bnfa_pattern_t*)mlist->data;
            printf("---pattern : %.*s\n",pat->n,pat->casepatrn);
        }
    }
}
/*
 * Create a new AC state machine
 *
 * Allocates the struct, applies the default configuration (sparse format,
 * per-pattern case, full alphabet, forced full state-0 row) and attaches
 * the agent.  Returns nullptr on allocation failure.
 */
bnfa_struct_t* bnfaNew(const MpseAgent* agent)
{
    int bnfa_memory=0;
    bnfa_struct_t* bnfa = (bnfa_struct_t*)BNFA_MALLOC(sizeof(bnfa_struct_t),bnfa_memory);

    if ( !bnfa )
        return nullptr;

    bnfa->bnfaOpt = 0;
    bnfa->bnfaCaseMode = BNFA_PER_PAT_CASE;
    bnfa->bnfaFormat = BNFA_SPARSE;
    bnfa->bnfaAlphabetSize = BNFA_MAX_ALPHABET_SIZE;
    bnfa->bnfaForceFullZeroState = 1;
    bnfa->bnfa_memory = sizeof(bnfa_struct_t);
    bnfa->agent = agent;

    return bnfa;
}
/* Enable (non-zero) or disable the duplicate fail-state optimization
 * applied during NFA construction (see the bnfaOpt check in
 * _bnfa_build_nfa). */
void bnfaSetOpt(bnfa_struct_t* p, int flag)
{
    p->bnfaOpt=flag;
}
/* Set the case-handling mode.  Only the three known modes are accepted;
 * any other value leaves the current mode unchanged. */
void bnfaSetCase(bnfa_struct_t* p, int flag)
{
    switch ( flag )
    {
    case BNFA_PER_PAT_CASE:
    case BNFA_CASE:
    case BNFA_NOCASE:
        p->bnfaCaseMode = flag;
        break;
    default:
        break;  /* unknown mode: ignore */
    }
}
/*
 * Free all memory owned by the state machine, including the struct itself
 */
void bnfaFree(bnfa_struct_t* bnfa)
{
    int i;
    bnfa_pattern_t* patrn;

    /* per-state cleanup: match lists (and FULL-format next-state rows) */
    for (i = 0; i < bnfa->bnfaNumStates; i++)
    {
        /* free match list entries */
        bnfa_match_node_t* mlist = bnfa->bnfaMatchList[i];

        while (mlist)
        {
            bnfa_match_node_t* ilist = mlist;
            mlist = mlist->next;

            /* agent-owned structures are released via the agent callbacks */
            if (ilist->rule_option_tree && bnfa->agent)
            {
                bnfa->agent->tree_free(&(ilist->rule_option_tree));
            }

            if (ilist->neg_list && bnfa->agent)
            {
                bnfa->agent->list_free(&(ilist->neg_list));
            }

            BNFA_FREE(ilist,sizeof(bnfa_match_node_t),bnfa->matchlist_memory);
        }
        bnfa->bnfaMatchList[i] = nullptr;

#ifdef ALLOW_NFA_FULL
        /* free next state entries */
        if ( bnfa->bnfaFormat==BNFA_FULL )  /* Full format */
        {
            if ( bnfa->bnfaNextState[i] )
            {
                BNFA_FREE(bnfa->bnfaNextState[i],bnfa->bnfaAlphabetSize*sizeof(bnfa_state_t),
                    bnfa->nextstate_memory);
            }
        }
#endif
    }

    /* Free patterns (pattern bytes, user data via agent, then the node) */
    patrn = bnfa->bnfaPatterns;
    while (patrn)
    {
        bnfa_pattern_t* ipatrn=patrn;
        patrn=patrn->next;
        BNFA_FREE(ipatrn->casepatrn,ipatrn->n,bnfa->pat_memory);
        if (bnfa->agent && ipatrn->userdata)
            bnfa->agent->user_free(ipatrn->userdata);
        BNFA_FREE(ipatrn,sizeof(bnfa_pattern_t),bnfa->pat_memory);
    }

    /* Free arrays.  NOTE(review): the size passed for bnfaTransList assumes
     * 2 words per state plus one word per transition -- confirm it matches
     * the 'nps' computed in _bnfa_conv_list_to_csparse_array */
    BNFA_FREE(bnfa->bnfaFailState,bnfa->bnfaNumStates*sizeof(bnfa_state_t),bnfa->failstate_memory);
    BNFA_FREE(bnfa->bnfaMatchList,bnfa->bnfaNumStates*sizeof(bnfa_pattern_t*),
        bnfa->matchlist_memory);
    BNFA_FREE(bnfa->bnfaNextState,bnfa->bnfaNumStates*sizeof(bnfa_state_t*),
        bnfa->nextstate_memory);
    BNFA_FREE(bnfa->bnfaTransList,(2*bnfa->bnfaNumStates+bnfa->bnfaNumTrans)*sizeof(bnfa_state_t),
        bnfa->nextstate_memory);
    snort_free(bnfa); /* cannot update memory tracker when deleting bnfa so just 'free' it !*/
}
/*
 * Add a pattern to the pattern list
 *
 * Copies the pattern bytes and pushes the new entry onto the front of the
 * pattern list.  Returns 0 on success, -1 on allocation failure.
 */
int bnfaAddPattern(
    bnfa_struct_t* p,
    const uint8_t* pat,
    unsigned n,
    bool nocase,
    bool negative,
    void* userdata)
{
    bnfa_pattern_t* entry =
        (bnfa_pattern_t*)BNFA_MALLOC(sizeof(bnfa_pattern_t),p->pat_memory);

    if (!entry)
        return -1;

    entry->casepatrn = (uint8_t*)BNFA_MALLOC(n,p->pat_memory);

    if (!entry->casepatrn)
    {
        /* roll back the node allocation on failure */
        BNFA_FREE(entry,sizeof(bnfa_pattern_t),p->pat_memory);
        return -1;
    }

    memcpy(entry->casepatrn, pat, n);

    entry->n = n;
    entry->nocase = nocase;
    entry->negative = negative;
    entry->userdata = userdata;

    /* insert at front of list */
    entry->next = p->bnfaPatterns;
    p->bnfaPatterns = entry;
    p->bnfaPatternCnt++;

    return 0;
}
/*
 * Compile the patterns into an nfa state machine
 *
 * Builds the keyword trie from the pattern list, constructs failure states,
 * then converts storage to the configured format (sparse or full).
 * Returns 0 on success, -1 on failure; the caller is expected to invoke
 * bnfaFree() for cleanup on failure.
 */
static inline int _bnfaCompile(bnfa_struct_t* bnfa)
{
    bnfa_pattern_t* plist;
    bnfa_match_node_t** tmpMatchList;
    unsigned cntMatchStates;
    int i;

    /* Count number of states (upper bound: total pattern bytes + 1) */
    for (plist = bnfa->bnfaPatterns; plist != nullptr; plist = plist->next)
    {
        bnfa->bnfaMaxStates += plist->n;
    }
    bnfa->bnfaMaxStates++;   /* one extra */

    /* Alloc a List based State Transition table */
    bnfa->bnfaTransTable =(bnfa_trans_node_t**)BNFA_MALLOC(sizeof(bnfa_trans_node_t*) *
        bnfa->bnfaMaxStates,bnfa->list_memory);

    if (!bnfa->bnfaTransTable)
    {
        return -1;
    }

    /* Alloc a MatchList table - this has a list of pattern matches for each state */
    bnfa->bnfaMatchList=(bnfa_match_node_t**)BNFA_MALLOC(sizeof(void*)*bnfa->bnfaMaxStates,
        bnfa->matchlist_memory);

    if (!bnfa->bnfaMatchList)
    {
        return -1;
    }

    /* Add each Pattern to the State Table - This forms a keyword trie using lists
     * NOTE(review): the return value of _bnfa_add_pattern_states is ignored
     * here -- confirm failures are safely absorbed by the later checks */
    bnfa->bnfaNumStates = 0;
    for (plist = bnfa->bnfaPatterns; plist != nullptr; plist = plist->next)
    {
        _bnfa_add_pattern_states (bnfa, plist);
    }
    bnfa->bnfaNumStates++;

    if ( bnfa->bnfaNumStates > BNFA_SPARSE_MAX_STATE )
    {
        return -1;  /* Call bnfaFree to clean up */
    }

    /* ReAlloc a smaller MatchList table - only need NumStates */
    tmpMatchList=bnfa->bnfaMatchList;

    bnfa->bnfaMatchList=(bnfa_match_node_t**)BNFA_MALLOC(sizeof(void*) * bnfa->bnfaNumStates,
        bnfa->matchlist_memory);

    if (!bnfa->bnfaMatchList)
    {
        /* NOTE(review): tmpMatchList is not freed on this path -- potential
         * leak on allocation failure; confirm against bnfaFree's behavior */
        return -1;
    }

    memcpy(bnfa->bnfaMatchList,tmpMatchList,sizeof(void*) * bnfa->bnfaNumStates);

    BNFA_FREE(tmpMatchList,sizeof(void*) * bnfa->bnfaMaxStates,bnfa->matchlist_memory);

    /* Alloc a failure state table - only need NumStates */
    bnfa->bnfaFailState =(bnfa_state_t*)BNFA_MALLOC(sizeof(bnfa_state_t) * bnfa->bnfaNumStates,
        bnfa->failstate_memory);

    if (!bnfa->bnfaFailState)
    {
        return -1;
    }

#ifdef ALLOW_NFA_FULL
    if ( bnfa->bnfaFormat == BNFA_FULL )
    {
        /* Alloc a state transition table - only need NumStates */
        bnfa->bnfaNextState=(bnfa_state_t**)BNFA_MALLOC(sizeof(bnfa_state_t*) *
            bnfa->bnfaNumStates,bnfa->nextstate_memory);

        if (!bnfa->bnfaNextState)
        {
            return -1;
        }
    }
#endif

    /* Build the nfa w/failure states - time the nfa construction */
    if ( _bnfa_build_nfa (bnfa) )
    {
        return -1;
    }

    /* Convert nfa storage format from list to full or sparse */
    if ( bnfa->bnfaFormat == BNFA_SPARSE )
    {
        if ( _bnfa_conv_list_to_csparse_array(bnfa) )
        {
            return -1;
        }
        /* fail states are folded into the sparse array; free the table */
        BNFA_FREE(bnfa->bnfaFailState,sizeof(bnfa_state_t)*bnfa->bnfaNumStates,
            bnfa->failstate_memory);
        bnfa->bnfaFailState=nullptr;
    }
#ifdef ALLOW_NFA_FULL
    else if ( bnfa->bnfaFormat == BNFA_FULL )
    {
        if ( _bnfa_conv_list_to_full(bnfa) )
        {
            return -1;
        }
    }
#endif
    else
    {
        return -1;
    }

    /* Free up the Table Of Transition Lists */
    _bnfa_list_free_table(bnfa);

    /* Count states with Pattern Matches */
    cntMatchStates=0;

    for (i=0; i<bnfa->bnfaNumStates; i++)
    {
        if ( bnfa->bnfaMatchList[i] )
            cntMatchStates++;
    }

    bnfa->bnfaMatchStates = cntMatchStates;

    /* fold this machine's stats into the global summary */
    bnfaAccumInfo(bnfa);

    return 0;
}
/* Compile the state machine; when an agent is attached, also build the
 * per-state rule option trees.  Returns 0 on success, non-zero on error. */
int bnfaCompile(SnortConfig* sc, bnfa_struct_t* bnfa)
{
    const int rval = _bnfaCompile(bnfa);

    if ( rval )
        return rval;

    if ( bnfa->agent )
        bnfaBuildMatchStateTrees(sc, bnfa);

    return 0;
}
/*
  binary array search on sparse transition array

  O(logN) search times..same as a binary tree.
  data must be in sorted order in the array.

  return:  = -1 => not found
          >= 0  => index of element 'val'

  notes:
    val is tested against the high 8 bits of the 'a' array entry,
    this is particular to the storage format we are using.
*/
static inline int _bnfa_binearch(const bnfa_state_t* a, int a_len, int val)
{
    int lo = 0;
    int hi = a_len - 1;

    while ( lo <= hi )
    {
        const int mid = (lo + hi) >> 1;
        const int key = a[mid] >> BNFA_SPARSE_VALUE_SHIFT;

        if ( key == val )
            return mid;

        if ( val < key )
            hi = mid - 1;
        else            /* val > key */
            lo = mid + 1;
    }

    return -1;
}
/*
 * Sparse format for state table using single array storage
 *
 * word 1: state
 * word 2: control-word = cb<<24| fs
 *    cb   : control-byte
 *         : mb | fb | nt
 *    mb : bit 8 set if match state, zero otherwise
 *    fb : bit 7 set if using full format, zero otherwise
 *    nt : number of transitions 0..63 (more than 63 requires full format)
 *    fs: failure-transition-state
 * word 3+: byte-value(0-255) << 24 | transition-state
 *
 * Hot-path transition lookup: follows failure links until a state with a
 * transition on 'input' is found.  State 0 (full format) always yields a
 * transition, so the loop terminates.
 */
static inline unsigned _bnfa_get_next_state_csparse_nfa(
    bnfa_state_t* pcx, unsigned sindex, unsigned input)
{
    int k;
    int nc;
    int index;
    bnfa_state_t* pcs;

    for (;; )
    {
        pcs = pcx + sindex + 1; /* skip state-id == 1st word */

        if ( pcs[0] & BNFA_SPARSE_FULL_BIT )
        {
            /* full row: direct index by input byte */
            if ( sindex == 0 )
            {
                /* state 0 never fails; zero entries mean "stay at state 0" */
                return pcs[1+input] & BNFA_SPARSE_MAX_STATE;
            }
            else
            {
                if ( pcs[1+input] & BNFA_SPARSE_MAX_STATE )
                    return pcs[1+input] & BNFA_SPARSE_MAX_STATE;
            }
        }
        else
        {
            /* sparse row: nc packed (input | next-state) words follow */
            nc = (pcs[0]>>BNFA_SPARSE_COUNT_SHIFT) & BNFA_SPARSE_MAX_ROW_TRANSITIONS;

            if ( nc > BNFA_SPARSE_LINEAR_SEARCH_LIMIT )
            {
                /* binary search... */
                index = _bnfa_binearch(pcs+1, nc, input);

                if ( index >= 0 )
                {
                    return pcs[index+1] & BNFA_SPARSE_MAX_STATE;
                }
            }
            else
            {
                /* linear search... */
                for ( k=0; k<nc; k++ )
                {
                    if ( (pcs[k+1]>>BNFA_SPARSE_VALUE_SHIFT) == input )
                    {
                        return pcs[k+1] & BNFA_SPARSE_MAX_STATE;
                    }
                }
            }
        }

        /* no transition found ... get the failure state and try again  */
        sindex = pcs[0] & BNFA_SPARSE_MAX_STATE;
    }
}
/*
 * Per Pattern case search, case is on per pattern basis
 * standard snort search
 *
 * Scans Tx[0..n) through the sparse-format NFA.  For each byte the state is
 * advanced; when a match state is reached, the match callback is invoked
 * once per new match state (duplicate hits on the same state are skipped
 * via last_match).  Returns the number of match events; *current_state
 * receives the final state index for cross-buffer continuation.
 */
unsigned _bnfa_search_csparse_nfa(
    bnfa_struct_t* bnfa, const uint8_t* Tx, int n, MpseMatch match,
    void* context, unsigned sindex, int* current_state)
{
    bnfa_match_node_t** MatchList = bnfa->bnfaMatchList;
    bnfa_state_t* transList = bnfa->bnfaTransList;
    unsigned nfound = 0;
    unsigned last_match=LAST_STATE_INIT;
    unsigned last_match_saved=LAST_STATE_INIT;
    const uint8_t* T = Tx;
    const uint8_t* Tend = T + n;

    for (; T<Tend; T++)
    {
        /* input bytes go through the case-translation table */
        uint8_t Tchar = xlatcase[ *T ];

        /* Transition to next state index */
        sindex = _bnfa_get_next_state_csparse_nfa(transList,sindex,Tchar);

        /* Log matches in this state - if any */
        if ( sindex && (transList[sindex+1] & BNFA_SPARSE_MATCH_BIT) )
        {
            /* suppress repeated hits on the same state */
            if ( sindex == last_match )
                continue;

            last_match_saved = last_match;
            last_match = sindex;

            {
                /* transList[sindex] is the state id; index its match list */
                bnfa_match_node_t* mlist = MatchList[ transList[sindex] ];

                if ( !mlist )
                    return nfound;

                bnfa_pattern_t* patrn = (bnfa_pattern_t*)mlist->data;
                unsigned index = T - Tx + 1;  /* offset just past the match */
                nfound++;

                /* Don't do anything specific for case sensitive patterns and not,
                 * since that will be covered by the rule tree itself.  Each tree
                 * might have both case sensitive & case insensitive patterns.
                 */
                int res = match(patrn->userdata, mlist->rule_option_tree, index,
                    context, mlist->neg_list);

                if ( res > 0 )
                {
                    /* callback asked to stop the search */
                    *current_state = sindex;
                    return nfound;
                }
                else if ( res < 0 )
                {
                    /* callback rejected the match; allow this state to hit again */
                    last_match = last_match_saved;
                }
            }
        }
    }

    *current_state = sindex;
    return nfound;
}
/* Return the number of patterns added via bnfaAddPattern(). */
int bnfaPatternCount(bnfa_struct_t* p)
{
    return p->bnfaPatternCnt;
}
/* Aggregate statistics accumulated across all instances by bnfaAccumInfo();
 * summary_cnt counts the instances folded in. */
static bnfa_struct_t summary;
static int summary_cnt = 0;
/* Log counts and memory statistics for a state machine (used for the
 * global summary).  No-op when the machine has no states. */
static void bnfaPrintInfoEx(bnfa_struct_t* p)
{
    unsigned max_memory;

    if ( !p->bnfaNumStates )
    {
        return;
    }

    /* total across all tracked memory pools */
    max_memory = p->bnfa_memory + p->pat_memory + p->list_memory +
        p->matchlist_memory + p->failstate_memory + p->nextstate_memory;

    LogCount("instances", summary_cnt);
    LogCount("patterns", p->bnfaPatternCnt);
    LogCount("pattern chars", p->bnfaMaxStates);
    LogCount("num states", p->bnfaNumStates);
    LogCount("num match states", p->bnfaMatchStates);

    /* pick KB or MB scaling based on the total */
    double scale;

    if ( max_memory < 1024*1024 )
    {
        scale = 1024;
        LogValue("memory scale", "KB");
    }
    else
    {
        scale = 1024 * 1024;
        LogValue("memory scale", "MB");
    }
    LogStat("total memory", max_memory/scale);
    LogStat("pattern memory", p->pat_memory/scale);
    LogStat("match list memory", p->matchlist_memory/scale);
    LogStat("transition memory", p->nextstate_memory/scale);
}
/* Print the state machine for 'p'.
 * NOTE(review): despite the name, this dumps the machine itself
 * (bnfaPrint) rather than the stats summary (bnfaPrintInfoEx) --
 * confirm this is the intended behavior. */
void bnfaPrintInfo(bnfa_struct_t* p)
{
    bnfaPrint(p);
}
/* Log the statistics accumulated across all instances (see bnfaAccumInfo). */
void bnfaPrintSummary()
{
    bnfaPrintInfoEx(&summary);
}
/* Reset the global summary accumulator and instance counter. */
void bnfaInitSummary()
{
    summary_cnt=0;
    memset(&summary,0,sizeof(bnfa_struct_t));
}
/* Fold one instance's counts and memory usage into the global summary.
 * Called at the end of _bnfaCompile. */
void bnfaAccumInfo(bnfa_struct_t* p)
{
    bnfa_struct_t* px = &summary;

    summary_cnt++;

    /* alphabet size is taken as-is (identical across instances), all other
     * fields are accumulated */
    px->bnfaAlphabetSize = p->bnfaAlphabetSize;
    px->bnfaPatternCnt += p->bnfaPatternCnt;
    px->bnfaMaxStates += p->bnfaMaxStates;
    px->bnfaNumStates += p->bnfaNumStates;
    px->bnfaNumTrans += p->bnfaNumTrans;
    px->bnfaMatchStates += p->bnfaMatchStates;
    px->bnfa_memory += p->bnfa_memory;
    px->pat_memory += p->pat_memory;
    px->list_memory += p->list_memory;
    px->matchlist_memory += p->matchlist_memory;
    px->nextstate_memory += p->nextstate_memory;
    px->failstate_memory += p->failstate_memory;
}
| {
"pile_set_name": "Github"
} |
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!21 &2100000
Material:
serializedVersion: 6
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_Name: NoiseBall2
m_Shader: {fileID: 4800000, guid: dd34056461ef98446a6f7fec54e04bd7, type: 3}
m_ShaderKeywords:
m_LightmapFlags: 4
m_EnableInstancingVariants: 0
m_CustomRenderQueue: -1
stringTagMap: {}
disabledShaderPasses: []
m_SavedProperties:
serializedVersion: 3
m_TexEnvs:
- _BumpMap:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _DetailAlbedoMap:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _DetailMask:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _DetailNormalMap:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _EmissionMap:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _MainTex:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _MetallicGlossMap:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _OcclusionMap:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _ParallaxMap:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
m_Floats:
- _BumpScale: 1
- _Cutoff: 0.5
- _DetailNormalMapScale: 1
- _DstBlend: 0
- _GlossMapScale: 1
- _Glossiness: 0.5
- _GlossyReflections: 1
- _Metallic: 0
- _Mode: 0
- _OcclusionStrength: 1
- _Parallax: 0.02
- _Smoothness: 0
- _SmoothnessTextureChannel: 0
- _SpecularHighlights: 1
- _SrcBlend: 1
- _UVSec: 0
- _ZWrite: 1
m_Colors:
- _Color: {r: 1, g: 1, b: 1, a: 1}
- _EmissionColor: {r: 0, g: 0, b: 0, a: 1}
| {
"pile_set_name": "Github"
} |
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
//! This trait contains miscellaneous features that have
//! not been carefully factored into other traits.
//!
//! FIXME: Things here need to be moved elsewhere.
use crate::cf_names::CFNamesExt;
use crate::errors::Result;
use crate::range::Range;
// FIXME: Find somewhere else to put this?
pub const MAX_DELETE_BATCH_COUNT: usize = 512;
pub trait MiscExt: CFNamesExt {
    /// Flush memtables; `sync` controls whether the call waits for completion.
    fn flush(&self, sync: bool) -> Result<()>;

    /// Flush the memtable of column family `cf`.
    fn flush_cf(&self, cf: &str, sync: bool) -> Result<()>;

    /// Delete files in `[start_key, end_key)` across all column families.
    ///
    /// The end key is excluded (`include_end = false`).  No-op when
    /// `start_key >= end_key`.
    fn delete_all_files_in_range(&self, start_key: &[u8], end_key: &[u8]) -> Result<()> {
        if start_key >= end_key {
            return Ok(());
        }

        for cf in self.cf_names() {
            self.delete_files_in_range_cf(cf, start_key, end_key, false)?;
        }

        Ok(())
    }

    /// Delete files in the given range for a single column family.
    fn delete_files_in_range_cf(
        &self,
        cf: &str,
        start_key: &[u8],
        end_key: &[u8],
        include_end: bool,
    ) -> Result<()>;

    /// Delete blob files in `[start_key, end_key)` across all column families.
    ///
    /// The end key is excluded (`include_end = false`).  No-op when
    /// `start_key >= end_key`.
    fn delete_blob_files_in_range(&self, start_key: &[u8], end_key: &[u8]) -> Result<()> {
        if start_key >= end_key {
            return Ok(());
        }

        for cf in self.cf_names() {
            self.delete_blob_files_in_range_cf(cf, start_key, end_key, false)?;
        }

        Ok(())
    }

    /// Delete blob files in the given range for a single column family.
    fn delete_blob_files_in_range_cf(
        &self,
        cf: &str,
        start_key: &[u8],
        end_key: &[u8],
        include_end: bool,
    ) -> Result<()>;

    /// Delete all keys in `[start_key, end_key)` across all column families.
    ///
    /// No-op when `start_key >= end_key`.
    fn delete_all_in_range(
        &self,
        start_key: &[u8],
        end_key: &[u8],
        use_delete_range: bool,
    ) -> Result<()> {
        if start_key >= end_key {
            return Ok(());
        }

        for cf in self.cf_names() {
            self.delete_all_in_range_cf(cf, start_key, end_key, use_delete_range)?;
        }

        Ok(())
    }

    /// Delete all keys in the given range for a single column family.
    fn delete_all_in_range_cf(
        &self,
        cf: &str,
        start_key: &[u8],
        end_key: &[u8],
        use_delete_range: bool,
    ) -> Result<()>;

    /// Return the approximate number of records and size in the range of memtables of the cf.
    fn get_approximate_memtable_stats_cf(&self, cf: &str, range: &Range) -> Result<(u64, u64)>;

    /// Whether an ingest into `cf` is likely to slow down foreground writes.
    fn ingest_maybe_slowdown_writes(&self, cf: &str) -> Result<bool>;

    /// Gets total used size of rocksdb engine, including:
    /// * total size (bytes) of all SST files.
    /// * total size (bytes) of active and unflushed immutable memtables.
    /// * total size (bytes) of all blob files.
    ///
    fn get_engine_used_size(&self) -> Result<u64>;

    /// Roughly deletes files in multiple ranges.
    ///
    /// Note:
    ///    - After this operation, some keys in the range might still exist in the database.
    ///    - After this operation, some keys in the range might be removed from existing snapshot,
    ///      so you shouldn't expect to be able to read data from the range using existing snapshots
    ///      any more.
    ///
    /// Ref: https://github.com/facebook/rocksdb/wiki/Delete-A-Range-Of-Keys
    fn roughly_cleanup_ranges(&self, ranges: &[(Vec<u8>, Vec<u8>)]) -> Result<()>;

    /// Filesystem path of the database.
    fn path(&self) -> &str;

    /// Synchronize the write-ahead log to durable storage.
    fn sync_wal(&self) -> Result<()>;

    /// Check whether a database exists at a given path
    fn exists(path: &str) -> bool;

    /// Dump stats about the database into a string.
    ///
    /// For debugging. The format and content is unspecified.
    fn dump_stats(&self) -> Result<String>;

    /// Latest sequence number assigned by the engine.
    fn get_latest_sequence_number(&self) -> u64;

    /// Sequence number of the oldest live snapshot, if any.
    fn get_oldest_snapshot_sequence_number(&self) -> Option<u64>;
}
| {
"pile_set_name": "Github"
} |
/**
* Copyright (c) Tiny Technologies, Inc. All rights reserved.
* Licensed under the LGPL or a commercial license.
* For LGPL see License.txt in the project root for license information.
* For commercial licenses see https://www.tiny.cloud/
*/
import * as FilterContent from './FilterContent';
// Resolve pagebreak placeholder IMG elements to the name 'pagebreak'
// when the editor asks for an element's name.
const setup = function (editor) {
  editor.on('ResolveName', (e) => {
    const target = e.target;
    if (target.nodeName === 'IMG' && editor.dom.hasClass(target, FilterContent.getPageBreakClass())) {
      e.name = 'pagebreak';
    }
  });
};

export {
  setup
};
| {
"pile_set_name": "Github"
} |
#import <TargetConditionals.h>
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wobjc-property-no-attribute"
#if TARGET_OS_IPHONE
@import UIKit;
#else
@import AppKit;
#endif
#import "MAIEnums.h"
#import "MAIDeclarations.h"
NS_ASSUME_NONNULL_BEGIN
/* Cross-platform pasteboard protocol: the common surface shared by
 * UIPasteboard (iOS) and NSPasteboard (macOS). Constructors that exist on
 * only one platform are declared NS_UNAVAILABLE on the other. */
@protocol MAIPasteboardProtocol
+(id<MAIPasteboardProtocol>)generalPasteboard;
+(id<MAIPasteboardProtocol>)pasteboardWithUniqueName;
/* Pasteboard identity and change counter (readonly on both platforms). */
@property(readonly, getter=name) NSString* name;
@property(readonly, getter=changeCount) NSInteger changeCount;
#if TARGET_OS_IPHONE
/* UIKit-only factory methods, excluded from the shared protocol. */
+(nullable id<MAIPasteboardProtocol>)pasteboardWithName:(NSString*)pasteboardName create:(BOOL)create NS_UNAVAILABLE;
+(void)removePasteboardWithName:(NSString*)pasteboardName NS_UNAVAILABLE;
#else
/* AppKit-only factory methods, excluded from the shared protocol. */
+(id<MAIPasteboardProtocol>)pasteboardWithName:(NSString*)name NS_UNAVAILABLE;
+(id<MAIPasteboardProtocol>)pasteboardByFilteringFile:(NSString*)filename NS_UNAVAILABLE;
+(id<MAIPasteboardProtocol>)pasteboardByFilteringData:(NSData*)data ofType:(NSString*)type NS_UNAVAILABLE;
+(id<MAIPasteboardProtocol>)pasteboardByFilteringTypesInPasteboard:(id<MAIPasteboardProtocol>)pboard NS_UNAVAILABLE;
#endif
@end
/* Concrete class: subclasses the native pasteboard type of the platform. */
#if TARGET_OS_IPHONE
@interface MAIPasteboard : UIPasteboard<MAIPasteboardProtocol>
#else
@interface MAIPasteboard : NSPasteboard<MAIPasteboardProtocol>
#endif
@end
NS_ASSUME_NONNULL_END
#pragma clang diagnostic pop
| {
"pile_set_name": "Github"
} |
# ms.js
Ever find yourself doing math in your head or writing `1000 * 60 * 60 …`?
Don't want to add obtrusive `Number` prototype extensions to your reusable
/ distributable modules and projects?
`ms` is a tiny utility that you can leverage when your application needs to
accept a number of milliseconds as a parameter.
If a number is supplied to `ms`, it returns it immediately (e.g: it returns `100` for `100`).
If a string that contains the number is supplied, it returns it immediately as
a number (e.g: it returns `100` for `'100'`).
However, if you pass a string with a number and a valid unit, the number of
equivalent ms is returned.
```js
ms('1d') // 86400000
ms('10h') // 36000000
ms('2h') // 7200000
ms('1m') // 60000
ms('5s') // 5000
ms('100') // '100'
ms(100) // 100
```
## How to use
### Node
```js
require('ms')
```
### Browser
```html
<script src="ms.js"></script>
```
## Credits
(The MIT License)
Copyright (c) 2011 Guillermo Rauch <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| {
"pile_set_name": "Github"
} |
// ------------------------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
// ------------------------------------------------------------------------------
package com.microsoft.graph.requests.extensions;
import com.microsoft.graph.http.IRequestBuilder;
import com.microsoft.graph.core.ClientException;
import com.microsoft.graph.concurrency.ICallback;
import com.microsoft.graph.models.generated.MailTipsType;
import com.microsoft.graph.models.extensions.MailTips;
import java.util.EnumSet;
import java.util.Arrays;
import java.util.EnumSet;
import com.microsoft.graph.requests.extensions.IUserGetMailTipsCollectionRequestBuilder;
import com.google.gson.JsonObject;
import com.microsoft.graph.http.IBaseCollectionPage;
// **NOTE** This file was generated by a tool and any changes will be overwritten.
/**
 * The interface for the User Get Mail Tips Collection Page.
 *
 * A paged collection of {@link MailTips} results; the request-builder type
 * parameter is presumably used to fetch subsequent pages — generated code,
 * see {@code IBaseCollectionPage} for the paging contract.
 */
public interface IUserGetMailTipsCollectionPage extends IBaseCollectionPage<MailTips, IUserGetMailTipsCollectionRequestBuilder> {
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 1990,1993 Regents of The University of Michigan.
* Copyright (c) 1999 Adrian Sun ([email protected])
* All Rights Reserved. See COPYRIGHT.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#include <atalk/standards.h>
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#ifdef HAVE_CRYPT_H
#include <crypt.h>
#endif /* ! HAVE_CRYPT_H */
#include <pwd.h>
#include <sys/time.h>
#include <time.h>
#ifdef SHADOWPW
#include <shadow.h>
#endif /* SHADOWPW */
#include <arpa/inet.h>
#include <atalk/afp.h>
#include <atalk/logger.h>
#include <atalk/uam.h>
#include <atalk/util.h>
#include <atalk/compat.h>
#define PASSWDLEN 8
#ifndef MIN
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif /* MIN */
#ifdef TRU64
#include <sia.h>
#include <siad.h>
static const char *clientname;
#endif /* TRU64 */
/*XXX in etc/papd/file.h */
struct papfile;
extern UAM_MODULE_EXPORT void append(struct papfile *, const char *, int);
/*
 * Verify a cleartext password for `username` against the system passwd
 * (or shadow) database.
 *
 * ibuf holds the cleartext password (at least PASSWDLEN bytes). On success
 * *uam_pwd is pointed at the matching passwd entry and AFP_OK is returned
 * (or AFPERR_PWDEXPR when the shadow entry marks the password expired).
 * Returns AFPERR_NOTAUTH on any authentication failure.
 */
static int pwd_login(void *obj, char *username, int ulen, struct passwd **uam_pwd,
                     char *ibuf, size_t ibuflen,
                     char *rbuf _U_, size_t *rbuflen _U_)
{
    char *p;
    struct passwd *pwd;
    int err = AFP_OK;
#ifdef SHADOWPW
    struct spwd *sp;
#endif /* SHADOWPW */

#ifdef TRU64
    if( uam_afpserver_option( obj, UAM_OPTION_CLIENTNAME,
                              (void *) &clientname, NULL ) < 0 )
        return AFPERR_MISC;
#endif /* TRU64 */

    if (ibuflen < PASSWDLEN) {
        return( AFPERR_PARAM );
    }
    /* Truncate the password to the AFP cleartext limit. */
    ibuf[ PASSWDLEN ] = '\0';

    if (( pwd = uam_getname(obj, username, ulen)) == NULL ) {
        return AFPERR_NOTAUTH;
    }

    LOG(log_info, logtype_uams, "cleartext login: %s", username);

    if (uam_checkuser(pwd) < 0) {
        LOG(log_info, logtype_uams, "not a valid user");
        return AFPERR_NOTAUTH;
    }

#ifdef SHADOWPW
    if (( sp = getspnam( pwd->pw_name )) == NULL ) {
        LOG(log_info, logtype_uams, "no shadow passwd entry for %s", username);
        return AFPERR_NOTAUTH;
    }
    pwd->pw_passwd = sp->sp_pwdp;

    /* sp_lstchg and sp_max are expressed in days since the epoch. */
    if (sp->sp_max != -1 && sp->sp_lstchg) {
        time_t now = time(NULL) / (60*60*24);
        int32_t expire_days = sp->sp_lstchg - now + sp->sp_max;
        if ( expire_days < 0 ) {
            LOG(log_info, logtype_uams, "Password for user %s expired", username);
            err = AFPERR_PWDEXPR;
        }
    }
#endif /* SHADOWPW */

    if (!pwd->pw_passwd) {
        return AFPERR_NOTAUTH;
    }

    *uam_pwd = pwd;

#ifdef TRU64
    {
        int ac;
        char **av;
        char hostname[256];

        uam_afp_getcmdline( &ac, &av );
        sprintf( hostname, "%s@%s", username, clientname );

        if( uam_sia_validate_user( NULL, ac, av, hostname, username,
                                   NULL, FALSE, NULL, ibuf ) != SIASUCCESS )
            return AFPERR_NOTAUTH;

        return err;
    }
#else /* TRU64 */
    p = crypt( ibuf, pwd->pw_passwd );
    /* crypt() may return NULL (e.g. unsupported hash or FIPS mode);
     * treat that as an authentication failure instead of crashing. */
    if ( p != NULL && strcmp( p, pwd->pw_passwd ) == 0 )
        return err;
#endif /* TRU64 */

    return AFPERR_NOTAUTH;
}
/* cleartxt login */
/*
 * AFP cleartext login entry point: parse a Pascal-style (length-prefixed)
 * user name from ibuf into the server-provided username buffer, skip the
 * pad byte when present, then hand the remaining buffer (the password)
 * to pwd_login().
 */
static int passwd_login(void *obj, struct passwd **uam_pwd,
                        char *ibuf, size_t ibuflen,
                        char *rbuf, size_t *rbuflen)
{
    char *username;
    size_t len, ulen;

    *rbuflen = 0;

    /* Ask the server for its username buffer; ulen is its capacity. */
    if (uam_afpserver_option(obj, UAM_OPTION_USERNAME,
                             (void *) &username, &ulen) < 0)
        return AFPERR_MISC;

    if (ibuflen < 2) {
        return( AFPERR_PARAM );
    }

    len = (unsigned char) *ibuf++;   /* leading length byte */
    ibuflen--;
    if (!len || len > ibuflen || len > ulen ) {
        return( AFPERR_PARAM );
    }

    memcpy(username, ibuf, len );
    ibuf += len;
    ibuflen -=len;

    username[ len ] = '\0';

    /* NOTE(review): the pad is decided by the parity of the buffer ADDRESS,
     * not an offset within the packet -- presumably the packet always
     * starts at an even address; verify against the caller. */
    if ((unsigned long) ibuf & 1) { /* pad character */
        ++ibuf;
        ibuflen--;
    }
    return (pwd_login(obj, username, ulen, uam_pwd, ibuf, ibuflen, rbuf, rbuflen));
}
/* cleartxt login ext
 * uname format :
 *     byte      3
 *     2 bytes   len (network order)
 *     len bytes unicode name
 */
/*
 * AFP 3.x cleartext login: the user name arrives in the separate uname
 * buffer in the format above; ibuf holds the password and is passed
 * through to pwd_login() untouched.
 * NOTE(review): uname's total length is not validated before reading the
 * 3-byte header plus `len` bytes -- presumably the caller guarantees a
 * well-formed buffer; verify.
 */
static int passwd_login_ext(void *obj, char *uname, struct passwd **uam_pwd,
                            char *ibuf, size_t ibuflen,
                            char *rbuf, size_t *rbuflen)
{
    char *username;
    size_t len, ulen;
    uint16_t temp16;

    *rbuflen = 0;

    /* Server-provided buffer for the parsed user name; ulen = capacity. */
    if (uam_afpserver_option(obj, UAM_OPTION_USERNAME,
                             (void *) &username, &ulen) < 0)
        return AFPERR_MISC;

    if (*uname != 3)
        return AFPERR_PARAM;
    uname++;
    /* 16-bit big-endian length; memcpy avoids an unaligned read. */
    memcpy(&temp16, uname, sizeof(temp16));
    len = ntohs(temp16);
    if (!len || len > ulen ) {
        return( AFPERR_PARAM );
    }
    memcpy(username, uname +2, len );
    username[ len ] = '\0';

    return (pwd_login(obj, username, ulen, uam_pwd, ibuf, ibuflen, rbuf, rbuflen));
}
#if 0
/* change passwd */
/*
 * Password-change handler -- compiled out (#if 0): it verifies the old
 * password, but the code that would actually install the new one was
 * never written (the SHADOWPW/else branches below are empty).
 */
static int passwd_changepw(void *obj, char *username,
                           struct passwd *pwd, char *ibuf,
                           size_t ibuflen, char *rbuf, size_t *rbuflen)
{
#ifdef SHADOWPW
    struct spwd *sp;
#endif /* SHADOWPW */
    char pw[PASSWDLEN + 1], *p;
    uid_t uid = geteuid();

    if (uam_checkuser(pwd) < 0)
        return AFPERR_ACCESS;

    /* old password -- copy out of ibuf, then scrub the buffer */
    memcpy(pw, ibuf, PASSWDLEN);
    memset(ibuf, 0, PASSWDLEN);
    pw[PASSWDLEN] = '\0';

#ifdef SHADOWPW
    if (( sp = getspnam( pwd->pw_name )) == NULL ) {
        LOG(log_info, logtype_uams, "no shadow passwd entry for %s", username);
        return AFPERR_PARAM;
    }
    pwd->pw_passwd = sp->sp_pwdp;
#endif /* SHADOWPW */

    /* verify the old password before accepting a new one */
    p = crypt(pw, pwd->pw_passwd );
    if (strcmp( p, pwd->pw_passwd )) {
        memset(pw, 0, sizeof(pw));
        return AFPERR_NOTAUTH;
    }

    /* new password -- parsed but never applied (see note above) */
    ibuf += PASSWDLEN;
    ibuf[PASSWDLEN] = '\0';

#ifdef SHADOWPW
#else /* SHADOWPW */
#endif /* SHADOWPW */
    return AFP_OK;
}
#endif /* 0 */
/* Printer ClearTxtUAM login */
/*
 * Parse "(username) (password)" out of the PostScript login data between
 * start and stop, authenticate against the system passwd/shadow database
 * and, on success, append the "0\r" login-ok reply to *out.
 *
 * Returns 0 on successful login, -1 on any parse or auth failure.
 */
static int passwd_printer(char *start, char *stop, char *username, struct papfile *out)
{
    struct passwd *pwd;
#ifdef SHADOWPW
    struct spwd *sp;
#endif /* SHADOWPW */
    char *data, *p, *q;
    char password[PASSWDLEN + 1] = "\0";  /* zero-initializes the whole array */
    static const char *loginok = "0\r";
    int ulen;

    data = (char *)malloc(stop - start + 1);
    if (!data) {
        LOG(log_info, logtype_uams,"Bad Login ClearTxtUAM: malloc");
        return(-1);
    }
    strlcpy(data, start, stop - start + 1);

    /* We are looking for the following format in data:
     * (username) (password)
     *
     * Let's hope username doesn't contain ") ("!
     */

    /* Parse input for username in () */
    if ((p = strchr(data, '(' )) == NULL) {
        LOG(log_info, logtype_uams,"Bad Login ClearTxtUAM: username not found in string");
        free(data);
        return(-1);
    }
    p++;
    if ((q = strstr(p, ") (" )) == NULL) {
        LOG(log_info, logtype_uams,"Bad Login ClearTxtUAM: username not found in string");
        free(data);
        return(-1);
    }
    /* NOTE(review): assumes the caller supplies a zeroed username buffer of
     * at least UAM_USERNAMELEN+1 bytes -- no terminator is written here. */
    memcpy(username, p, MIN( UAM_USERNAMELEN, q - p ));

    /* Parse input for password in next () */
    p = q + 3;
    if ((q = strrchr(p , ')' )) == NULL) {
        LOG(log_info, logtype_uams,"Bad Login ClearTxtUAM: password not found in string");
        free(data);
        return(-1);
    }
    memcpy(password, p, MIN(PASSWDLEN, q - p) );

    /* Done copying username and password, clean up */
    free(data);

    ulen = strlen(username);
    if (( pwd = uam_getname(NULL, username, ulen)) == NULL ) {
        LOG(log_info, logtype_uams, "Bad Login ClearTxtUAM: ( %s ) not found ",
            username);
        return(-1);
    }

    if (uam_checkuser(pwd) < 0) {
        /* syslog of error happens in uam_checkuser */
        return(-1);
    }

#ifdef SHADOWPW
    if (( sp = getspnam( pwd->pw_name )) == NULL ) {
        LOG(log_info, logtype_uams, "Bad Login ClearTxtUAM: no shadow passwd entry for %s",
            username);
        return(-1);
    }
    pwd->pw_passwd = sp->sp_pwdp;

    /* sp_lstchg and sp_max are expressed in days since the epoch. */
    if (sp->sp_max != -1 && sp->sp_lstchg) {
        time_t now = time(NULL) / (60*60*24);
        int32_t expire_days = sp->sp_lstchg - now + sp->sp_max;
        if ( expire_days < 0 ) {
            LOG(log_info, logtype_uams, "Password for user %s expired", username);
            return (-1);
        }
    }
#endif /* SHADOWPW */

    if (!pwd->pw_passwd) {
        LOG(log_info, logtype_uams, "Bad Login ClearTxtUAM: no password for %s",
            username);
        return(-1);
    }

#ifdef AFS
    if ( kcheckuser( pwd, password) == 0)
        return(0);
#endif /* AFS */

    p = crypt(password, pwd->pw_passwd);
    /* crypt() may return NULL (unsupported hash, FIPS mode, ...);
     * treat that as a failed login rather than crashing in strcmp(). */
    if (p == NULL || strcmp(p, pwd->pw_passwd) != 0) {
        LOG(log_info, logtype_uams, "Bad Login ClearTxtUAM: %s: bad password", username);
        return(-1);
    }

    /* Login successful */
    append(out, loginok, strlen(loginok));
    LOG(log_info, logtype_uams, "Login ClearTxtUAM: %s", username);
    return(0);
}
/* Register the cleartext UAM for both AFP login and printer auth.
 * Returns 0 on success, -1 when either registration fails (the second
 * registration is skipped if the first one fails). */
static int uam_setup(void *obj, const char *path)
{
    int ok;

    ok = uam_register(UAM_SERVER_LOGIN_EXT, path, "Cleartxt Passwrd",
                      passwd_login, NULL, NULL, passwd_login_ext) >= 0
      && uam_register(UAM_SERVER_PRINTAUTH, path, "ClearTxtUAM",
                      passwd_printer) >= 0;
    return ok ? 0 : -1;
}
/*
 * Unregister everything uam_setup() registered.
 * NOTE(review): setup registers UAM_SERVER_LOGIN_EXT but cleanup passes
 * UAM_SERVER_LOGIN -- presumably uam_register() files LOGIN_EXT entries
 * under LOGIN; confirm against libatalk's UAM registry.
 */
static void uam_cleanup(void)
{
    uam_unregister(UAM_SERVER_LOGIN, "Cleartxt Passwrd");
    uam_unregister(UAM_SERVER_PRINTAUTH, "ClearTxtUAM");
}
/* Module export under the "clrtxt" name; shares setup/cleanup with uams_passwd. */
UAM_MODULE_EXPORT struct uam_export uams_clrtxt = {
    UAM_MODULE_SERVER,
    UAM_MODULE_VERSION,
    uam_setup, uam_cleanup
};
/* Module export under the "passwd" name; identical to uams_clrtxt. */
UAM_MODULE_EXPORT struct uam_export uams_passwd = {
    UAM_MODULE_SERVER,
    UAM_MODULE_VERSION,
    uam_setup, uam_cleanup
};
| {
"pile_set_name": "Github"
} |
/**
* Copyright (C) 2011 Brian Ferris <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onebusaway.transit_data.model;
/**
 * Serializable bean describing a path as parallel arrays of latitude and
 * longitude values.
 *
 * NOTE(review): accessors share the array references directly (no defensive
 * copies), so callers mutating a returned array mutate this bean.
 */
public class PathBean extends ApplicationBean {

  private static final long serialVersionUID = 1L;

  /* Field names are part of the serialized form; do not rename. */
  private double[] _lat;
  private double[] _lon;

  public PathBean() {
  }

  public PathBean(double[] lat, double[] lon) {
    this._lat = lat;
    this._lon = lon;
  }

  public double[] getLat() {
    return this._lat;
  }

  public void setLat(double[] lat) {
    this._lat = lat;
  }

  public double[] getLon() {
    return this._lon;
  }

  public void setLon(double[] lon) {
    this._lon = lon;
  }
}
| {
"pile_set_name": "Github"
} |
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
// Applies an explosion impulse to every rigidbody within `radius` on spawn,
// optionally replaying the attached audio clip at a boosted volume scale.
public class ExplosionForcer : MonoBehaviour
{
    // Base impulse strength (doubled when passed to AddExplosionForce).
    public float power;
    // Blast radius in world units.
    public float radius;
    // When set, replays the attached clip with volumeScale 3 on spawn.
    public bool boostAudio;

    private void Start()
    {
        if (boostAudio)
        {
            AudioSource source = GetComponent<AudioSource>();
            source.PlayOneShot(source.clip, 3);
        }
        Explode();
    }

    private void OnDrawGizmosSelected()
    {
        Gizmos.color = Color.red;
        Gizmos.DrawWireSphere(transform.position, radius);
    }

    private void Explode()
    {
        // Collect each rigidbody at most once, preserving discovery order
        // (colliders may share a rigidbody).
        Collider[] overlaps = Physics.OverlapSphere(transform.position, radius);
        List<Rigidbody> affected = new List<Rigidbody>();
        foreach (Collider overlap in overlaps)
        {
            Rigidbody body = overlap.attachedRigidbody;
            if (body != null && !affected.Contains(body))
            {
                affected.Add(body);
            }
        }
        foreach (Rigidbody body in affected)
        {
            body.AddExplosionForce(power * 2, transform.position, radius, 0.6f);
            body.SendMessage("Explosion", this, SendMessageOptions.DontRequireReceiver);
        }
    }
}
"pile_set_name": "Github"
} |
package metricsql
import (
"fmt"
"strconv"
"strings"
"sync"
)
// Parse parses MetricsQL query s.
//
// All the `WITH` expressions are expanded in the returned Expr.
//
// MetricsQL is backwards-compatible with PromQL.
func Parse(s string) (Expr, error) {
	var p parser
	p.lex.Init(s)
	if err := p.lex.Next(); err != nil {
		return nil, fmt.Errorf(`cannot find the first token: %s`, err)
	}
	e, err := p.parseExpr()
	if err != nil {
		return nil, fmt.Errorf(`%s; unparsed data: %q`, err, p.lex.Context())
	}
	// The whole input must be consumed; trailing tokens are an error.
	if !isEOF(p.lex.Token) {
		return nil, fmt.Errorf(`unparsed data left: %q`, p.lex.Context())
	}
	// Post-processing order matters: expand WITH templates first, then
	// strip redundant parens and fold constants on the expanded tree.
	was := getDefaultWithArgExprs()
	if e, err = expandWithExpr(was, e); err != nil {
		return nil, fmt.Errorf(`cannot expand WITH expressions: %s`, err)
	}
	e = removeParensExpr(e)
	e = simplifyConstants(e)
	return e, nil
}
// Expr holds any of *Expr types.
//
// Every expression node produced by Parse implements this interface.
type Expr interface {
	// AppendString appends string representation of Expr to dst.
	AppendString(dst []byte) []byte
}
// getDefaultWithArgExprs returns the built-in WITH templates
// (ru, ttf, median_over_time, range_median, alias), parsing them
// lazily exactly once via sync.Once.
func getDefaultWithArgExprs() []*withArgExpr {
	defaultWithArgExprsOnce.Do(func() {
		defaultWithArgExprs = prepareWithArgExprs([]string{
			// ru - resource utilization
			`ru(freev, maxv) = clamp_min(maxv - clamp_min(freev, 0), 0) / clamp_min(maxv, 0) * 100`,
			// ttf - time to fuckup
			`ttf(freev) = smooth_exponential(
				clamp_max(clamp_max(-freev, 0) / clamp_max(deriv_fast(freev), 0), 365*24*3600),
				clamp_max(step()/300, 1)
			)`,
			`median_over_time(m) = quantile_over_time(0.5, m)`,
			`range_median(q) = range_quantile(0.5, q)`,
			`alias(q, name) = label_set(q, "__name__", name)`,
		})
	})
	return defaultWithArgExprs
}

// Backing storage for getDefaultWithArgExprs; never accessed directly.
var (
	defaultWithArgExprs     []*withArgExpr
	defaultWithArgExprsOnce sync.Once
)
// prepareWithArgExprs parses each template in ss and panics on any
// parse failure or duplicate name -- callers pass hard-coded strings.
func prepareWithArgExprs(ss []string) []*withArgExpr {
	result := make([]*withArgExpr, 0, len(ss))
	for _, s := range ss {
		result = append(result, mustParseWithArgExpr(s))
	}
	if err := checkDuplicateWithArgNames(result); err != nil {
		panic(fmt.Errorf("BUG: %s", err))
	}
	return result
}
// checkDuplicateWithArgNames returns an error if two entries in was
// share the same Name.
func checkDuplicateWithArgNames(was []*withArgExpr) error {
	seen := make(map[string]*withArgExpr, len(was))
	for _, wa := range was {
		if prev, ok := seen[wa.Name]; ok {
			return fmt.Errorf("duplicate `with` arg name for: %s; previous one: %s", wa, prev.AppendString(nil))
		}
		seen[wa.Name] = wa
	}
	return nil
}
// mustParseWithArgExpr parses a single `name(args) = expr` template,
// panicking on failure -- it is only used for the hard-coded defaults.
func mustParseWithArgExpr(s string) *withArgExpr {
	var p parser
	p.lex.Init(s)
	if err := p.lex.Next(); err != nil {
		panic(fmt.Errorf("BUG: cannot find the first token in %q: %s", s, err))
	}
	wa, err := p.parseWithArgExpr()
	if err != nil {
		panic(fmt.Errorf("BUG: cannot parse %q: %s; unparsed data: %q", s, err, p.lex.Context()))
	}
	return wa
}
// removeParensExpr removes parensExpr for (Expr) case, recursing into
// every child expression. Multi-element parens become a union() call.
func removeParensExpr(e Expr) Expr {
	switch t := e.(type) {
	case *RollupExpr:
		t.Expr = removeParensExpr(t.Expr)
		return t
	case *BinaryOpExpr:
		t.Left = removeParensExpr(t.Left)
		t.Right = removeParensExpr(t.Right)
		return t
	case *AggrFuncExpr:
		for i, arg := range t.Args {
			t.Args[i] = removeParensExpr(arg)
		}
		return t
	case *FuncExpr:
		for i, arg := range t.Args {
			t.Args[i] = removeParensExpr(arg)
		}
		return t
	case *parensExpr:
		args := *t
		for i, arg := range args {
			args[i] = removeParensExpr(arg)
		}
		if len(args) == 1 {
			return args[0]
		}
		// Treat parensExpr as a function with empty name, i.e. union()
		return &FuncExpr{
			Name: "",
			Args: args,
		}
	default:
		return e
	}
}
// simplifyConstants recursively folds constant sub-expressions:
// number-op-number is evaluated, "a" + "b" is concatenated, and string
// comparisons become 1 or 0/NaN depending on the `bool` modifier.
func simplifyConstants(e Expr) Expr {
	if re, ok := e.(*RollupExpr); ok {
		re.Expr = simplifyConstants(re.Expr)
		return re
	}
	if ae, ok := e.(*AggrFuncExpr); ok {
		simplifyConstantsInplace(ae.Args)
		return ae
	}
	if fe, ok := e.(*FuncExpr); ok {
		simplifyConstantsInplace(fe.Args)
		return fe
	}
	if pe, ok := e.(*parensExpr); ok {
		// A single-element parensExpr collapses into its inner expression.
		if len(*pe) == 1 {
			return simplifyConstants((*pe)[0])
		}
		simplifyConstantsInplace(*pe)
		return pe
	}
	be, ok := e.(*BinaryOpExpr)
	if !ok {
		return e
	}
	be.Left = simplifyConstants(be.Left)
	be.Right = simplifyConstants(be.Right)
	// Fold number op number.
	lne, lok := be.Left.(*NumberExpr)
	rne, rok := be.Right.(*NumberExpr)
	if lok && rok {
		n := binaryOpEvalNumber(be.Op, lne.N, rne.N, be.Bool)
		return &NumberExpr{
			N: n,
		}
	}
	// Check whether both operands are string literals.
	lse, lok := be.Left.(*StringExpr)
	rse, rok := be.Right.(*StringExpr)
	if !lok || !rok {
		return be
	}
	if be.Op == "+" {
		// convert "foo" + "bar" to "foobar".
		return &StringExpr{
			S: lse.S + rse.S,
		}
	}
	if !IsBinaryOpCmp(be.Op) {
		return be
	}
	// Perform string comparisons.
	ok = false
	switch be.Op {
	case "==":
		ok = lse.S == rse.S
	case "!=":
		ok = lse.S != rse.S
	case ">":
		ok = lse.S > rse.S
	case "<":
		ok = lse.S < rse.S
	case ">=":
		ok = lse.S >= rse.S
	case "<=":
		ok = lse.S <= rse.S
	default:
		panic(fmt.Errorf("BUG: unexpected comparison binaryOp: %q", be.Op))
	}
	// A true comparison yields 1; a false one yields 0 with the `bool`
	// modifier and NaN without it.
	n := float64(0)
	if ok {
		n = 1
	}
	if !be.Bool && n == 0 {
		n = nan
	}
	return &NumberExpr{
		N: n,
	}
}
// simplifyConstantsInplace folds constants in every element of args,
// writing the results back into the slice.
func simplifyConstantsInplace(args []Expr) {
	for i := range args {
		args[i] = simplifyConstants(args[i])
	}
}
// parser parses MetricsQL expression.
//
// preconditions for all parser.parse* funcs:
// - p.lex.Token should point to the first token to parse.
//
// postconditions for all parser.parse* funcs:
// - p.lex.Token should point to the next token after the parsed token.
type parser struct {
	// lex is the underlying tokenizer; parse* funcs advance it in place.
	lex lexer
}
// isWith reports whether s is the `with` keyword, case-insensitively.
func isWith(s string) bool {
	return strings.ToLower(s) == "with"
}
// parseWithExpr parses `WITH (withArgExpr...) expr`.
func (p *parser) parseWithExpr() (*withExpr, error) {
	var we withExpr
	if !isWith(p.lex.Token) {
		return nil, fmt.Errorf("withExpr: unexpected token %q; want `WITH`", p.lex.Token)
	}
	if err := p.lex.Next(); err != nil {
		return nil, err
	}
	if p.lex.Token != "(" {
		return nil, fmt.Errorf(`withExpr: unexpected token %q; want "("`, p.lex.Token)
	}
	// Collect comma-separated withArgExprs until the closing ")".
	for {
		if err := p.lex.Next(); err != nil {
			return nil, err
		}
		if p.lex.Token == ")" {
			goto end
		}
		wa, err := p.parseWithArgExpr()
		if err != nil {
			return nil, err
		}
		we.Was = append(we.Was, wa)
		switch p.lex.Token {
		case ",":
			continue
		case ")":
			goto end
		default:
			return nil, fmt.Errorf(`withExpr: unexpected token %q; want ",", ")"`, p.lex.Token)
		}
	}
end:
	// Arg names must be unique within a single WITH clause.
	if err := checkDuplicateWithArgNames(we.Was); err != nil {
		return nil, err
	}
	if err := p.lex.Next(); err != nil {
		return nil, err
	}
	// Parse the expression the WITH bindings apply to.
	e, err := p.parseExpr()
	if err != nil {
		return nil, err
	}
	we.Expr = e
	return &we, nil
}
// parseWithArgExpr parses a single `name = expr` or `name(args) = expr`
// binding inside a WITH clause.
func (p *parser) parseWithArgExpr() (*withArgExpr, error) {
	var wa withArgExpr
	if !isIdentPrefix(p.lex.Token) {
		return nil, fmt.Errorf(`withArgExpr: unexpected token %q; want "ident"`, p.lex.Token)
	}
	wa.Name = unescapeIdent(p.lex.Token)
	// Built-in function names and `with` itself cannot be shadowed.
	if isAggrFunc(wa.Name) || IsRollupFunc(wa.Name) || IsTransformFunc(wa.Name) || isWith(wa.Name) {
		return nil, fmt.Errorf(`withArgExpr: cannot use reserved name %q`, wa.Name)
	}
	if err := p.lex.Next(); err != nil {
		return nil, err
	}
	if p.lex.Token == "(" {
		// Parse func args.
		args, err := p.parseIdentList()
		if err != nil {
			return nil, fmt.Errorf(`withArgExpr: cannot parse args for %q: %s`, wa.Name, err)
		}
		// Make sure all the args have different names
		m := make(map[string]bool, len(args))
		for _, arg := range args {
			if m[arg] {
				return nil, fmt.Errorf(`withArgExpr: duplicate func arg found in %q: %q`, wa.Name, arg)
			}
			m[arg] = true
		}
		wa.Args = args
	}
	if p.lex.Token != "=" {
		return nil, fmt.Errorf(`withArgExpr: unexpected token %q; want "="`, p.lex.Token)
	}
	if err := p.lex.Next(); err != nil {
		return nil, err
	}
	e, err := p.parseExpr()
	if err != nil {
		return nil, fmt.Errorf(`withArgExpr: cannot parse %q: %s`, wa.Name, err)
	}
	wa.Expr = e
	return &wa, nil
}
// parseExpr parses a full expression: a chain of parseSingleExpr operands
// joined by binary operators with optional bool/group/join modifiers.
func (p *parser) parseExpr() (Expr, error) {
	e, err := p.parseSingleExpr()
	if err != nil {
		return nil, err
	}
	// Consume binary operators left-to-right; operator precedence is
	// fixed up by balanceBinaryOp after each node is built.
	for {
		if !isBinaryOp(p.lex.Token) {
			return e, nil
		}
		var be BinaryOpExpr
		be.Op = strings.ToLower(p.lex.Token)
		be.Left = e
		if err := p.lex.Next(); err != nil {
			return nil, err
		}
		if isBinaryOpBoolModifier(p.lex.Token) {
			// `bool` is only valid on comparison operators.
			if !IsBinaryOpCmp(be.Op) {
				return nil, fmt.Errorf(`bool modifier cannot be applied to %q`, be.Op)
			}
			be.Bool = true
			if err := p.lex.Next(); err != nil {
				return nil, err
			}
		}
		if isBinaryOpGroupModifier(p.lex.Token) {
			if err := p.parseModifierExpr(&be.GroupModifier); err != nil {
				return nil, err
			}
			if isBinaryOpJoinModifier(p.lex.Token) {
				// Join modifiers are rejected for logical-set operators.
				if isBinaryOpLogicalSet(be.Op) {
					return nil, fmt.Errorf(`modifier %q cannot be applied to %q`, p.lex.Token, be.Op)
				}
				if err := p.parseModifierExpr(&be.JoinModifier); err != nil {
					return nil, err
				}
			}
		}
		e2, err := p.parseSingleExpr()
		if err != nil {
			return nil, err
		}
		be.Right = e2
		e = balanceBinaryOp(&be)
	}
}
// balanceBinaryOp restores operator precedence after the left-to-right
// parse in parseExpr: when the new node binds at least as tightly as its
// left child, the tree is rotated so the tighter-binding op sits deeper.
func balanceBinaryOp(be *BinaryOpExpr) Expr {
	bel, ok := be.Left.(*BinaryOpExpr)
	if !ok {
		return be
	}
	lp := binaryOpPriority(bel.Op)
	rp := binaryOpPriority(be.Op)
	if rp < lp {
		return be
	}
	// Equal priorities only rotate for right-associative operators.
	if rp == lp && !isRightAssociativeBinaryOp(be.Op) {
		return be
	}
	be.Left = bel.Right
	bel.Right = balanceBinaryOp(be)
	return bel
}
// parseSingleExpr parses non-binaryOp expressions.
func (p *parser) parseSingleExpr() (Expr, error) {
	if isWith(p.lex.Token) {
		// `with` only starts a WITH clause when followed by "(";
		// peek one token ahead, then step back before deciding.
		err := p.lex.Next()
		nextToken := p.lex.Token
		p.lex.Prev()
		if err == nil && nextToken == "(" {
			return p.parseWithExpr()
		}
	}
	e, err := p.parseSingleExprWithoutRollupSuffix()
	if err != nil {
		return nil, err
	}
	if p.lex.Token != "[" && !isOffset(p.lex.Token) {
		// There is no rollup expression.
		return e, nil
	}
	// A "[" window or `offset` follows: wrap e into a rollup expression.
	return p.parseRollupExpr(e)
}
// parseSingleExprWithoutRollupSuffix parses one operand: a number, a
// string, an ident-led expression, parens, a metric selector or a unary
// +/- expression.
func (p *parser) parseSingleExprWithoutRollupSuffix() (Expr, error) {
	if isPositiveNumberPrefix(p.lex.Token) || isInfOrNaN(p.lex.Token) {
		return p.parsePositiveNumberExpr()
	}
	if isStringPrefix(p.lex.Token) {
		return p.parseStringExpr()
	}
	if isIdentPrefix(p.lex.Token) {
		return p.parseIdentExpr()
	}
	switch p.lex.Token {
	case "(":
		return p.parseParensExpr()
	case "{":
		return p.parseMetricExpr()
	case "-":
		// Unary minus. Substitute `-expr` with `0 - expr`
		if err := p.lex.Next(); err != nil {
			return nil, err
		}
		e, err := p.parseSingleExpr()
		if err != nil {
			return nil, err
		}
		be := &BinaryOpExpr{
			Op: "-",
			Left: &NumberExpr{
				N: 0,
			},
			Right: e,
		}
		return be, nil
	case "+":
		// Unary plus is a no-op: return the inner expression unchanged.
		if err := p.lex.Next(); err != nil {
			return nil, err
		}
		return p.parseSingleExpr()
	default:
		return nil, fmt.Errorf(`singleExpr: unexpected token %q; want "(", "{", "-", "+"`, p.lex.Token)
	}
}
// parsePositiveNumberExpr parses a numeric literal, including inf/nan and
// special-prefix integers (base auto-detected by ParseInt with base 0).
func (p *parser) parsePositiveNumberExpr() (*NumberExpr, error) {
	if !isPositiveNumberPrefix(p.lex.Token) && !isInfOrNaN(p.lex.Token) {
		return nil, fmt.Errorf(`positiveNumberExpr: unexpected token %q; want "number"`, p.lex.Token)
	}
	var ne NumberExpr
	if isSpecialIntegerPrefix(p.lex.Token) {
		in, err := strconv.ParseInt(p.lex.Token, 0, 64)
		if err != nil {
			return nil, fmt.Errorf(`positiveNumberExpr: cannot parse integer %q: %s`, p.lex.Token, err)
		}
		ne.N = float64(in)
	} else {
		n, err := strconv.ParseFloat(p.lex.Token, 64)
		if err != nil {
			return nil, fmt.Errorf(`positiveNumberExpr: cannot parse %q: %s`, p.lex.Token, err)
		}
		ne.N = n
	}
	if err := p.lex.Next(); err != nil {
		return nil, err
	}
	return &ne, nil
}
// parseStringExpr collects the tokens of a possibly concatenated string
// expression such as `"a" + "b" + ident` into se.tokens; the actual
// concatenation/expansion happens later in expandWithExpr.
func (p *parser) parseStringExpr() (*StringExpr, error) {
	var se StringExpr
	for {
		switch {
		case isStringPrefix(p.lex.Token) || isIdentPrefix(p.lex.Token):
			se.tokens = append(se.tokens, p.lex.Token)
		default:
			return nil, fmt.Errorf(`StringExpr: unexpected token %q; want "string"`, p.lex.Token)
		}
		if err := p.lex.Next(); err != nil {
			return nil, err
		}
		if p.lex.Token != "+" {
			return &se, nil
		}
		// composite StringExpr like `"s1" + "s2"`, `"s" + m()` or `"s" + m{}` or `"s" + unknownToken`.
		if err := p.lex.Next(); err != nil {
			return nil, err
		}
		if isStringPrefix(p.lex.Token) {
			// "s1" + "s2"
			continue
		}
		if !isIdentPrefix(p.lex.Token) {
			// "s" + unknownToken -- stop before the "+" and let the
			// caller handle it as a binary op.
			p.lex.Prev()
			return &se, nil
		}
		// Look after ident
		if err := p.lex.Next(); err != nil {
			return nil, err
		}
		if p.lex.Token == "(" || p.lex.Token == "{" {
			// `"s" + m(` or `"s" + m{` -- rewind both lookahead steps.
			p.lex.Prev()
			p.lex.Prev()
			return &se, nil
		}
		// "s" + ident
		p.lex.Prev()
	}
}
// parseParensExpr parses "(expr, ...)": zero or more comma-separated
// expressions wrapped in parens.
func (p *parser) parseParensExpr() (*parensExpr, error) {
	if p.lex.Token != "(" {
		return nil, fmt.Errorf(`parensExpr: unexpected token %q; want "("`, p.lex.Token)
	}
	var exprs []Expr
	for {
		if err := p.lex.Next(); err != nil {
			return nil, err
		}
		if p.lex.Token == ")" {
			break
		}
		expr, err := p.parseExpr()
		if err != nil {
			return nil, err
		}
		exprs = append(exprs, expr)
		if p.lex.Token == "," {
			continue
		}
		if p.lex.Token == ")" {
			break
		}
		return nil, fmt.Errorf(`parensExpr: unexpected token %q; want "," or ")"`, p.lex.Token)
	}
	// Step past the closing ")".
	if err := p.lex.Next(); err != nil {
		return nil, err
	}
	pe := parensExpr(exprs)
	return &pe, nil
}
// parseAggrFuncExpr parses an aggregate function call. The aggregate
// modifier (parsed via parseModifierExpr) may appear either before or
// after the arg list, and an optional `limit N` suffix may follow.
func (p *parser) parseAggrFuncExpr() (*AggrFuncExpr, error) {
	if !isAggrFunc(p.lex.Token) {
		return nil, fmt.Errorf(`AggrFuncExpr: unexpected token %q; want aggregate func`, p.lex.Token)
	}

	var ae AggrFuncExpr
	ae.Name = strings.ToLower(unescapeIdent(p.lex.Token))
	if err := p.lex.Next(); err != nil {
		return nil, err
	}
	// An ident here means the modifier precedes the arg list.
	if isIdentPrefix(p.lex.Token) {
		goto funcPrefixLabel
	}
	if p.lex.Token == "(" {
		goto funcArgsLabel
	}
	return nil, fmt.Errorf(`AggrFuncExpr: unexpected token %q; want "("`, p.lex.Token)

funcPrefixLabel:
	{
		if !isAggrFuncModifier(p.lex.Token) {
			return nil, fmt.Errorf(`AggrFuncExpr: unexpected token %q; want aggregate func modifier`, p.lex.Token)
		}
		if err := p.parseModifierExpr(&ae.Modifier); err != nil {
			return nil, err
		}
	}

funcArgsLabel:
	{
		args, err := p.parseArgListExpr()
		if err != nil {
			return nil, err
		}
		ae.Args = args

		// Verify whether func suffix exists.
		if ae.Modifier.Op == "" && isAggrFuncModifier(p.lex.Token) {
			if err := p.parseModifierExpr(&ae.Modifier); err != nil {
				return nil, err
			}
		}

		// Check for optional limit.
		if strings.ToLower(p.lex.Token) == "limit" {
			if err := p.lex.Next(); err != nil {
				return nil, err
			}
			limit, err := strconv.Atoi(p.lex.Token)
			if err != nil {
				return nil, fmt.Errorf("cannot parse limit %q: %s", p.lex.Token, err)
			}
			if err := p.lex.Next(); err != nil {
				return nil, err
			}
			ae.Limit = limit
		}
		return &ae, nil
	}
}
func expandWithExpr(was []*withArgExpr, e Expr) (Expr, error) {
switch t := e.(type) {
case *BinaryOpExpr:
left, err := expandWithExpr(was, t.Left)
if err != nil {
return nil, err
}
right, err := expandWithExpr(was, t.Right)
if err != nil {
return nil, err
}
groupModifierArgs, err := expandModifierArgs(was, t.GroupModifier.Args)
if err != nil {
return nil, err
}
joinModifierArgs, err := expandModifierArgs(was, t.JoinModifier.Args)
if err != nil {
return nil, err
}
if t.Op == "+" {
lse, lok := left.(*StringExpr)
rse, rok := right.(*StringExpr)
if lok && rok {
se := &StringExpr{
S: lse.S + rse.S,
}
return se, nil
}
}
be := &BinaryOpExpr{
Op: t.Op,
Bool: t.Bool,
GroupModifier: t.GroupModifier,
JoinModifier: t.JoinModifier,
Left: left,
Right: right,
}
be.GroupModifier.Args = groupModifierArgs
be.JoinModifier.Args = joinModifierArgs
pe := parensExpr{be}
return &pe, nil
case *FuncExpr:
args, err := expandWithArgs(was, t.Args)
if err != nil {
return nil, err
}
wa := getWithArgExpr(was, t.Name)
if wa == nil {
fe := &FuncExpr{
Name: t.Name,
Args: args,
}
return fe, nil
}
return expandWithExprExt(was, wa, args)
case *AggrFuncExpr:
args, err := expandWithArgs(was, t.Args)
if err != nil {
return nil, err
}
modifierArgs, err := expandModifierArgs(was, t.Modifier.Args)
if err != nil {
return nil, err
}
ae := &AggrFuncExpr{
Name: t.Name,
Args: args,
Modifier: t.Modifier,
Limit: t.Limit,
}
ae.Modifier.Args = modifierArgs
return ae, nil
case *parensExpr:
exprs, err := expandWithArgs(was, *t)
if err != nil {
return nil, err
}
pe := parensExpr(exprs)
return &pe, nil
case *StringExpr:
if len(t.S) > 0 {
// Already expanded.
return t, nil
}
var b []byte
for _, token := range t.tokens {
if isStringPrefix(token) {
s, err := extractStringValue(token)
if err != nil {
return nil, err
}
b = append(b, s...)
continue
}
wa := getWithArgExpr(was, token)
if wa == nil {
return nil, fmt.Errorf("missing %q value inside StringExpr", token)
}
eNew, err := expandWithExprExt(was, wa, nil)
if err != nil {
return nil, err
}
seSrc, ok := eNew.(*StringExpr)
if !ok {
return nil, fmt.Errorf("%q must be string expression; got %q", token, eNew.AppendString(nil))
}
if len(seSrc.tokens) > 0 {
panic(fmt.Errorf("BUG: seSrc.tokens must be empty; got %q", seSrc.tokens))
}
b = append(b, seSrc.S...)
}
se := &StringExpr{
S: string(b),
}
return se, nil
case *RollupExpr:
eNew, err := expandWithExpr(was, t.Expr)
if err != nil {
return nil, err
}
re := *t
re.Expr = eNew
return &re, nil
case *withExpr:
wasNew := make([]*withArgExpr, 0, len(was)+len(t.Was))
wasNew = append(wasNew, was...)
wasNew = append(wasNew, t.Was...)
eNew, err := expandWithExpr(wasNew, t.Expr)
if err != nil {
return nil, err
}
return eNew, nil
case *MetricExpr:
if len(t.LabelFilters) > 0 {
// Already expanded.
return t, nil
}
{
var me MetricExpr
// Populate me.LabelFilters
for _, lfe := range t.labelFilters {
if lfe.Value == nil {
// Expand lfe.Label into []LabelFilter.
wa := getWithArgExpr(was, lfe.Label)
if wa == nil {
return nil, fmt.Errorf("missing %q value inside %q", lfe.Label, t.AppendString(nil))
}
eNew, err := expandWithExprExt(was, wa, nil)
if err != nil {
return nil, err
}
wme, ok := eNew.(*MetricExpr)
if !ok || wme.hasNonEmptyMetricGroup() {
return nil, fmt.Errorf("%q must be filters expression inside %q; got %q", lfe.Label, t.AppendString(nil), eNew.AppendString(nil))
}
if len(wme.labelFilters) > 0 {
panic(fmt.Errorf("BUG: wme.labelFilters must be empty; got %s", wme.labelFilters))
}
me.LabelFilters = append(me.LabelFilters, wme.LabelFilters...)
continue
}
// convert lfe to LabelFilter.
se, err := expandWithExpr(was, lfe.Value)
if err != nil {
return nil, err
}
var lfeNew labelFilterExpr
lfeNew.Label = lfe.Label
lfeNew.Value = se.(*StringExpr)
lfeNew.IsNegative = lfe.IsNegative
lfeNew.IsRegexp = lfe.IsRegexp
lf, err := lfeNew.toLabelFilter()
if err != nil {
return nil, err
}
me.LabelFilters = append(me.LabelFilters, *lf)
}
me.LabelFilters = removeDuplicateLabelFilters(me.LabelFilters)
t = &me
}
if !t.hasNonEmptyMetricGroup() {
return t, nil
}
k := t.LabelFilters[0].Value
wa := getWithArgExpr(was, k)
if wa == nil {
return t, nil
}
eNew, err := expandWithExprExt(was, wa, nil)
if err != nil {
return nil, err
}
var wme *MetricExpr
re, _ := eNew.(*RollupExpr)
if re != nil {
wme, _ = re.Expr.(*MetricExpr)
} else {
wme, _ = eNew.(*MetricExpr)
}
if wme == nil {
if !t.isOnlyMetricGroup() {
return nil, fmt.Errorf("cannot expand %q to non-metric expression %q", t.AppendString(nil), eNew.AppendString(nil))
}
return eNew, nil
}
if len(wme.labelFilters) > 0 {
panic(fmt.Errorf("BUG: wme.labelFilters must be empty; got %s", wme.labelFilters))
}
var me MetricExpr
me.LabelFilters = append(me.LabelFilters, wme.LabelFilters...)
me.LabelFilters = append(me.LabelFilters, t.LabelFilters[1:]...)
me.LabelFilters = removeDuplicateLabelFilters(me.LabelFilters)
if re == nil {
return &me, nil
}
reNew := *re
reNew.Expr = &me
return &reNew, nil
default:
return e, nil
}
}
// expandWithArgs expands every arg in args against the WITH-arg scope `was`
// and returns the expanded args in the original order.
//
// The first expansion error aborts the whole operation.
func expandWithArgs(was []*withArgExpr, args []Expr) ([]Expr, error) {
	expanded := make([]Expr, 0, len(args))
	for _, arg := range args {
		e, err := expandWithExpr(was, arg)
		if err != nil {
			return nil, err
		}
		expanded = append(expanded, e)
	}
	return expanded, nil
}
// expandModifierArgs expands the given modifier args (e.g. the label list in
// `by (...)`, `on (...)`) against the WITH-arg scope `was`.
//
// An arg that names a zero-arg WITH entry is substituted by that entry's
// metric name (or by every metric name from a parens expression). All other
// args are passed through unchanged. Duplicate args are removed from the
// result while preserving first-seen order.
func expandModifierArgs(was []*withArgExpr, args []string) ([]string, error) {
	if len(args) == 0 {
		return nil, nil
	}
	dstArgs := make([]string, 0, len(args))
	for _, arg := range args {
		wa := getWithArgExpr(was, arg)
		if wa == nil {
			// Leave the arg as is.
			dstArgs = append(dstArgs, arg)
			continue
		}
		if len(wa.Args) > 0 {
			// Template funcs cannot be used inside modifier list. Leave the arg as is.
			dstArgs = append(dstArgs, arg)
			continue
		}
		me, ok := wa.Expr.(*MetricExpr)
		if ok {
			// A bare metric name expands into a single modifier arg.
			if !me.isOnlyMetricGroup() {
				return nil, fmt.Errorf("cannot use %q instead of %q in %s", me.AppendString(nil), arg, args)
			}
			dstArg := me.LabelFilters[0].Value
			dstArgs = append(dstArgs, dstArg)
			continue
		}
		pe, ok := wa.Expr.(*parensExpr)
		if ok {
			// A parens expression expands into multiple modifier args -
			// one per enclosed metric name.
			for _, pArg := range *pe {
				me, ok := pArg.(*MetricExpr)
				if !ok || !me.isOnlyMetricGroup() {
					return nil, fmt.Errorf("cannot use %q instead of %q in %s", pe.AppendString(nil), arg, args)
				}
				dstArg := me.LabelFilters[0].Value
				dstArgs = append(dstArgs, dstArg)
			}
			continue
		}
		return nil, fmt.Errorf("cannot use %q instead of %q in %s", wa.Expr.AppendString(nil), arg, args)
	}
	// Remove duplicate args from dstArgs; the backing array is reused in place.
	m := make(map[string]bool, len(dstArgs))
	filteredArgs := dstArgs[:0]
	for _, arg := range dstArgs {
		if !m[arg] {
			filteredArgs = append(filteredArgs, arg)
			m[arg] = true
		}
	}
	return filteredArgs, nil
}

// expandWithExprExt expands the WITH-arg entry wa, binding the given call
// args to wa's declared arg names before expanding wa.Expr.
//
// A nil args together with a non-empty wa.Args means wa was referenced
// without parens; a plain MetricExpr carrying wa's name is returned then.
func expandWithExprExt(was []*withArgExpr, wa *withArgExpr, args []Expr) (Expr, error) {
	if len(wa.Args) != len(args) {
		if args == nil {
			// Just return MetricExpr with the wa.Name name.
			return newMetricExpr(wa.Name), nil
		}
		return nil, fmt.Errorf("invalid number of args for %q; got %d; want %d", wa.Name, len(args), len(wa.Args))
	}
	// Only entries defined before wa stay visible inside wa.Expr,
	// which prevents self- and forward-references.
	wasNew := make([]*withArgExpr, 0, len(was)+len(args))
	for _, waTmp := range was {
		if waTmp == wa {
			break
		}
		wasNew = append(wasNew, waTmp)
	}
	// Bind each call arg to the corresponding declared arg name.
	for i, arg := range args {
		wasNew = append(wasNew, &withArgExpr{
			Name: wa.Args[i],
			Expr: arg,
		})
	}
	return expandWithExpr(wasNew, wa.Expr)
}

// newMetricExpr returns a MetricExpr matching the metric with the given name.
func newMetricExpr(name string) *MetricExpr {
	return &MetricExpr{
		LabelFilters: []LabelFilter{{
			Label: "__name__",
			Value: name,
		}},
	}
}
// extractStringValue returns the unquoted value of the given string literal
// token. Both single- and double-quoted literals are accepted.
func extractStringValue(token string) (string, error) {
	if !isStringPrefix(token) {
		return "", fmt.Errorf(`StringExpr must contain only string literals; got %q`, token)
	}
	// See https://prometheus.io/docs/prometheus/latest/querying/basics/#string-literals
	if token[0] == '\'' {
		// Convert the single-quoted literal into an equivalent double-quoted
		// literal, so strconv.Unquote below can handle it.
		if len(token) < 2 || token[len(token)-1] != '\'' {
			return "", fmt.Errorf(`string literal contains unexpected trailing char; got %q`, token)
		}
		token = token[1 : len(token)-1]
		token = strings.Replace(token, "\\'", "'", -1)
		token = strings.Replace(token, `"`, `\"`, -1)
		token = `"` + token + `"`
	}
	s, err := strconv.Unquote(token)
	if err != nil {
		return "", fmt.Errorf(`cannot parse string literal %q: %s`, token, err)
	}
	return s, nil
}

// removeDuplicateLabelFilters drops label filters with an identical string
// representation, keeping the first occurrence. The input slice's backing
// array is reused for the result.
func removeDuplicateLabelFilters(lfs []LabelFilter) []LabelFilter {
	lfsm := make(map[string]bool, len(lfs))
	lfsNew := lfs[:0]
	var buf []byte
	for i := range lfs {
		lf := &lfs[i]
		// The canonical string form of the filter serves as the dedup key.
		buf = lf.AppendString(buf[:0])
		if lfsm[string(buf)] {
			continue
		}
		lfsm[string(buf)] = true
		lfsNew = append(lfsNew, *lf)
	}
	return lfsNew
}
// parseFuncExpr parses a function call such as `foo(...)`.
//
// The lexer must be positioned at the function name; on success it is
// positioned at the token following the closing ")".
func (p *parser) parseFuncExpr() (*FuncExpr, error) {
	if !isIdentPrefix(p.lex.Token) {
		return nil, fmt.Errorf(`FuncExpr: unexpected token %q; want "ident"`, p.lex.Token)
	}
	var fe FuncExpr
	fe.Name = unescapeIdent(p.lex.Token)
	if err := p.lex.Next(); err != nil {
		return nil, err
	}
	if p.lex.Token != "(" {
		// Use "FuncExpr:" (colon) for consistency with every other parser
		// error message in this file; the original said "FuncExpr;".
		return nil, fmt.Errorf(`FuncExpr: unexpected token %q; want "("`, p.lex.Token)
	}
	args, err := p.parseArgListExpr()
	if err != nil {
		return nil, err
	}
	fe.Args = args
	return &fe, nil
}
// parseModifierExpr parses a binary-op modifier such as `on (...)`,
// `ignoring (...)`, `group_left (...)` or a bare `group_left`.
//
// The lexer must be positioned at the modifier name. The op is lowercased
// before being stored in me.
func (p *parser) parseModifierExpr(me *ModifierExpr) error {
	if !isIdentPrefix(p.lex.Token) {
		return fmt.Errorf(`ModifierExpr: unexpected token %q; want "ident"`, p.lex.Token)
	}
	me.Op = strings.ToLower(p.lex.Token)
	if err := p.lex.Next(); err != nil {
		return err
	}
	if isBinaryOpJoinModifier(me.Op) && p.lex.Token != "(" {
		// join modifier may miss ident list.
		return nil
	}
	args, err := p.parseIdentList()
	if err != nil {
		return err
	}
	me.Args = args
	return nil
}

// parseIdentList parses a parenthesized comma-separated identifier list
// such as `(foo, bar)`. The empty list `()` is accepted.
//
// The lexer must be positioned at "("; on success it is positioned at the
// token following the closing ")".
func (p *parser) parseIdentList() ([]string, error) {
	if p.lex.Token != "(" {
		return nil, fmt.Errorf(`identList: unexpected token %q; want "("`, p.lex.Token)
	}
	var idents []string
	for {
		if err := p.lex.Next(); err != nil {
			return nil, err
		}
		if p.lex.Token == ")" {
			goto closeParensLabel
		}
		if !isIdentPrefix(p.lex.Token) {
			return nil, fmt.Errorf(`identList: unexpected token %q; want "ident"`, p.lex.Token)
		}
		idents = append(idents, unescapeIdent(p.lex.Token))
		if err := p.lex.Next(); err != nil {
			return nil, err
		}
		switch p.lex.Token {
		case ",":
			continue
		case ")":
			goto closeParensLabel
		default:
			return nil, fmt.Errorf(`identList: unexpected token %q; want ",", ")"`, p.lex.Token)
		}
	}
closeParensLabel:
	// Advance past the closing ")".
	if err := p.lex.Next(); err != nil {
		return nil, err
	}
	return idents, nil
}
// parseArgListExpr parses a parenthesized comma-separated list of
// expressions such as `(a, b+c)`. The empty list `()` is accepted.
//
// The lexer must be positioned at "("; on success it is positioned at the
// token following the closing ")".
func (p *parser) parseArgListExpr() ([]Expr, error) {
	if p.lex.Token != "(" {
		return nil, fmt.Errorf(`argList: unexpected token %q; want "("`, p.lex.Token)
	}
	var args []Expr
	for {
		if err := p.lex.Next(); err != nil {
			return nil, err
		}
		if p.lex.Token == ")" {
			goto closeParensLabel
		}
		expr, err := p.parseExpr()
		if err != nil {
			return nil, err
		}
		args = append(args, expr)
		switch p.lex.Token {
		case ",":
			continue
		case ")":
			goto closeParensLabel
		default:
			return nil, fmt.Errorf(`argList: unexpected token %q; want ",", ")"`, p.lex.Token)
		}
	}
closeParensLabel:
	// Advance past the closing ")".
	if err := p.lex.Next(); err != nil {
		return nil, err
	}
	return args, nil
}
// getWithArgExpr returns the most recently defined WITH-arg entry named
// `name`, or nil if no such entry exists.
//
// The slice is scanned from its end, since later entries shadow earlier
// ones with the same name.
func getWithArgExpr(was []*withArgExpr, name string) *withArgExpr {
	for i := len(was); i > 0; i-- {
		if wa := was[i-1]; wa.Name == name {
			return wa
		}
	}
	return nil
}
// parseLabelFilters parses `{...}` - a comma-separated list of label filter
// expressions. The empty `{}` is accepted.
//
// The lexer must be positioned at "{"; on success it is positioned at the
// token following the closing "}".
func (p *parser) parseLabelFilters() ([]*labelFilterExpr, error) {
	if p.lex.Token != "{" {
		return nil, fmt.Errorf(`labelFilters: unexpected token %q; want "{"`, p.lex.Token)
	}
	var lfes []*labelFilterExpr
	for {
		if err := p.lex.Next(); err != nil {
			return nil, err
		}
		if p.lex.Token == "}" {
			goto closeBracesLabel
		}
		lfe, err := p.parseLabelFilterExpr()
		if err != nil {
			return nil, err
		}
		lfes = append(lfes, lfe)
		switch p.lex.Token {
		case ",":
			continue
		case "}":
			goto closeBracesLabel
		default:
			return nil, fmt.Errorf(`labelFilters: unexpected token %q; want ",", "}"`, p.lex.Token)
		}
	}
closeBracesLabel:
	// Advance past the closing "}".
	if err := p.lex.Next(); err != nil {
		return nil, err
	}
	return lfes, nil
}

// parseLabelFilterExpr parses a single `label <op> "value"` filter, where
// <op> is `=`, `!=`, `=~` or `!~`.
//
// A bare label immediately followed by "," or "}" is returned with a nil
// Value; such entries name WITH-arg references that are expanded later by
// expandWithExpr.
func (p *parser) parseLabelFilterExpr() (*labelFilterExpr, error) {
	if !isIdentPrefix(p.lex.Token) {
		return nil, fmt.Errorf(`labelFilterExpr: unexpected token %q; want "ident"`, p.lex.Token)
	}
	var lfe labelFilterExpr
	lfe.Label = unescapeIdent(p.lex.Token)
	if err := p.lex.Next(); err != nil {
		return nil, err
	}
	switch p.lex.Token {
	case "=":
		// Nothing to do.
	case "!=":
		lfe.IsNegative = true
	case "=~":
		lfe.IsRegexp = true
	case "!~":
		lfe.IsNegative = true
		lfe.IsRegexp = true
	case ",", "}":
		// Bare word - a WITH-arg reference without a value.
		return &lfe, nil
	default:
		return nil, fmt.Errorf(`labelFilterExpr: unexpected token %q; want "=", "!=", "=~", "!~", ",", "}"`, p.lex.Token)
	}
	if err := p.lex.Next(); err != nil {
		return nil, err
	}
	se, err := p.parseStringExpr()
	if err != nil {
		return nil, err
	}
	lfe.Value = se
	return &lfe, nil
}

// labelFilterExpr represents `foo <op> "bar"` expression, where <op> is `=`, `!=`, `=~` or `!~`.
//
// This type isn't exported.
type labelFilterExpr struct {
	// Label is the label name on the left-hand side of the filter.
	Label string
	// Value is the (possibly still composite) string on the right-hand
	// side; nil for bare WITH-arg references.
	Value *StringExpr
	// IsRegexp is true for `=~` and `!~` filters.
	IsRegexp bool
	// IsNegative is true for `!=` and `!~` filters.
	IsNegative bool
}

// String returns a human-readable representation of lfe for debugging.
func (lfe *labelFilterExpr) String() string {
	return fmt.Sprintf("[label=%q, value=%+v, isRegexp=%v, isNegative=%v]", lfe.Label, lfe.Value, lfe.IsRegexp, lfe.IsNegative)
}

// toLabelFilter converts the already-expanded lfe into a LabelFilter,
// validating the regexp for `=~` / `!~` filters.
//
// It panics if lfe.Value still contains unexpanded tokens - expandWithExpr
// must run first.
func (lfe *labelFilterExpr) toLabelFilter() (*LabelFilter, error) {
	if lfe.Value == nil || len(lfe.Value.tokens) > 0 {
		panic(fmt.Errorf("BUG: lfe.Value must be already expanded; got %v", lfe.Value))
	}
	var lf LabelFilter
	lf.Label = lfe.Label
	lf.Value = lfe.Value.S
	lf.IsRegexp = lfe.IsRegexp
	lf.IsNegative = lfe.IsNegative
	if !lf.IsRegexp {
		return &lf, nil
	}
	// Verify regexp.
	if _, err := CompileRegexpAnchored(lfe.Value.S); err != nil {
		return nil, fmt.Errorf("invalid regexp in %s=%q: %s", lf.Label, lf.Value, err)
	}
	return &lf, nil
}
// parseWindowAndStep parses the `[window]`, `[window:step]`, `[window:]` or
// `[:step]` suffix of a rollup/subquery expression.
//
// It returns the window, the step, and whether the step must be inherited
// from the outer query (the `[window:]` form).
func (p *parser) parseWindowAndStep() (string, string, bool, error) {
	if p.lex.Token != "[" {
		return "", "", false, fmt.Errorf(`windowAndStep: unexpected token %q; want "["`, p.lex.Token)
	}
	err := p.lex.Next()
	if err != nil {
		return "", "", false, err
	}
	var window string
	if !strings.HasPrefix(p.lex.Token, ":") {
		window, err = p.parsePositiveDuration()
		if err != nil {
			return "", "", false, err
		}
	}
	var step string
	inheritStep := false
	if strings.HasPrefix(p.lex.Token, ":") {
		// Parse step
		p.lex.Token = p.lex.Token[1:]
		if p.lex.Token == "" {
			if err := p.lex.Next(); err != nil {
				return "", "", false, err
			}
			if p.lex.Token == "]" {
				// The `[window:]` form - inherit the step from the outer query.
				inheritStep = true
			}
		}
		if p.lex.Token != "]" {
			step, err = p.parsePositiveDuration()
			if err != nil {
				return "", "", false, err
			}
		}
	}
	if p.lex.Token != "]" {
		return "", "", false, fmt.Errorf(`windowAndStep: unexpected token %q; want "]"`, p.lex.Token)
	}
	if err := p.lex.Next(); err != nil {
		return "", "", false, err
	}
	return window, step, inheritStep, nil
}

// parseOffset parses `offset <duration>` and returns the duration string.
func (p *parser) parseOffset() (string, error) {
	if !isOffset(p.lex.Token) {
		return "", fmt.Errorf(`offset: unexpected token %q; want "offset"`, p.lex.Token)
	}
	if err := p.lex.Next(); err != nil {
		return "", err
	}
	d, err := p.parseDuration()
	if err != nil {
		return "", err
	}
	return d, nil
}

// parseDuration parses a possibly negative duration such as `5m` or `-1h`
// and returns it as a string with the sign preserved.
func (p *parser) parseDuration() (string, error) {
	isNegative := false
	if p.lex.Token == "-" {
		isNegative = true
		if err := p.lex.Next(); err != nil {
			return "", err
		}
	}
	if !isPositiveDuration(p.lex.Token) {
		return "", fmt.Errorf(`duration: unexpected token %q; want "duration"`, p.lex.Token)
	}
	d := p.lex.Token
	if err := p.lex.Next(); err != nil {
		return "", err
	}
	if isNegative {
		// Re-attach the consumed "-" sign.
		d = "-" + d
	}
	return d, nil
}

// parsePositiveDuration parses a duration and rejects negative values.
func (p *parser) parsePositiveDuration() (string, error) {
	d, err := p.parseDuration()
	if err != nil {
		return "", err
	}
	if strings.HasPrefix(d, "-") {
		return "", fmt.Errorf("positiveDuration: expecting positive duration; got %q", d)
	}
	return d, nil
}
// parseIdentExpr parses expressions starting with `ident` token.
//
// It peeks at the token after the ident to decide whether the ident starts
// a metric expression, a function call or an aggregate call, then rewinds
// the lexer before delegating to the matching parser.
func (p *parser) parseIdentExpr() (Expr, error) {
	// Look into the next-next token in order to determine how to parse
	// the current expression.
	if err := p.lex.Next(); err != nil {
		return nil, err
	}
	if isEOF(p.lex.Token) || isOffset(p.lex.Token) {
		p.lex.Prev()
		return p.parseMetricExpr()
	}
	if isIdentPrefix(p.lex.Token) {
		p.lex.Prev()
		if isAggrFunc(p.lex.Token) {
			return p.parseAggrFuncExpr()
		}
		return p.parseMetricExpr()
	}
	if isBinaryOp(p.lex.Token) {
		p.lex.Prev()
		return p.parseMetricExpr()
	}
	switch p.lex.Token {
	case "(":
		// An ident followed by "(" is a function or aggregate call.
		p.lex.Prev()
		if isAggrFunc(p.lex.Token) {
			return p.parseAggrFuncExpr()
		}
		return p.parseFuncExpr()
	case "{", "[", ")", ",":
		p.lex.Prev()
		return p.parseMetricExpr()
	default:
		return nil, fmt.Errorf(`identExpr: unexpected token %q; want "(", "{", "[", ")", ","`, p.lex.Token)
	}
}

// parseMetricExpr parses `metric_name`, `metric_name{...}` or `{...}`.
//
// Label filters are accumulated in the unexpanded me.labelFilters form;
// they are converted into me.LabelFilters later by expandWithExpr.
func (p *parser) parseMetricExpr() (*MetricExpr, error) {
	var me MetricExpr
	if isIdentPrefix(p.lex.Token) {
		// The metric name is stored as a quoted `__name__` label filter.
		var lfe labelFilterExpr
		lfe.Label = "__name__"
		lfe.Value = &StringExpr{
			tokens: []string{strconv.Quote(unescapeIdent(p.lex.Token))},
		}
		me.labelFilters = append(me.labelFilters[:0], &lfe)
		if err := p.lex.Next(); err != nil {
			return nil, err
		}
		if p.lex.Token != "{" {
			return &me, nil
		}
	}
	lfes, err := p.parseLabelFilters()
	if err != nil {
		return nil, err
	}
	me.labelFilters = append(me.labelFilters, lfes...)
	return &me, nil
}

// parseRollupExpr wraps arg into a RollupExpr, parsing the optional
// `[window:step]` and `offset <duration>` suffixes.
func (p *parser) parseRollupExpr(arg Expr) (Expr, error) {
	var re RollupExpr
	re.Expr = arg
	if p.lex.Token == "[" {
		window, step, inheritStep, err := p.parseWindowAndStep()
		if err != nil {
			return nil, err
		}
		re.Window = window
		re.Step = step
		re.InheritStep = inheritStep
		if !isOffset(p.lex.Token) {
			return &re, nil
		}
	}
	offset, err := p.parseOffset()
	if err != nil {
		return nil, err
	}
	re.Offset = offset
	return &re, nil
}
// StringExpr represents string expression.
type StringExpr struct {
	// S contains unquoted value for string expression.
	S string

	// Composite string has non-empty tokens.
	// They must be converted into S by expandWithExpr.
	tokens []string
}

// AppendString appends string representation of se to dst and returns the result.
func (se *StringExpr) AppendString(dst []byte) []byte {
	return strconv.AppendQuote(dst, se.S)
}

// NumberExpr represents number expression.
type NumberExpr struct {
	// N is the parsed number, i.e. `1.23`, `-234`, etc.
	N float64
}

// AppendString appends string representation of ne to dst and returns the result.
func (ne *NumberExpr) AppendString(dst []byte) []byte {
	return strconv.AppendFloat(dst, ne.N, 'g', -1, 64)
}

// parensExpr represents `(...)`.
//
// It isn't exported.
type parensExpr []Expr

// AppendString appends string representation of pe to dst and returns the result.
func (pe parensExpr) AppendString(dst []byte) []byte {
	return appendStringArgListExpr(dst, pe)
}
// BinaryOpExpr represents binary operation.
type BinaryOpExpr struct {
	// Op is the operation itself, i.e. `+`, `-`, `*`, etc.
	Op string

	// Bool indicates whether `bool` modifier is present.
	// For example, `foo >bool bar`.
	Bool bool

	// GroupModifier contains modifier such as "on" or "ignoring".
	GroupModifier ModifierExpr

	// JoinModifier contains modifier such as "group_left" or "group_right".
	JoinModifier ModifierExpr

	// Left contains left arg for the `left op right` expression.
	Left Expr

	// Right contains right arg for the `left op right` expression.
	Right Expr
}

// AppendString appends string representation of be to dst and returns the result.
//
// Nested binary operands are parenthesized to preserve evaluation order in
// the printed form.
func (be *BinaryOpExpr) AppendString(dst []byte) []byte {
	if _, ok := be.Left.(*BinaryOpExpr); ok {
		dst = append(dst, '(')
		dst = be.Left.AppendString(dst)
		dst = append(dst, ')')
	} else {
		dst = be.Left.AppendString(dst)
	}
	dst = append(dst, ' ')
	dst = append(dst, be.Op...)
	if be.Bool {
		dst = append(dst, " bool"...)
	}
	if be.GroupModifier.Op != "" {
		dst = append(dst, ' ')
		dst = be.GroupModifier.AppendString(dst)
	}
	if be.JoinModifier.Op != "" {
		dst = append(dst, ' ')
		dst = be.JoinModifier.AppendString(dst)
	}
	dst = append(dst, ' ')
	if _, ok := be.Right.(*BinaryOpExpr); ok {
		dst = append(dst, '(')
		dst = be.Right.AppendString(dst)
		dst = append(dst, ')')
	} else {
		dst = be.Right.AppendString(dst)
	}
	return dst
}

// ModifierExpr represents MetricsQL modifier such as `<op> (...)`
type ModifierExpr struct {
	// Op is modifier operation.
	Op string

	// Args contains modifier args from parens.
	Args []string
}

// AppendString appends string representation of me to dst and returns the result.
func (me *ModifierExpr) AppendString(dst []byte) []byte {
	dst = append(dst, me.Op...)
	dst = append(dst, " ("...)
	for i, arg := range me.Args {
		dst = appendEscapedIdent(dst, arg)
		if i+1 < len(me.Args) {
			dst = append(dst, ", "...)
		}
	}
	dst = append(dst, ')')
	return dst
}
// appendStringArgListExpr appends "(arg1, arg2, ...)" to dst and returns
// the extended slice.
func appendStringArgListExpr(dst []byte, args []Expr) []byte {
	dst = append(dst, '(')
	for i, arg := range args {
		if i > 0 {
			dst = append(dst, ", "...)
		}
		dst = arg.AppendString(dst)
	}
	return append(dst, ')')
}
// FuncExpr represents MetricsQL function such as `foo(...)`
type FuncExpr struct {
	// Name is function name.
	Name string

	// Args contains function args.
	Args []Expr
}

// AppendString appends string representation of fe to dst and returns the result.
func (fe *FuncExpr) AppendString(dst []byte) []byte {
	dst = appendEscapedIdent(dst, fe.Name)
	dst = appendStringArgListExpr(dst, fe.Args)
	return dst
}

// AggrFuncExpr represents aggregate function such as `sum(...) by (...)`
type AggrFuncExpr struct {
	// Name is the function name.
	Name string

	// Args is the function args.
	Args []Expr

	// Modifier is optional modifier such as `by (...)` or `without (...)`.
	Modifier ModifierExpr

	// Optional limit for the number of output time series.
	// This is MetricsQL extension.
	//
	// Example: `sum(...) by (...) limit 10` would return maximum 10 time series.
	Limit int
}

// AppendString appends string representation of ae to dst and returns the result.
func (ae *AggrFuncExpr) AppendString(dst []byte) []byte {
	dst = appendEscapedIdent(dst, ae.Name)
	dst = appendStringArgListExpr(dst, ae.Args)
	if ae.Modifier.Op != "" {
		dst = append(dst, ' ')
		dst = ae.Modifier.AppendString(dst)
	}
	if ae.Limit > 0 {
		dst = append(dst, " limit "...)
		dst = strconv.AppendInt(dst, int64(ae.Limit), 10)
	}
	return dst
}
// withExpr represents `with (...)` extension from MetricsQL.
//
// It isn't exported.
type withExpr struct {
	// Was holds the WITH-arg definitions from the parens.
	Was []*withArgExpr

	// Expr is the expression the WITH definitions apply to.
	Expr Expr
}

// AppendString appends string representation of we to dst and returns the result.
func (we *withExpr) AppendString(dst []byte) []byte {
	dst = append(dst, "WITH ("...)
	for i, wa := range we.Was {
		dst = wa.AppendString(dst)
		if i+1 < len(we.Was) {
			dst = append(dst, ',')
		}
	}
	dst = append(dst, ") "...)
	dst = we.Expr.AppendString(dst)
	return dst
}

// withArgExpr represents a single entry from WITH expression.
//
// It isn't exported.
type withArgExpr struct {
	// Name is the entry's identifier.
	Name string

	// Args holds the declared template-arg names, if any.
	Args []string

	// Expr is the entry's body.
	Expr Expr
}

// AppendString appends string representation of wa to dst and returns the result.
func (wa *withArgExpr) AppendString(dst []byte) []byte {
	dst = appendEscapedIdent(dst, wa.Name)
	if len(wa.Args) > 0 {
		dst = append(dst, '(')
		for i, arg := range wa.Args {
			dst = appendEscapedIdent(dst, arg)
			if i+1 < len(wa.Args) {
				dst = append(dst, ',')
			}
		}
		dst = append(dst, ')')
	}
	dst = append(dst, " = "...)
	dst = wa.Expr.AppendString(dst)
	return dst
}
// RollupExpr represents MetricsQL expression, which contains at least `offset` or `[...]` part.
type RollupExpr struct {
	// The expression for the rollup. Usually it is MetricExpr, but may be arbitrary expr
	// if subquery is used. https://prometheus.io/blog/2019/01/28/subquery-support/
	Expr Expr

	// Window contains optional window value from square brackets
	//
	// For example, `http_requests_total[5m]` will have Window value `5m`.
	Window string

	// Offset contains optional value from `offset` part.
	//
	// For example, `foobar{baz="aa"} offset 5m` will have Offset value `5m`.
	Offset string

	// Step contains optional step value from square brackets.
	//
	// For example, `foobar[1h:3m]` will have Step value '3m'.
	Step string

	// If set to true, the expression is printed as `foo[1h:]`
	// instead of `foo[1h]`, i.e. the step is inherited from the outer query.
	InheritStep bool
}

// ForSubquery returns true if re represents subquery.
func (re *RollupExpr) ForSubquery() bool {
	return len(re.Step) > 0 || re.InheritStep
}

// AppendString appends string representation of re to dst and returns the result.
func (re *RollupExpr) AppendString(dst []byte) []byte {
	// Parenthesize inner expressions whose printed form would otherwise
	// bind ambiguously with the `[...]` / `offset` suffixes.
	needParens := func() bool {
		if _, ok := re.Expr.(*RollupExpr); ok {
			return true
		}
		if _, ok := re.Expr.(*BinaryOpExpr); ok {
			return true
		}
		if ae, ok := re.Expr.(*AggrFuncExpr); ok && ae.Modifier.Op != "" {
			return true
		}
		return false
	}()
	if needParens {
		dst = append(dst, '(')
	}
	dst = re.Expr.AppendString(dst)
	if needParens {
		dst = append(dst, ')')
	}
	if len(re.Window) > 0 || re.InheritStep || len(re.Step) > 0 {
		dst = append(dst, '[')
		if len(re.Window) > 0 {
			dst = append(dst, re.Window...)
		}
		if len(re.Step) > 0 {
			dst = append(dst, ':')
			dst = append(dst, re.Step...)
		} else if re.InheritStep {
			// The `[window:]` form - step inherited from the outer query.
			dst = append(dst, ':')
		}
		dst = append(dst, ']')
	}
	if len(re.Offset) > 0 {
		dst = append(dst, " offset "...)
		dst = append(dst, re.Offset...)
	}
	return dst
}
// LabelFilter represents MetricsQL label filter like `foo="bar"`.
type LabelFilter struct {
	// Label contains label name for the filter.
	Label string

	// Value contains unquoted value for the filter.
	Value string

	// IsNegative represents whether the filter is negative, i.e. '!=' or '!~'.
	IsNegative bool

	// IsRegexp represents whether the filter is regexp, i.e. `=~` or `!~`.
	IsRegexp bool
}

// AppendString appends string representation of lf to dst and returns the result.
func (lf *LabelFilter) AppendString(dst []byte) []byte {
	dst = appendEscapedIdent(dst, lf.Label)
	// Select the operator matching the (IsNegative, IsRegexp) pair.
	var op string
	if lf.IsNegative {
		if lf.IsRegexp {
			op = "!~"
		} else {
			op = "!="
		}
	} else {
		if lf.IsRegexp {
			op = "=~"
		} else {
			op = "="
		}
	}
	dst = append(dst, op...)
	dst = strconv.AppendQuote(dst, lf.Value)
	return dst
}
// MetricExpr represents MetricsQL metric with optional filters, i.e. `foo{...}`.
type MetricExpr struct {
	// LabelFilters contains a list of label filters from curly braces.
	// Metric name if present must be the first.
	LabelFilters []LabelFilter

	// labelFilters must be expanded to LabelFilters by expandWithExpr.
	labelFilters []*labelFilterExpr
}

// AppendString appends string representation of me to dst and returns the result.
//
// The `__name__` filter, if first, is printed as the bare metric name;
// the remaining filters are printed inside curly braces.
func (me *MetricExpr) AppendString(dst []byte) []byte {
	lfs := me.LabelFilters
	if len(lfs) > 0 {
		lf := &lfs[0]
		if lf.Label == "__name__" && !lf.IsNegative && !lf.IsRegexp {
			dst = appendEscapedIdent(dst, lf.Value)
			lfs = lfs[1:]
		}
	}
	if len(lfs) > 0 {
		dst = append(dst, '{')
		for i := range lfs {
			dst = lfs[i].AppendString(dst)
			if i+1 < len(lfs) {
				dst = append(dst, ", "...)
			}
		}
		dst = append(dst, '}')
	} else if len(me.LabelFilters) == 0 {
		// No metric name and no filters - print the empty selector.
		dst = append(dst, "{}"...)
	}
	return dst
}

// IsEmpty returns true if me equals to `{}`.
func (me *MetricExpr) IsEmpty() bool {
	return len(me.LabelFilters) == 0
}

// isOnlyMetricGroup returns true if me contains only the metric name filter.
func (me *MetricExpr) isOnlyMetricGroup() bool {
	if !me.hasNonEmptyMetricGroup() {
		return false
	}
	return len(me.LabelFilters) == 1
}

// hasNonEmptyMetricGroup returns true if me's first filter is a plain
// `__name__="..."` metric-name match.
func (me *MetricExpr) hasNonEmptyMetricGroup() bool {
	if len(me.LabelFilters) == 0 {
		return false
	}
	lf := &me.LabelFilters[0]
	return lf.Label == "__name__" && !lf.IsNegative && !lf.IsRegexp
}
| {
"pile_set_name": "Github"
} |
/* NOTE(review): looks like a reduced compile-test stub (forward-declared
   struct, trivial bodies), presumably extracted from a debugger's Ada
   symbol-lookup code -- confirm against the originating project.  */
struct block;

/* Stub: would prune renamings not in scope in CURRENT_BLOCK; always
   reports that one symbol remains.  */
static int
remove_out_of_scope_renamings (struct block *current_block)
{
  return 1;
}

/* Look up symbols visible from BLOCK0.  The cast drops the const
   qualifier so the stub above accepts the pointer; the object is never
   written through it here.  */
int
ada_lookup_symbol_list (const struct block *block0)
{
  return remove_out_of_scope_renamings ((struct block *) block0);
}
| {
"pile_set_name": "Github"
} |
require File.join(File.dirname(__FILE__), 'spec_helper')

# Specs for Nanite::MapperProxy: singleton access, publishing of request
# and push messages over an AMQP fanout exchange, and handling of final
# and intermediate results. AMQP/MQ are stubbed throughout - no broker
# connection is made.
describe Nanite::MapperProxy do
  describe "when fetching the instance" do
    before do
      # Reset the singleton between examples.
      Nanite::MapperProxy.class_eval do
        if class_variable_defined?(:@@instance)
          remove_class_variable(:@@instance)
        end
      end
    end

    it "should return nil when the instance is undefined" do
      Nanite::MapperProxy.instance.should == nil
    end

    it "should return the instance if defined" do
      instance = mock
      Nanite::MapperProxy.class_eval do
        @@instance = "instance"
      end

      Nanite::MapperProxy.instance.should_not == nil
    end
  end

  describe "when requesting a message" do
    before do
      AMQP.stub!(:connect)
      MQ.stub!(:new)
      Nanite::MapperProxy.new('mapperproxy', {})
      @instance = Nanite::MapperProxy.instance
      @fanout = stub(:fanout, :publish => true)
      @instance.amqp.stub!(:fanout).and_return(@fanout)
    end

    it "should raise an error if mapper proxy is not initialized" do
      lambda {
        @instance.stub!(:identity).and_return nil
        @instance.request('/welcome/aboard', 'iZac'){|response|}
      }.should raise_error("Mapper proxy not initialized")
    end

    it "should create a request object" do
      @fanout.should_receive(:publish).with do |request|
        request = @instance.serializer.load(request)
        request.class.should == Nanite::Request
      end

      @instance.request('/welcome/aboard', 'iZac'){|response|}
    end

    it "should set correct attributes on the request message" do
      @fanout.should_receive(:publish).with do |request|
        request = @instance.serializer.load(request)
        request.token.should_not == nil
        request.persistent.should_not == true
        request.from.should == 'mapperproxy'
      end

      @instance.request('/welcome/aboard', 'iZac'){|response|}
    end

    it "should mark the message as persistent when the option is specified on the parameter" do
      @fanout.should_receive(:publish).with do |request|
        request = @instance.serializer.load(request)
        request.persistent.should == true
      end

      @instance.request('/welcome/aboard', 'iZac', :persistent => true){|response|}
    end

    it "should set the correct target if specified" do
      @fanout.should_receive(:publish).with do |request|
        request = @instance.serializer.load(request)
        request.target.should == 'my-target'
      end

      @instance.request('/welcome/aboard', 'iZac', :target => 'my-target'){|response|}
    end

    it "should mark the message as persistent when the option is set globally" do
      @instance.options[:persistent] = true
      @fanout.should_receive(:publish).with do |request|
        request = @instance.serializer.load(request)
        request.persistent.should == true
      end

      @instance.request('/welcome/aboard', 'iZac'){|response|}
    end

    it "should store the intermediate handler" do
      intermediate = lambda {}
      Nanite::Identity.stub!(:generate).and_return('abc')
      @fanout.stub!(:fanout)
      @instance.request('/welcome/aboard', 'iZac', :target => 'my-target', :intermediate_handler => intermediate ){|response|}
      @instance.pending_requests['abc'][:intermediate_handler].should == intermediate
    end

    it "should store the result handler" do
      result_handler = lambda {}
      Nanite::Identity.stub!(:generate).and_return('abc')
      @fanout.stub!(:fanout)
      @instance.request('/welcome/aboard', 'iZac',{}, &result_handler)
      @instance.pending_requests['abc'][:result_handler].should == result_handler
    end
  end

  describe "when pushing a message" do
    before do
      AMQP.stub!(:connect)
      MQ.stub!(:new)
      Nanite::MapperProxy.new('mapperproxy', {})
      @instance = Nanite::MapperProxy.instance
      @fanout = stub(:fanout, :publish => true)
      @instance.amqp.stub!(:fanout).and_return(@fanout)
    end

    it "should raise an error if mapper proxy is not initialized" do
      lambda {
        @instance.stub!(:identity).and_return nil
        @instance.push('/welcome/aboard', 'iZac')
      }.should raise_error("Mapper proxy not initialized")
    end

    it "should create a push object" do
      @fanout.should_receive(:publish).with do |push|
        push = @instance.serializer.load(push)
        push.class.should == Nanite::Push
      end

      @instance.push('/welcome/aboard', 'iZac')
    end

    it "should set the correct target if specified" do
      @fanout.should_receive(:publish).with do |push|
        push = @instance.serializer.load(push)
        push.target.should == 'my-target'
      end

      @instance.push('/welcome/aboard', 'iZac', :target => 'my-target')
    end

    it "should set correct attributes on the push message" do
      @fanout.should_receive(:publish).with do |push|
        push = @instance.serializer.load(push)
        push.token.should_not == nil
        push.persistent.should_not == true
        push.from.should == 'mapperproxy'
      end

      @instance.push('/welcome/aboard', 'iZac')
    end

    it "should mark the message as persistent when the option is specified on the parameter" do
      @fanout.should_receive(:publish).with do |push|
        push = @instance.serializer.load(push)
        push.persistent.should == true
      end

      @instance.push('/welcome/aboard', 'iZac', :persistent => true)
    end

    it "should mark the message as persistent when the option is set globally" do
      @instance.options[:persistent] = true
      @fanout.should_receive(:publish).with do |push|
        push = @instance.serializer.load(push)
        push.persistent.should == true
      end

      @instance.push('/welcome/aboard', 'iZac')
    end
  end

  describe "when handling results" do
    before(:each) do
      AMQP.stub!(:connect)
      MQ.stub!(:new)
      Nanite::MapperProxy.new('mapperproxy', {})
      @instance = Nanite::MapperProxy.instance
      @fanout = stub(:fanout, :publish => true)
      @instance.amqp.stub!(:fanout).and_return(@fanout)
      @payload = {:payload => ['nanite', 'eventmachine', 'rabbitmq']}
    end

    describe 'final results' do
      before do
        @response = mock("Response")
        @response.should_receive(:token).and_return("test_token")
        @response.should_receive(:results).twice.and_return({:payload => ['nanite', 'eventmachine', 'rabbitmq']})
        result_handler = lambda {}
        @fanout.stub!(:fanout)
        # Pre-register a pending request keyed by the response token.
        @instance.pending_requests["test_token"] = {:result_handler => Proc.new{ @response.results} }
        @instance.request('/welcome/aboard', 'iZac',{}, &result_handler)
      end

      it "should return the provided payload through the result handler" do
        @instance.handle_result(@response).should == @payload
      end
    end

    describe 'intermediate results' do
      before do
        @response = mock("Response")
        @response.should_receive(:token).and_return("test_token_2")
        @response.should_receive(:results).twice.and_return({:payload => ['nanite', 'eventmachine', 'rabbitmq']})
        result_handler = lambda {}
        @fanout.stub!(:fanout)
        int_handler = Proc.new{ @response.results.merge(:time => Time.now)}
        @instance.pending_requests["test_token_2"] = {:result_handler => Proc.new{ @response.results},
          :intermediate_handler => int_handler}
        @instance.request('/welcome/aboard', 'iZac', :intermediate_handler => int_handler, &result_handler)
      end

      it "should provide a Hash for intermediate results" do
        @instance.handle_intermediate_result(@response).should be_kind_of(Hash)
      end
    end
  end
end
"pile_set_name": "Github"
} |
polygon
1
8.301650E+00 4.613302E+01
8.302922E+00 4.613326E+01
8.303081E+00 4.613356E+01
8.303596E+00 4.613457E+01
8.304129E+00 4.613564E+01
8.304654E+00 4.613660E+01
8.304852E+00 4.613710E+01
8.305642E+00 4.613848E+01
8.306071E+00 4.613931E+01
8.306486E+00 4.614021E+01
8.306757E+00 4.614070E+01
8.307506E+00 4.614105E+01
8.308883E+00 4.614160E+01
8.309128E+00 4.614171E+01
8.311585E+00 4.614279E+01
8.312387E+00 4.614315E+01
8.313325E+00 4.614358E+01
8.315488E+00 4.614449E+01
8.316144E+00 4.614470E+01
8.316906E+00 4.614504E+01
8.317261E+00 4.614520E+01
8.317694E+00 4.614539E+01
8.317891E+00 4.614548E+01
8.320040E+00 4.614643E+01
8.320952E+00 4.614683E+01
8.322038E+00 4.614730E+01
8.322845E+00 4.614767E+01
8.322813E+00 4.614818E+01
8.323460E+00 4.614859E+01
8.323825E+00 4.614901E+01
8.325186E+00 4.614965E+01
8.325585E+00 4.614980E+01
8.325914E+00 4.614991E+01
8.327024E+00 4.615047E+01
8.327760E+00 4.615047E+01
8.329943E+00 4.615078E+01
8.331176E+00 4.615107E+01
8.332233E+00 4.615114E+01
8.333516E+00 4.615123E+01
8.335548E+00 4.615133E+01
8.336489E+00 4.615141E+01
8.337437E+00 4.615156E+01
8.338859E+00 4.615186E+01
8.339213E+00 4.615194E+01
8.342533E+00 4.615268E+01
8.344569E+00 4.615320E+01
8.346315E+00 4.615360E+01
8.349049E+00 4.615426E+01
8.350667E+00 4.615481E+01
8.352044E+00 4.615528E+01
8.352851E+00 4.615555E+01
8.354519E+00 4.615619E+01
8.355591E+00 4.615644E+01
8.356788E+00 4.615673E+01
8.358028E+00 4.615687E+01
8.359161E+00 4.615696E+01
8.360327E+00 4.615700E+01
8.361180E+00 4.615708E+01
8.361718E+00 4.615714E+01
8.362476E+00 4.615714E+01
8.366753E+00 4.615712E+01
8.369003E+00 4.615709E+01
8.370147E+00 4.615699E+01
8.370981E+00 4.615684E+01
8.371750E+00 4.615666E+01
8.372662E+00 4.615643E+01
8.373305E+00 4.615634E+01
8.373754E+00 4.615640E+01
8.374219E+00 4.615655E+01
8.375172E+00 4.615709E+01
8.375711E+00 4.615736E+01
8.375955E+00 4.615784E+01
8.376095E+00 4.615798E+01
8.377071E+00 4.615862E+01
8.378686E+00 4.615958E+01
8.379663E+00 4.616019E+01
8.380987E+00 4.616099E+01
8.382502E+00 4.616195E+01
8.384193E+00 4.616286E+01
8.385477E+00 4.616358E+01
8.386654E+00 4.616417E+01
8.388337E+00 4.616496E+01
8.389241E+00 4.616543E+01
8.389407E+00 4.616553E+01
8.390674E+00 4.616630E+01
8.391667E+00 4.616696E+01
8.392871E+00 4.616776E+01
8.393214E+00 4.616799E+01
8.393827E+00 4.616841E+01
8.394613E+00 4.616891E+01
8.395466E+00 4.616948E+01
8.396037E+00 4.616987E+01
8.397013E+00 4.617053E+01
8.397966E+00 4.617111E+01
8.398503E+00 4.617152E+01
8.399530E+00 4.617217E+01
8.400501E+00 4.617260E+01
8.401364E+00 4.617299E+01
8.402343E+00 4.617342E+01
8.403594E+00 4.617412E+01
8.404125E+00 4.617435E+01
8.404903E+00 4.617490E+01
8.405328E+00 4.617498E+01
8.406650E+00 4.617526E+01
8.407357E+00 4.617546E+01
8.407944E+00 4.617595E+01
8.408447E+00 4.617643E+01
8.408750E+00 4.617693E+01
8.408836E+00 4.617753E+01
8.409050E+00 4.617779E+01
8.409424E+00 4.617790E+01
8.409714E+00 4.617806E+01
8.410464E+00 4.617886E+01
8.411419E+00 4.617997E+01
8.411664E+00 4.617971E+01
8.412392E+00 4.617874E+01
8.412842E+00 4.617795E+01
8.413275E+00 4.617719E+01
8.413775E+00 4.617645E+01
8.414468E+00 4.617562E+01
8.414915E+00 4.617511E+01
8.415485E+00 4.617488E+01
8.416404E+00 4.617462E+01
8.416849E+00 4.617440E+01
8.417143E+00 4.617414E+01
8.417672E+00 4.617385E+01
8.417982E+00 4.617369E+01
8.418300E+00 4.617361E+01
8.418877E+00 4.617344E+01
8.419024E+00 4.617292E+01
8.419118E+00 4.617271E+01
8.419213E+00 4.617243E+01
8.419266E+00 4.617206E+01
8.419435E+00 4.617188E+01
8.419521E+00 4.617160E+01
8.419758E+00 4.617129E+01
8.419977E+00 4.617107E+01
8.420196E+00 4.617084E+01
8.420647E+00 4.617076E+01
8.420856E+00 4.617075E+01
8.421081E+00 4.617077E+01
8.421306E+00 4.617075E+01
8.421607E+00 4.617069E+01
8.421841E+00 4.617064E+01
8.422241E+00 4.617063E+01
8.422759E+00 4.617053E+01
8.423111E+00 4.617041E+01
8.423537E+00 4.617024E+01
8.424205E+00 4.617014E+01
8.424899E+00 4.617003E+01
8.425743E+00 4.616984E+01
8.426210E+00 4.616975E+01
8.426802E+00 4.616977E+01
8.427319E+00 4.616980E+01
8.427794E+00 4.616979E+01
8.428144E+00 4.616977E+01
8.428423E+00 4.616970E+01
8.428237E+00 4.616968E+01
8.427784E+00 4.616962E+01
8.427332E+00 4.616945E+01
8.426942E+00 4.616923E+01
8.426290E+00 4.616921E+01
8.425663E+00 4.616912E+01
8.425155E+00 4.616894E+01
8.425062E+00 4.616891E+01
8.424763E+00 4.616895E+01
8.424416E+00 4.616889E+01
8.423796E+00 4.616868E+01
8.423542E+00 4.616863E+01
8.423169E+00 4.616862E+01
8.422717E+00 4.616845E+01
8.422128E+00 4.616833E+01
8.421308E+00 4.616825E+01
8.420756E+00 4.616817E+01
8.419911E+00 4.616810E+01
8.418097E+00 4.616793E+01
8.416768E+00 4.616782E+01
8.415549E+00 4.616784E+01
8.414849E+00 4.616772E+01
8.414153E+00 4.616761E+01
8.413604E+00 4.616724E+01
8.412807E+00 4.616677E+01
8.411483E+00 4.616623E+01
8.410479E+00 4.616594E+01
8.409690E+00 4.616586E+01
8.408496E+00 4.616587E+01
8.407347E+00 4.616578E+01
8.406287E+00 4.616557E+01
8.405333E+00 4.616521E+01
8.405006E+00 4.616497E+01
8.404735E+00 4.616478E+01
8.404426E+00 4.616462E+01
8.403831E+00 4.616444E+01
8.403459E+00 4.616437E+01
8.403050E+00 4.616422E+01
8.402606E+00 4.616388E+01
8.402304E+00 4.616365E+01
8.401815E+00 4.616346E+01
8.401322E+00 4.616309E+01
8.400538E+00 4.616254E+01
8.400069E+00 4.616225E+01
8.399608E+00 4.616181E+01
8.399436E+00 4.616165E+01
8.398949E+00 4.616118E+01
8.398611E+00 4.616087E+01
8.398551E+00 4.616071E+01
8.398227E+00 4.616015E+01
8.397965E+00 4.615969E+01
8.397376E+00 4.615898E+01
8.396914E+00 4.615863E+01
8.396007E+00 4.615801E+01
8.395748E+00 4.615780E+01
8.395677E+00 4.615773E+01
8.395275E+00 4.615735E+01
8.394862E+00 4.615699E+01
8.394659E+00 4.615678E+01
8.394611E+00 4.615666E+01
8.393993E+00 4.615633E+01
8.391869E+00 4.615559E+01
8.389497E+00 4.615480E+01
8.388773E+00 4.615455E+01
8.387689E+00 4.615426E+01
8.386808E+00 4.615413E+01
8.386693E+00 4.615388E+01
8.386523E+00 4.615346E+01
8.386081E+00 4.615242E+01
8.385544E+00 4.615131E+01
8.385473E+00 4.615117E+01
8.384920E+00 4.615009E+01
8.384628E+00 4.614953E+01
8.384277E+00 4.614919E+01
8.383867E+00 4.614904E+01
8.383026E+00 4.614810E+01
8.382116E+00 4.614706E+01
8.381442E+00 4.614621E+01
8.380631E+00 4.614530E+01
8.380597E+00 4.614522E+01
8.380502E+00 4.614499E+01
8.380455E+00 4.614425E+01
8.380400E+00 4.614365E+01
8.380289E+00 4.614349E+01
8.379746E+00 4.614289E+01
8.379656E+00 4.614267E+01
8.379796E+00 4.614249E+01
8.380481E+00 4.614185E+01
8.380915E+00 4.614137E+01
8.381348E+00 4.614096E+01
8.381474E+00 4.614065E+01
8.381369E+00 4.614039E+01
8.381103E+00 4.613961E+01
8.381069E+00 4.613878E+01
8.381063E+00 4.613863E+01
8.381051E+00 4.613699E+01
8.381041E+00 4.613602E+01
8.381094E+00 4.613499E+01
8.380596E+00 4.613485E+01
8.379386E+00 4.613447E+01
8.379270E+00 4.613443E+01
8.378254E+00 4.613413E+01
8.376062E+00 4.613325E+01
8.375032E+00 4.613286E+01
8.374366E+00 4.613256E+01
8.372494E+00 4.613155E+01
8.371435E+00 4.613090E+01
8.371054E+00 4.613073E+01
8.370720E+00 4.613080E+01
8.370001E+00 4.613098E+01
8.369409E+00 4.613116E+01
8.369090E+00 4.613126E+01
8.368505E+00 4.613146E+01
8.367819E+00 4.613163E+01
8.367401E+00 4.613177E+01
8.367219E+00 4.613170E+01
8.366682E+00 4.613131E+01
8.365994E+00 4.613067E+01
8.365272E+00 4.613000E+01
8.363597E+00 4.612858E+01
8.363196E+00 4.612796E+01
8.362544E+00 4.612744E+01
8.362114E+00 4.612716E+01
8.361339E+00 4.612648E+01
8.360900E+00 4.612627E+01
8.360269E+00 4.612605E+01
8.359588E+00 4.612589E+01
8.359449E+00 4.612565E+01
8.359285E+00 4.612539E+01
8.359214E+00 4.612509E+01
8.359067E+00 4.612481E+01
8.359022E+00 4.612444E+01
8.358662E+00 4.612451E+01
8.358195E+00 4.612458E+01
8.357578E+00 4.612465E+01
8.357110E+00 4.612474E+01
8.356385E+00 4.612481E+01
8.356093E+00 4.612483E+01
8.355661E+00 4.612478E+01
8.355209E+00 4.612482E+01
8.354049E+00 4.612509E+01
8.354046E+00 4.612534E+01
8.354028E+00 4.612551E+01
8.354201E+00 4.612563E+01
8.354000E+00 4.612569E+01
8.353559E+00 4.612569E+01
8.353017E+00 4.612570E+01
8.352760E+00 4.612562E+01
8.352080E+00 4.612536E+01
8.351886E+00 4.612530E+01
8.351466E+00 4.612519E+01
8.350925E+00 4.612509E+01
8.350392E+00 4.612512E+01
8.349715E+00 4.612526E+01
8.348824E+00 4.612526E+01
8.348567E+00 4.612515E+01
8.348360E+00 4.612507E+01
8.347876E+00 4.612510E+01
8.347641E+00 4.612526E+01
8.347513E+00 4.612551E+01
8.347336E+00 4.612572E+01
8.346932E+00 4.612598E+01
8.346515E+00 4.612610E+01
8.346098E+00 4.612615E+01
8.345796E+00 4.612631E+01
8.345642E+00 4.612663E+01
8.345482E+00 4.612678E+01
8.345233E+00 4.612672E+01
8.345034E+00 4.612660E+01
8.344894E+00 4.612646E+01
8.344589E+00 4.612622E+01
8.344481E+00 4.612614E+01
8.344005E+00 4.612611E+01
8.343480E+00 4.612634E+01
8.342996E+00 4.612632E+01
8.342422E+00 4.612625E+01
8.342073E+00 4.612617E+01
8.341723E+00 4.612621E+01
8.341363E+00 4.612641E+01
8.340935E+00 4.612661E+01
8.340460E+00 4.612669E+01
8.340019E+00 4.612658E+01
8.339744E+00 4.612649E+01
8.339671E+00 4.612646E+01
8.339407E+00 4.612627E+01
8.339242E+00 4.612608E+01
8.339051E+00 4.612602E+01
8.338892E+00 4.612614E+01
8.338679E+00 4.612646E+01
8.338728E+00 4.612661E+01
8.338767E+00 4.612681E+01
8.338566E+00 4.612692E+01
8.338183E+00 4.612691E+01
8.337799E+00 4.612698E+01
8.337512E+00 4.612721E+01
8.337474E+00 4.612763E+01
8.337159E+00 4.612823E+01
8.337080E+00 4.612857E+01
8.336958E+00 4.612895E+01
8.336780E+00 4.612929E+01
8.336437E+00 4.612935E+01
8.336147E+00 4.612927E+01
8.335965E+00 4.612916E+01
8.335700E+00 4.612901E+01
8.335486E+00 4.612876E+01
8.335204E+00 4.612871E+01
8.334814E+00 4.612856E+01
8.334522E+00 4.612855E+01
8.333853E+00 4.612880E+01
8.333692E+00 4.612900E+01
8.333564E+00 4.612924E+01
8.333496E+00 4.612936E+01
8.333492E+00 4.612968E+01
8.333261E+00 4.613022E+01
8.333206E+00 4.613068E+01
8.333251E+00 4.613099E+01
8.333121E+00 4.613151E+01
8.333077E+00 4.613168E+01
8.332720E+00 4.613223E+01
8.332258E+00 4.613255E+01
8.331790E+00 4.613268E+01
8.331446E+00 4.613286E+01
8.331235E+00 4.613308E+01
8.331039E+00 4.613340E+01
8.330879E+00 4.613360E+01
8.330568E+00 4.613380E+01
8.330267E+00 4.613387E+01
8.329909E+00 4.613385E+01
8.329461E+00 4.613371E+01
8.329317E+00 4.613368E+01
8.328671E+00 4.613359E+01
8.328272E+00 4.613351E+01
8.327790E+00 4.613340E+01
8.327548E+00 4.613340E+01
8.327233E+00 4.613339E+01
8.327132E+00 4.613340E+01
8.326748E+00 4.613340E+01
8.326653E+00 4.613340E+01
8.326538E+00 4.613341E+01
8.326201E+00 4.613325E+01
8.325878E+00 4.613312E+01
8.325663E+00 4.613295E+01
8.325633E+00 4.613286E+01
8.325359E+00 4.613202E+01
8.325294E+00 4.613182E+01
8.325277E+00 4.613177E+01
8.324962E+00 4.613095E+01
8.324940E+00 4.613089E+01
8.324825E+00 4.613059E+01
8.324689E+00 4.613013E+01
8.324541E+00 4.612984E+01
8.324502E+00 4.612976E+01
8.324465E+00 4.612971E+01
8.324424E+00 4.612965E+01
8.324058E+00 4.612912E+01
8.323610E+00 4.612846E+01
8.323161E+00 4.612775E+01
8.323036E+00 4.612763E+01
8.322933E+00 4.612752E+01
8.322815E+00 4.612740E+01
8.322469E+00 4.612709E+01
8.322023E+00 4.612677E+01
8.321254E+00 4.612624E+01
8.320619E+00 4.612576E+01
8.320336E+00 4.612559E+01
8.320213E+00 4.612552E+01
8.319562E+00 4.612500E+01
8.318936E+00 4.612441E+01
8.318757E+00 4.612423E+01
8.318278E+00 4.612374E+01
8.317746E+00 4.612318E+01
8.317555E+00 4.612298E+01
8.317354E+00 4.612276E+01
8.317135E+00 4.612253E+01
8.316777E+00 4.612218E+01
8.316602E+00 4.612200E+01
8.316492E+00 4.612190E+01
8.316195E+00 4.612161E+01
8.316132E+00 4.612155E+01
8.316066E+00 4.612160E+01
8.316011E+00 4.612165E+01
8.315907E+00 4.612174E+01
8.313874E+00 4.612025E+01
8.314201E+00 4.612005E+01
8.314077E+00 4.611993E+01
8.313825E+00 4.611953E+01
8.313644E+00 4.611925E+01
8.313120E+00 4.611848E+01
8.312695E+00 4.611786E+01
8.312131E+00 4.611700E+01
8.311548E+00 4.611629E+01
8.311376E+00 4.611609E+01
8.311060E+00 4.611597E+01
8.310893E+00 4.611590E+01
8.310503E+00 4.611581E+01
8.310006E+00 4.611569E+01
8.309742E+00 4.611563E+01
8.309606E+00 4.611559E+01
8.309408E+00 4.611554E+01
8.307047E+00 4.611594E+01
8.305911E+00 4.611616E+01
8.305195E+00 4.611626E+01
8.305048E+00 4.611658E+01
8.304500E+00 4.611825E+01
8.304124E+00 4.611945E+01
8.304010E+00 4.611990E+01
8.303159E+00 4.612163E+01
8.302218E+00 4.612352E+01
8.301704E+00 4.612474E+01
8.301549E+00 4.612563E+01
8.301412E+00 4.612734E+01
8.301336E+00 4.612876E+01
8.301371E+00 4.613000E+01
8.301460E+00 4.613156E+01
8.301650E+00 4.613302E+01
END
END
| {
"pile_set_name": "Github"
} |
<link rel="stylesheet" href="../../vendor/css/bootstrap.min.css" />
<p>
  Personal rules by
  <a href="https://github.com/rakeshtembhurne">@rakeshtembhurne</a>. Disables
  keys that fire on their own ("auto-jumping" keys), which otherwise cause
  unwanted characters to be typed or other erratic behavior.
</p>
<p>
For me key `left_option` started turning on anytime, causing weird symbols to
appear while writing. Also it is in pressed down state always, causing every
other key to behave differently. Sometimes I could not even log in as correct
password is not being typed. Similar things happened with number 9 key. It
started auto typing automatically.
</p>
<p>
  This configuration disables the 'left_option' key and the '9' key (which
  also produces '(' when combined with shift). With these rules applied, the
  normal behavior of those keys is disabled; they only work when pressed
  together with the 'fn' key.
</p>
<p>
So, fn + 9 will be 9, and fn + shift + 9 will be '('. Disabled keys will be
left_option, 9 and shift + 9 which is '('.
</p>
| {
"pile_set_name": "Github"
} |
/*******************************************************************************
Copyright (C) 2011-2014 SequoiaDB Ltd.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License, version 3,
as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Source File Name = clsTask.hpp
Descriptive Name = Data Management Service Header
Dependencies: N/A
Restrictions: N/A
Change Activity:
defect Date Who Description
====== =========== === ==============================================
17/03/2013 Xu Jianhui Initial Draft
Last Changed =
*******************************************************************************/
#ifndef CLS_TASK_HPP_
#define CLS_TASK_HPP_
#include "core.hpp"
#include "oss.hpp"
#include "clsBase.hpp"
#include "ossLatch.hpp"
#include "clsCatalogAgent.hpp"
#include <string>
#include <map>
#include "../bson/bson.h"
using namespace bson ;
namespace engine
{
// Kind of cluster task; currently only split tasks exist besides the
// unknown marker.
enum CLS_TASK_TYPE
{
   CLS_TASK_SPLIT  = 0, //split task
   CLS_TASK_UNKNOW = 255
} ;
// Lifecycle states of a task; FINISH is the terminal state, END is a
// sentinel no task should ever carry.
enum CLS_TASK_STATUS
{
   CLS_TASK_STATUS_READY   = 0,  // when initially created
   CLS_TASK_STATUS_RUN     = 1,  // when starts running
   CLS_TASK_STATUS_PAUSE   = 2,  // when is halt
   CLS_TASK_STATUS_CANCELED= 3,  // canceled
   CLS_TASK_STATUS_META    = 4,  // when meta( ex:catalog info ) changed
   CLS_TASK_STATUS_FINISH  = 9,  // when stopped, this should be the last
   CLS_TASK_STATUS_END     = 10  // nothing should have this status
} ;
#define CLS_INVALID_TASKID (0)
/*
 * Abstract base class for cluster tasks. Stores the task ID assigned at
 * construction and the current status (READY initially). Concrete tasks
 * must supply the type/name/collection accessors and muteXOn.
 */
class _clsTask : public SDBObject
{
   public:
      _clsTask ( UINT64 taskID ) : _taskID ( taskID )
      {
         _status = CLS_TASK_STATUS_READY ;
      }
      virtual ~_clsTask () {}

      // ID assigned at creation; never changes afterwards
      UINT64 taskID () const { return _taskID ; }
      // Current lifecycle status (see CLS_TASK_STATUS)
      CLS_TASK_STATUS status () const { return _status ; }
      void setStatus( CLS_TASK_STATUS status ) { _status = status ; }

   public:
      virtual CLS_TASK_TYPE taskType () const = 0 ;
      virtual const CHAR* taskName () const = 0 ;
      virtual const CHAR* collectionName() const = 0 ;
      virtual const CHAR* collectionSpaceName() const = 0 ;
      // NOTE(review): name suggests a mutual-exclusion test against
      // pOther (tasks that cannot run concurrently) -- TODO confirm
      virtual BOOLEAN muteXOn ( const _clsTask *pOther ) = 0 ;

   protected:
      UINT64 _taskID ;
      CLS_TASK_STATUS _status ;
};
typedef _clsTask clsTask ;
/*
 * Minimal concrete task that overrides the pure virtuals of _clsTask;
 * carries no payload beyond the inherited ID/status. Implementations
 * live in the corresponding .cpp.
 */
class _clsDummyTask : public _clsTask
{
   public:
      _clsDummyTask ( UINT64 taskID ) ;
      ~_clsDummyTask () ;

      virtual CLS_TASK_TYPE taskType () const ;
      virtual const CHAR* taskName () const ;
      virtual const CHAR* collectionName() const ;
      virtual const CHAR* collectionSpaceName() const ;
      virtual BOOLEAN muteXOn ( const _clsTask *pOther ) ;
};
/*
 * Registry of active tasks keyed by task ID (_taskMap, guarded by
 * _taskLatch). Also maintains a per-collection registration count
 * (_mapRegister, guarded by _regLatch) and an event that waiters can
 * block on until the task set changes.
 */
class _clsTaskMgr : public SDBObject
{
   public:
      // maxTaskID: upper bound for generated IDs; CLS_INVALID_TASKID
      // presumably means "no bound" -- TODO confirm against the .cpp
      _clsTaskMgr ( UINT64 maxTaskID = CLS_INVALID_TASKID ) ;
      ~_clsTaskMgr () ;

      UINT64 getTaskID () ;
      void   setTaskID ( UINT64 taskID ) ;

   public:
      // Counting helpers over the task map
      UINT32 taskCount () ;
      UINT32 taskCount( CLS_TASK_TYPE type ) ;
      UINT32 taskCountByCL( const CHAR *pCLName ) ;
      UINT32 taskCountByCS( const CHAR *pCSName ) ;

      // Block until a task event fires or the timeout elapses
      INT32  waitTaskEvent( INT64 millisec = OSS_ONE_SEC ) ;

      INT32  addTask ( _clsTask *pTask,
                       UINT64 taskID = CLS_INVALID_TASKID ) ;
      INT32  removeTask ( _clsTask *pTask ) ;
      INT32  removeTask ( UINT64 taskID ) ;
      _clsTask* findTask ( UINT64 taskID ) ;
      void   stopTask ( UINT64 taskID ) ;

   public:
      // Per-collection registration bookkeeping
      void   regCollection( const string &clName ) ;
      void   unregCollection( const string &clName ) ;
      void   lockReg( OSS_LATCH_MODE mode = SHARED ) ;
      void   releaseReg( OSS_LATCH_MODE mode = SHARED ) ;
      UINT32 getRegCount( const string &clName,
                          BOOLEAN noLatch = FALSE ) ;

      string dumpTasks( CLS_TASK_TYPE type ) ;

   private:
      std::map<UINT64, _clsTask*>  _taskMap ;      // taskID -> task
      ossSpinSLatch                _taskLatch ;    // protects _taskMap
      ossAutoEvent                 _taskEvent ;    // signalled on changes
      std::map<string, UINT32>     _mapRegister ;  // clName -> reg count
      ossSpinSLatch                _regLatch ;     // protects _mapRegister
      UINT64                       _taskID ;       // next/current ID
      UINT64                       _maxID ;        // cap from constructor
};
typedef _clsTaskMgr clsTaskMgr ;
#define CLS_MASK_ALL (~0)
#define CLS_SPLIT_MASK_ID 0x00000001
#define CLS_SPLIT_MASK_TYPE 0x00000002
#define CLS_SPLIT_MASK_STATUS 0x00000004
#define CLS_SPLIT_MASK_CLNAME 0x00000008
#define CLS_SPLIT_MASK_SOURCEID 0x00000010
#define CLS_SPLIT_MASK_SOURCENAME 0x00000020
#define CLS_SPLIT_MASK_DSTID 0x00000040
#define CLS_SPLIT_MASK_DSTNAME 0x00000080
#define CLS_SPLIT_MASK_BKEY 0x00000100
#define CLS_SPLIT_MASK_EKEY 0x00000200
#define CLS_SPLIT_MASK_SHARDINGKEY 0x00000400
#define CLS_SPLIT_MASK_SHARDINGTYPE 0x00000800
#define CLS_SPLIT_MASK_PERCENT 0x00001000
/*
 * Task describing a collection split between two groups: moves the key
 * range [bKey, eKey) of a sharded collection from the source group to
 * the destination group. Can be initialized either from a serialized
 * BSON object or from explicit fields; toBson() serializes selected
 * fields per the CLS_SPLIT_MASK_* bits.
 */
class _clsSplitTask : public _clsTask
{
   public:
      _clsSplitTask ( UINT64 taskID ) ;
      virtual ~_clsSplitTask () ;

      // Initialize from a serialized BSON object (raw bytes)
      INT32 init ( const CHAR *objdata ) ;
      // Initialize from explicit split parameters
      INT32 init ( const CHAR *clFullName, INT32 sourceID,
                   const CHAR *sourceName, INT32 dstID,
                   const CHAR *dstName, const BSONObj &bKey,
                   const BSONObj &eKey, FLOAT64 percent,
                   clsCatalogSet &cataSet ) ;
      // Derive [bKey, eKey) for a hash-sharded split of the given percent
      INT32 calcHashPartition ( clsCatalogSet &cataSet, INT32 groupID,
                                FLOAT64 percent, BSONObj &bKey,
                                BSONObj &eKey ) ;
      // Serialize the fields selected by mask (CLS_SPLIT_MASK_* bits)
      BSONObj toBson ( UINT32 mask = CLS_MASK_ALL ) const ;
      BOOLEAN isHashSharding() const ;

   public:
      virtual CLS_TASK_TYPE taskType () const ;
      virtual const CHAR* taskName () const ;
      virtual const CHAR* collectionName() const ;
      virtual const CHAR* collectionSpaceName() const ;
      virtual BOOLEAN muteXOn ( const _clsTask *pOther ) ;

   public:
      // Field accessors
      const CHAR* clFullName () const ;
      const CHAR* shardingType () const ;
      const CHAR* sourceName () const ;
      const CHAR* dstName () const ;
      UINT32 sourceID () const ;
      UINT32 dstID () const ;
      BSONObj splitKeyObj () const ;
      BSONObj splitEndKeyObj () const ;
      BSONObj shardingKey () const ;

   protected:
      BSONObj _getOrdering () const ;
      void _makeName() ;

   protected:
      std::string _clFullName ;      // full "cs.cl" name
      std::string _csName ;          // collection space part
      std::string _sourceName ;      // source group name
      std::string _dstName ;         // destination group name
      std::string _shardingType ;
      UINT32 _sourceID ;             // source group ID
      UINT32 _dstID ;                // destination group ID
      BSONObj _splitKeyObj ;         // split range begin key
      BSONObj _splitEndKeyObj ;      // split range end key
      BSONObj _shardingKey ;
      CLS_TASK_TYPE _taskType ;
      FLOAT64 _percent ;             // portion to move (hash split)

   private:
      std::string _taskName ;        // built by _makeName()
};
typedef _clsSplitTask clsSplitTask ;
}
#endif //CLS_TASK_HPP_
| {
"pile_set_name": "Github"
} |
/*
* Copyright 1999-2018 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.druid.bvt.filter.wall.mysql;
import junit.framework.TestCase;
import org.junit.Assert;
import com.alibaba.druid.wall.WallProvider;
import com.alibaba.druid.wall.spi.MySqlWallProvider;
public class MySqlWallTest106 extends TestCase {

    /**
     * A query using bin() inside an OR'ed condition must be rejected by
     * the MySQL wall provider when comments are disallowed.
     */
    public void test_false() throws Exception {
        MySqlWallProvider wallProvider = new MySqlWallProvider();
        wallProvider.getConfig().setCommentAllow(false);

        String statement = "select * from t where id = ? or bin(1) = 1";

        Assert.assertFalse(wallProvider.checkValid(statement));
    }
}
| {
"pile_set_name": "Github"
} |
# Reviewed by [your name here]
dataset: hayes_roth
description: None yet. See our contributing guide to help us add one.
source: None yet. See our contributing guide to help us add one.
publication: None yet. See our contributing guide to help us add one.
task: classification
keywords:
-
-
target:
type: categorical
description: None yet. See our contributing guide to help us add one.
code: None yet. See our contributing guide to help us add one.
features:
- name: Hobby
type: continuous
description: # optional but recommended, what the feature measures/indicates, unit
code: # optional, coding information, e.g., Control = 0, Case = 1
transform: # optional, any transformation performed on the feature, e.g., log scaled
- name: Age
type: continuous
description:
code:
transform:
- name: Education
type: continuous
description:
code:
transform:
- name: Marital status
type: continuous
description:
code:
transform:
| {
"pile_set_name": "Github"
} |
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <[email protected]>
* Jike Song <[email protected]>
*
* Contributors:
* Zhi Wang <[email protected]>
* Min He <[email protected]>
* Bing Niu <[email protected]>
*
*/
#include "i915_drv.h"
#include "gvt.h"
/* Indices of the vGPU PCI BARs tracked in cfg_space.bar[]. */
enum {
	INTEL_GVT_PCI_BAR_GTTMMIO = 0,	/* BAR0: GTT/MMIO (see trap_gttmmio) */
	INTEL_GVT_PCI_BAR_APERTURE,	/* BAR2: aperture (see map_aperture) */
	INTEL_GVT_PCI_BAR_PIO,
	INTEL_GVT_PCI_BAR_MAX,
};
/* bitmap for writable bits (RW or RW1C bits, but cannot co-exist in one
* byte) byte by byte in standard pci configuration space. (not the full
* 256 bytes.)
*/
static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
[PCI_COMMAND] = 0xff, 0x07,
[PCI_STATUS] = 0x00, 0xf9, /* the only one RW1C byte */
[PCI_CACHE_LINE_SIZE] = 0xff,
[PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
[PCI_ROM_ADDRESS] = 0x01, 0xf8, 0xff, 0xff,
[PCI_INTERRUPT_LINE] = 0xff,
};
/**
* vgpu_pci_cfg_mem_write - write virtual cfg space memory
* @vgpu: target vgpu
* @off: offset
* @src: src ptr to write
* @bytes: number of bytes
*
* Use this function to write virtual cfg space memory.
* For standard cfg space, only RW bits can be changed,
* and we emulates the RW1C behavior of PCI_STATUS register.
*/
static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
	u8 *src, unsigned int bytes)
{
	u8 *cfg_base = vgpu_cfg_space(vgpu);
	u8 mask, new, old;
	int i = 0;

	/*
	 * Bytes covered by the RW bitmap: merge only the writable bits,
	 * preserving read-only bits of the current value.
	 */
	for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
		mask = pci_cfg_space_rw_bmp[off + i];
		old = cfg_base[off + i];
		new = src[i] & mask;

		/**
		 * The PCI_STATUS high byte has RW1C bits, here
		 * emulates clear by writing 1 for these bits.
		 * Writing a 0b to RW1C bits has no effect.
		 */
		if (off + i == PCI_STATUS + 1)
			new = (~new & old) & mask;

		cfg_base[off + i] = (old & ~mask) | new;
	}

	/* For other configuration space directly copy as it is. */
	if (i < bytes)
		memcpy(cfg_base + off + i, src + i, bytes - i);
}
/**
 * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
 * @vgpu: target vgpu
 * @offset: offset
 * @p_data: return data ptr
 * @bytes: number of bytes to read
 *
 * Reads are served straight from the virtual cfg space copy; no
 * register-specific handling is needed on the read path.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	/* A single config access is at most one dword. */
	if (WARN_ON(bytes > 4))
		return -EINVAL;

	/* Reject reads running past the emulated cfg space. */
	if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
		return -EINVAL;

	memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
	return 0;
}
/*
 * map_aperture - (un)map the guest aperture GPA range onto the host
 * aperture physical pages.
 * @vgpu: target vgpu
 * @map: true to establish the gfn->mfn mapping, false to tear it down.
 *
 * Reads the guest-programmed BAR2 value from virtual cfg space (64-bit
 * wide when the BAR advertises MEM_TYPE_64) and asks the hypervisor to
 * map or unmap the pages. No-op when the tracked state already matches.
 */
static int map_aperture(struct intel_vgpu *vgpu, bool map)
{
	phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
	unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
	u64 first_gfn;
	u64 val;
	int ret;

	/* Already in the requested state, nothing to do. */
	if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
		return 0;

	/* Low byte of BAR2 carries the memory-type flag bits. */
	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
		val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
	else
		val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);

	/* BAR flag bits in val fall below PAGE_SHIFT and are shifted out. */
	first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;

	ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
						  aperture_pa >> PAGE_SHIFT,
						  aperture_sz >> PAGE_SHIFT,
						  map);
	if (ret)
		return ret;

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
	return 0;
}
/*
 * trap_gttmmio - start/stop trapping guest accesses to the GTTMMIO BAR.
 * @vgpu: target vgpu
 * @trap: true to install the trap area, false to remove it.
 *
 * Computes the guest GPA range from the BAR0 value in virtual cfg space
 * (64-bit wide when MEM_TYPE_64 is advertised) and updates the
 * hypervisor trap area. No-op when the tracked state already matches.
 */
static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
	u64 start, end;
	u64 val;
	int ret;

	if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
		return 0;

	/* Low byte of BAR0 carries the memory-type flag bits. */
	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
		start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
	else
		start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);

	/* Strip the low BAR flag bits to get the base address. */
	start &= ~GENMASK(3, 0);
	end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;

	ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
	if (ret)
		return ret;

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
	return 0;
}
/*
 * emulate_pci_command_write - handle a guest write to PCI_COMMAND.
 *
 * Commits the write into virtual cfg space first, then reacts to a
 * toggle of PCI_COMMAND_MEMORY: disabling memory decode tears down the
 * GTTMMIO trap and the aperture mapping; enabling it sets both up.
 */
static int emulate_pci_command_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	u8 old = vgpu_cfg_space(vgpu)[offset];
	u8 new = *(u8 *)p_data;
	u8 changed = old ^ new;
	int ret;

	vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);

	/* Only a flip of the memory-decode bit needs extra work. */
	if (!(changed & PCI_COMMAND_MEMORY))
		return 0;

	if (old & PCI_COMMAND_MEMORY) {
		/* Memory decode was on and is being turned off. */
		ret = trap_gttmmio(vgpu, false);
		if (ret)
			return ret;
		ret = map_aperture(vgpu, false);
		if (ret)
			return ret;
	} else {
		/* Memory decode was off and is being turned on. */
		ret = trap_gttmmio(vgpu, true);
		if (ret)
			return ret;
		ret = map_aperture(vgpu, true);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * emulate_pci_rom_bar_write - handle a guest write to the ROM BAR.
 *
 * The vGPU exposes no option ROM: a sizing write of all address bits
 * (PCI_ROM_ADDRESS_MASK) makes the BAR read back 0 ("size 0"); any
 * other value goes through the normal masked cfg-space write path.
 */
static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 *pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
	u32 new = *(u32 *)(p_data);

	if ((new & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK)
		/* We don't have rom, return size of 0. */
		*pval = 0;
	else
		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
	return 0;
}
/*
 * emulate_pci_bar_write - handle a guest write to a memory BAR dword.
 *
 * Implements the standard BAR sizing protocol: a write of all 1s makes
 * the virtual BAR read back its size mask, while any other value is
 * taken as the new guest base address. The GTTMMIO trap and aperture
 * mapping are refreshed to follow the reprogrammed address.
 */
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	u32 new = *(u32 *)(p_data);
	bool lo = IS_ALIGNED(offset, 8);	/* low dword of a 64-bit BAR? */
	u64 size;
	int ret = 0;
	bool mmio_enabled =
		vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
	struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;

	/*
	 * Power-up software can determine how much address
	 * space the device requires by writing a value of
	 * all 1's to the register and then reading the value
	 * back. The device will return 0's in all don't-care
	 * address bits.
	 */
	if (new == 0xffffffff) {
		switch (offset) {
		case PCI_BASE_ADDRESS_0:
		case PCI_BASE_ADDRESS_1:
			size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size -1);
			intel_vgpu_write_pci_bar(vgpu, offset,
						size >> (lo ? 0 : 32), lo);
			/*
			 * Untrap the BAR, since guest hasn't configured a
			 * valid GPA
			 */
			ret = trap_gttmmio(vgpu, false);
			break;
		case PCI_BASE_ADDRESS_2:
		case PCI_BASE_ADDRESS_3:
			size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1);
			intel_vgpu_write_pci_bar(vgpu, offset,
						size >> (lo ? 0 : 32), lo);
			ret = map_aperture(vgpu, false);
			break;
		default:
			/* Unimplemented BARs */
			intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false);
		}
	} else {
		switch (offset) {
		case PCI_BASE_ADDRESS_0:
		case PCI_BASE_ADDRESS_1:
			/*
			 * Untrap the old BAR first, since guest has
			 * re-configured the BAR
			 */
			trap_gttmmio(vgpu, false);
			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
			/* Re-trap only while memory decode is enabled. */
			ret = trap_gttmmio(vgpu, mmio_enabled);
			break;
		case PCI_BASE_ADDRESS_2:
		case PCI_BASE_ADDRESS_3:
			/* Same unmap/write/remap sequence for the aperture. */
			map_aperture(vgpu, false);
			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
			ret = map_aperture(vgpu, mmio_enabled);
			break;
		default:
			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
		}
	}
	return ret;
}
/**
 * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
 * @vgpu: target vgpu
 * @offset: offset
 * @p_data: write data ptr
 * @bytes: number of bytes to write
 *
 * Dispatches the write to the register-specific handler (PCI_COMMAND,
 * ROM BAR, memory BARs, SWSCI, OpRegion); anything else goes through
 * the generic masked cfg-space write.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	int ret;

	if (WARN_ON(bytes > 4))
		return -EINVAL;

	if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
		return -EINVAL;

	/* First check if it's PCI_COMMAND */
	if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
		if (WARN_ON(bytes > 2))
			return -EINVAL;
		return emulate_pci_command_write(vgpu, offset, p_data, bytes);
	}

	switch (rounddown(offset, 4)) {
	case PCI_ROM_ADDRESS:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);

	case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		return emulate_pci_bar_write(vgpu, offset, p_data, bytes);

	case INTEL_GVT_PCI_SWSCI:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
		if (ret)
			return ret;
		break;

	case INTEL_GVT_PCI_OPREGION:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		ret = intel_vgpu_opregion_base_write_handler(vgpu,
						*(u32 *)p_data);
		if (ret)
			return ret;

		/* Mirror the new OpRegion base into virtual cfg space. */
		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
		break;

	default:
		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
		break;
	}
	return 0;
}
/**
 * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU
 *
 * @vgpu: a vGPU
 * @primary: is the vGPU presented as primary
 *
 * Seeds the virtual cfg space from the host firmware copy, then adjusts
 * it for the guest: non-primary vGPUs get a non-VGA class code, stolen
 * memory is hidden, BAR2 is pointed at the host aperture base, memory/
 * IO/bus-master decode start disabled, BAR upper dwords are cleared for
 * the guest to program, and BAR sizes are recorded for the sizing
 * protocol.
 */
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
			       bool primary)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	u16 *gmch_ctl;

	memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
	       info->cfg_space_size);

	if (!primary) {
		vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
			INTEL_GVT_PCI_CLASS_VGA_OTHER;
		vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
			INTEL_GVT_PCI_CLASS_VGA_OTHER;
	}

	/* Show guest that there isn't any stolen memory.*/
	gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
	*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);

	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
				 gvt_aperture_pa_base(gvt), true);

	/* Start with memory, IO and bus-master decode disabled. */
	vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
					     | PCI_COMMAND_MEMORY
					     | PCI_COMMAND_MASTER);
	/*
	 * Clear the bar upper 32bit and let guest to assign the new value
	 */
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8);
	memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);

	/* Record real BAR sizes for the all-1s sizing protocol. */
	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
		pci_resource_len(gvt->dev_priv->drm.pdev, 0);
	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
		pci_resource_len(gvt->dev_priv->drm.pdev, 2);

	memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
}
/**
 * intel_vgpu_reset_cfg_space - reset vGPU configuration space
 *
 * @vgpu: a vGPU
 *
 * Tears down the GTTMMIO trap and aperture mapping if memory decode was
 * on, then re-initializes the whole cfg space, preserving whether the
 * vGPU was presented as primary (judged from the current class code).
 */
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
{
	u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
	bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
				INTEL_GVT_PCI_CLASS_VGA_OTHER;

	if (cmd & PCI_COMMAND_MEMORY) {
		trap_gttmmio(vgpu, false);
		map_aperture(vgpu, false);
	}

	/**
	 * Currently we only do such reset when vGPU is not
	 * owned by any VM, so we simply restore entire cfg
	 * space to default value.
	 */
	intel_vgpu_init_cfg_space(vgpu, primary);
}
| {
"pile_set_name": "Github"
} |
#ifndef CASTOR_CHANNEL_CODER_H
#define CASTOR_CHANNEL_CODER_H
/** \class CastorChannelCoder
Container for ADC<->fQ conversion constants for HCAL/Castor QIE
*/
namespace reco {
namespace castor {
class QieShape;
}
} // namespace reco
/**
 * Per-channel container of the ADC <-> fC (charge) conversion constants
 * for an HCAL/Castor QIE chip.  Constants are supplied per (CapId, Range)
 * pair: 4 capacitor ids x 4 ranges = 16 values, stored as [CapId][Range].
 */
class CastorChannelCoder {
public:
  /// \param fOffset pedestal offsets, flat-indexed as [CapId*4 + Range]
  /// \param fSlope  gain slopes, same flat indexing
  CastorChannelCoder(const float fOffset[16], const float fSlope[16]);  // [CapId][Range]

  /// ADC[0..127]+capid[0..3]->fC conversion
  double charge(const reco::castor::QieShape& fShape, int fAdc, int fCapId) const;

  /// fC + capid[0..3] -> ADC conversion
  int adc(const reco::castor::QieShape& fShape, double fCharge, int fCapId) const;

  /// Flatten a (CapId, Range) pair into the linear [0..15] constant index.
  /// Made const: it is a pure accessor and must be callable from the
  /// const conversion methods above.
  int index(int fCapId, int Range) const { return fCapId * 4 + Range; }

private:
  double mOffset[4][4];  // pedestal offsets, [CapId][Range]
  double mSlope[4][4];   // gain slopes, [CapId][Range]
};
#endif
| {
"pile_set_name": "Github"
} |
/*
** $Id: linit.c,v 1.39.1.1 2017/04/19 17:20:42 roberto Exp $
** Initialization of libraries for lua.c and other clients
** See Copyright Notice in lua.h
*/
#define linit_c
#define LUA_LIB
/*
** If you embed Lua in your program and need to open the standard
** libraries, call luaL_openlibs in your program. If you need a
** different set of libraries, copy this file to your project and edit
** it to suit your needs.
**
** You can also *preload* libraries, so that a later 'require' can
** open the library, which is already linked to the application.
** For that, do the following code:
**
** luaL_getsubtable(L, LUA_REGISTRYINDEX, LUA_PRELOAD_TABLE);
** lua_pushcfunction(L, luaopen_modname);
** lua_setfield(L, -2, modname);
** lua_pop(L, 1); // remove PRELOAD table
*/
#include "lprefix.h"
#include <stddef.h>
#include "lua.h"
#include "lualib.h"
#include "lauxlib.h"
/*
** these libs are loaded by lua.c and are readily available to any Lua
** program.  Each entry pairs a module name with its luaopen_* initializer;
** luaL_openlibs walks the array until it hits the {NULL, NULL} sentinel.
*/
static const luaL_Reg loadedlibs[] = {
  {"_G", luaopen_base},
  {LUA_LOADLIBNAME, luaopen_package},
  {LUA_COLIBNAME, luaopen_coroutine},
  {LUA_TABLIBNAME, luaopen_table},
  {LUA_IOLIBNAME, luaopen_io},
  {LUA_OSLIBNAME, luaopen_os},
  {LUA_STRLIBNAME, luaopen_string},
  {LUA_MATHLIBNAME, luaopen_math},
  {LUA_UTF8LIBNAME, luaopen_utf8},
  {LUA_DBLIBNAME, luaopen_debug},
#if defined(LUA_COMPAT_BITLIB)
  {LUA_BITLIBNAME, luaopen_bit32},  /* only with Lua 5.2 bit32 compatibility */
#endif
  {NULL, NULL}  /* sentinel */
};
/*
** Open every library listed in 'loadedlibs' as if it had been 'require'd,
** storing each result in the global table, then drop the module value that
** luaL_requiref leaves on the stack.
*/
LUALIB_API void luaL_openlibs (lua_State *L) {
  const luaL_Reg *entry;
  for (entry = loadedlibs; entry->func != NULL; ++entry) {
    luaL_requiref(L, entry->name, entry->func, 1);
    lua_pop(L, 1);  /* discard the module table */
  }
}
| {
"pile_set_name": "Github"
} |
["暗班"
,"安插"
,"按額"
,"紅牌"
,"後勤"
,"屘"
,"貿工"
,"賣場"
,"賣出"
,"美編"
,"美商"
,"面談"
,"面試"
,"民營"
,"民營化"
,"盟友"
,"名片"
,"舞男"
,"武士"
,"文案"
,"文宣"
,"樂迷"
,"樂手"
,"儀器"
,"義工"
,"議程"
,"業界"
,"業者"
,"業主"
,"業餘"
,"外包"
,"外銷"
,"原廠"
,"下班"
,"海報"
,"孩"
,"行業"
,"行員"
,"合約"
,"合股"
,"效益"
,"賢"
,"現任"
,"協理"
,"行文"
,"刑警"
,"型錄"
,"行銷"
,"行政"
,"歇工"
,"兇手"
,"休館"
,"好人"
,"復工"
,"復職"
,"戶名"
,"後衛"
,"副業"
,"花花公子"
,"販仔白"
,"繁榮"
,"法人"
,"發包"
,"發票"
,"發達"
,"貨物"
,"貨櫃"
,"會報"
,"會長"
,"會員"
,"匪"
,"廢標"
,"分行"
,"分局"
,"分店"
,"份子"
,"醫界"
,"夜勤"
,"影迷"
,"影星"
,"譯者"
,"引頭路"
,"英商"
,"應徵"
,"榮民"
,"營建"
,"營收"
,"營造"
,"營運"
,"藥廠"
,"勇者"
,"遊民"
,"忍者"
,"任職"
,"人力"
,"人士"
,"人選"
,"人質"
,"弱者"
,"日產"
,"日商"
,"加盟"
,"改途"
,"監工"
,"幹事"
,"工商"
,"工商界"
,"工事"
,"工頭"
,"工讀生"
,"工錢"
,"工作"
,"交貨"
,"價差"
,"開基"
,"開辦"
,"開設"
,"開除"
,"楷模"
,"牽公"
,"刊載"
,"客源"
,"企管"
,"騎士"
,"傾銷"
,"考核"
,"考察"
,"課長"
,"擴建"
,"空服"
,"庫存"
,"權勢"
,"開標"
,"開店"
,"機房"
,"技術"
,"機電"
,"機長"
,"基層"
,"奇美"
,"旗下"
,"兼任"
,"兼職"
,"減產"
,"建商"
,"見習"
,"筊仙"
,"金主"
,"經營"
,"經銷"
,"警界"
,"警廣"
,"警官"
,"警消"
,"警長"
,"警政"
,"警衛"
,"叫客"
,"局長"
,"求職"
,"高階"
,"高檔"
,"各行各業"
,"國營"
,"各界"
,"公安"
,"公營"
,"功利"
,"光臨"
,"功臣"
,"公事"
,"股長"
,"購併"
,"歌迷"
,"歌星"
,"掛名"
,"館長"
,"券商"
,"縣民"
,"官方"
,"決標"
,"內勤"
,"內銷"
,"難民"
,"老鳥"
,"理監事"
,"履歷"
,"履歷表"
,"里長"
,"離職"
,"利害"
,"連任"
,"聯播"
,"連鎖"
,"鄰長"
,"零售"
,"量產"
,"錄用"
,"名號"
,"硬體"
,"年會"
,"領標"
,"學工夫"
,"罷工"
,"罷市"
,"排班"
,"排版"
,"牌照"
,"版面"
,"辦公"
,"辦理"
,"放工"
,"包贌"
,"包商"
,"弊案"
,"弊端"
,"拍片"
,"標頭"
,"品管"
,"品牌"
,"聘"
,"聘約"
,"聘任"
,"聘書"
,"聘請"
,"評審"
,"片商"
,"片廠"
,"票價"
,"票房"
,"判刑"
,"販貨"
,"批准"
,"閉幕"
,"編制"
,"編輯"
,"變態"
,"稟"
,"稟告"
,"筆者"
,"保安"
,"保戶"
,"保固"
,"報價"
,"部門"
,"本業"
,"產物"
,"產能"
,"產量"
,"產品"
,"產銷"
,"產地"
,"散會"
,"三肢手"
,"姓"
,"詩人"
,"施工"
,"辭"
,"辭頭路"
,"時報"
,"辭職"
,"視察"
,"社長"
,"先鋒"
,"𫝛途"
,"設廠"
,"銷量"
,"辛勞"
,"申報"
,"生化"
,"生力軍"
,"生產"
,"升遷"
,"成交"
,"承攬"
,"承辦"
,"承包"
,"成品"
,"承接"
,"成就"
,"小工"
,"商務"
,"商機"
,"商隊"
,"常務"
,"上班"
,"失業"
,"收發"
,"收工"
,"囚犯"
,"受害者"
,"受訓"
,"售價"
,"受託人"
,"訴求"
,"素材"
,"署長"
,"士"
,"事務所"
,"事業"
,"士官"
,"選單"
,"散戶"
,"散工"
,"巡邏"
,"巡視"
,"代言人"
,"代工"
,"代價"
,"代銷"
,"擔任"
,"擔當"
,"動工"
,"底價"
,"塌跤"
,"頭路"
,"退還"
,"退休"
,"退股"
,"替手"
,"提案"
,"提拔"
,"天價"
,"天王"
,"聽友"
,"停工"
,"通告"
,"通路"
,"傳媒"
,"團長"
,"推案"
,"店長"
,"典範"
,"電台"
,"調動"
,"調查"
,"特約"
,"特價"
,"特權"
,"徵召"
,"訂戶"
,"訂貨"
,"訂製"
,"訂單"
,"中鋒"
,"中油"
,"中生代"
,"重建"
,"值班"
,"直播"
,"直銷"
,"長"
,"倒會"
,"倒店"
,"道行"
,"盜版"
,"黨員"
,"同業"
,"同仁"
,"徒"
,"才女"
,"在職"
,"層級"
,"摠頭"
,"製藥"
,"製品"
,"製片"
,"製成"
,"炒作"
,"採訪"
,"菜鳥"
,"裁判"
,"裁員"
,"插代誌"
,"市面"
,"車迷"
,"車價"
,"車商"
,"車主"
,"簽訂"
,"倩"
,"超人"
,"撨摵"
,"請辭"
,"請示"
,"促銷"
,"沖銷"
,"廠商"
,"廠長"
,"匠"
,"創業"
,"創立"
,"創辦"
,"初學者"
,"粗工"
,"取景"
,"處長"
,"揣頭路"
,"出貨"
,"出勤"
,"出品"
,"出師"
,"出頭天"
,"出張"
,"出差"
,"出廠"
,"指令"
,"志工"
,"食家己"
,"食頭路"
,"僭位"
,"前鋒"
,"前途"
,"接管"
,"接班"
,"接手"
,"接單"
,"截稿"
,"績效"
,"進貨"
,"進用"
,"菁英"
,"偵探"
,"精裝"
,"整建"
,"贈品"
,"獎券"
,"執業"
,"執勤"
,"執照"
,"職訓"
,"職缺"
,"職能"
,"職責"
,"酒家"
,"酒仙"
,"酒店"
,"就業"
,"就職"
,"上目"
,"上任"
,"做官"
,"造船"
,"作為"
,"總務"
,"組別"
,"組長"
,"助理"
,"資深"
,"主任"
,"主播"
,"專訪"
,"專任"
,"專人"
,"專櫃"
,"專職"
,"專員"
,"轉任"
,"轉入"
,"轉進"
,"全職"
,"罪犯"
,"水利"
,"水手"
,"尊"
,"除名"
,"大賣場"
,"大名"
,"單跤手"
,"完工"
,"熨金"
]
| {
"pile_set_name": "Github"
} |
# This needs to be the first configured virtualhost on port 80 so that
# requests to http://localhost hit this rather than any other vhost
<VirtualHost *:80>
# Serve Apache's built-in mod_status handler at the root of this vhost.
<Location />
SetHandler server-status
# Only accept requests originating from the local machine.
Require ip 127.0.0.1
</Location>
</VirtualHost>
| {
"pile_set_name": "Github"
} |
// Re-export only the reducer from the local config module, so consumers can
// import the reducer directly without pulling in the rest of the config object.
import config from './config';
export default config.reducer;
| {
"pile_set_name": "Github"
} |
<?php
/**
* Handle renamed filters
*
* @package Give
*/
// Build the new => old hook-name map once at load time.
$give_map_deprecated_filters = give_deprecated_filters();

// Hook the mapping callback onto every *new* filter name so that callbacks
// still attached to the old (deprecated) name keep running; the forwarding
// itself happens inside give_deprecated_filter_mapping().
foreach ( $give_map_deprecated_filters as $new => $old ) {
	add_filter( $new, 'give_deprecated_filter_mapping', 10, 4 );
}
/**
 * Map of renamed Give filter hooks.
 *
 * Keys are the current (post-rename) filter names; values are the
 * deprecated names they replaced.
 *
 * @return array An array of deprecated Give filters.
 */
function give_deprecated_filters() {
	return array(
		// New filter hook => old filter hook.
		'give_donation_data_before_gateway' => 'give_purchase_data_before_gateway',
		'give_donation_form_required_fields' => 'give_purchase_form_required_fields',
		'give_donation_stats_by_user' => 'give_purchase_stats_by_user',
		'give_donation_from_name' => 'give_purchase_from_name',
		'give_donation_from_address' => 'give_purchase_from_address',
		'give_get_users_donations_args' => 'give_get_users_purchases_args',
		'give_recount_donors_donation_statuses' => 'give_recount_customer_payment_statuses',
		'give_donor_recount_should_process_donation' => 'give_customer_recount_should_process_payment',
		'give_reset_items' => 'give_reset_store_items',
		'give_decrease_donations_on_undo' => 'give_decrease_sales_on_undo',
		'give_decrease_earnings_on_pending' => 'give_decrease_store_earnings_on_pending',
		'give_decrease_donor_value_on_pending' => 'give_decrease_customer_value_on_pending',
		'give_decrease_donors_donation_count_on_pending' => 'give_decrease_customer_purchase_count_on_pending',
		'give_decrease_earnings_on_cancelled' => 'give_decrease_store_earnings_on_cancelled',
		'give_decrease_donor_value_on_cancelled' => 'give_decrease_customer_value_on_cancelled',
		'give_decrease_donors_donation_count_on_cancelled' => 'give_decrease_customer_purchase_count_on_cancelled',
		'give_decrease_earnings_on_revoked' => 'give_decrease_store_earnings_on_revoked',
		'give_decrease_donor_value_on_revoked' => 'give_decrease_customer_value_on_revoked',
		'give_decrease_donors_donation_count_on_revoked' => 'give_decrease_customer_purchase_count_on_revoked',
		'give_edit_donors_role' => 'give_edit_customers_role',
		'give_edit_donor_info' => 'give_edit_customer_info',
		'give_edit_donor_address' => 'give_edit_customer_address',
		'give_donor_tabs' => 'give_customer_tabs',
		'give_donor_views' => 'give_customer_views',
		'give_view_donors_role' => 'give_view_customers_role',
		'give_report_donor_columns' => 'give_report_customer_columns',
		'give_report_sortable_donor_columns' => 'give_report_sortable_customer_columns',
		'give_undo_donation_statuses' => 'give_undo_purchase_statuses',
		// NOTE: the misspelled old names below ('sholud', 'responce') are the
		// historical hook names and must be preserved byte-for-byte.
		'give_donor_recount_should_increase_value' => 'give_customer_recount_sholud_increase_value',
		'give_donor_recount_should_increase_count' => 'give_customer_recount_should_increase_count',
		'give_donation_amount' => 'give_payment_amount',
		'give_get_donation_form_title' => 'give_get_payment_form_title',
		'give_decrease_earnings_on_refunded' => 'give_decrease_store_earnings_on_refund',
		'give_decrease_donor_value_on_refunded' => 'give_decrease_customer_value_on_refund',
		'give_decrease_donors_donation_count_on_refunded' => 'give_decrease_customer_purchase_count_on_refund',
		'give_should_process_refunded' => 'give_should_process_refund',
		'give_settings_export_excludes' => 'settings_export_excludes',
		'give_ajax_form_search_response' => 'give_ajax_form_search_responce',
	);
}
/**
 * Deprecated filter mapping.
 *
 * When a renamed hook fires, forward its value through any callbacks still
 * registered on the old (deprecated) hook name, and raise a deprecation
 * notice (suppressed during AJAX requests).
 *
 * @param mixed  $data  Value being filtered.
 * @param string $arg_1 Passed filter argument 1.
 * @param string $arg_2 Passed filter argument 2.
 * @param string $arg_3 Passed filter argument 3.
 *
 * @return mixed Possibly-modified $data after the deprecated hook ran.
 */
function give_deprecated_filter_mapping( $data, $arg_1 = '', $arg_2 = '', $arg_3 = '' ) {

	$give_map_deprecated_filters = give_deprecated_filters();
	$filter                      = current_filter();

	if ( isset( $give_map_deprecated_filters[ $filter ] ) ) {
		if ( has_filter( $give_map_deprecated_filters[ $filter ] ) ) {
			$data = apply_filters( $give_map_deprecated_filters[ $filter ], $data, $arg_1, $arg_2, $arg_3 );

			if ( ! defined( 'DOING_AJAX' ) ) {
				_give_deprecated_function(
					sprintf( /* translators: %s: filter name */
						// Fix: __() was missing the 'give' text domain, so the
						// string could never be translated.
						__( 'The %s filter', 'give' ),
						$give_map_deprecated_filters[ $filter ]
					),
					'1.7',
					$filter
				);
			}
		}
	}

	return $data;
}
| {
"pile_set_name": "Github"
} |
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package net
import "os"
// fileAddr is a minimal Addr implementation that carries the name of the
// *os.File an endpoint was created from, for use in error values.
type fileAddr string

func (fileAddr) Network() string { return "file+net" }
func (f fileAddr) String() string { return string(f) }
// FileConn returns a copy of the network connection corresponding to
// the open file f.
// It is the caller's responsibility to close f when finished.
// Closing c does not affect f, and closing f does not affect c.
func FileConn(f *os.File) (c Conn, err error) {
	// Wrap any failure in an OpError that names the originating file.
	if c, err = fileConn(f); err != nil {
		err = &OpError{Op: "file", Net: "file+net", Source: nil, Addr: fileAddr(f.Name()), Err: err}
	}
	return c, err
}
// FileListener returns a copy of the network listener corresponding
// to the open file f.
// It is the caller's responsibility to close ln when finished.
// Closing ln does not affect f, and closing f does not affect ln.
func FileListener(f *os.File) (ln Listener, err error) {
	// Wrap any failure in an OpError that names the originating file.
	if ln, err = fileListener(f); err != nil {
		err = &OpError{Op: "file", Net: "file+net", Source: nil, Addr: fileAddr(f.Name()), Err: err}
	}
	return ln, err
}
// FilePacketConn returns a copy of the packet network connection
// corresponding to the open file f.
// It is the caller's responsibility to close f when finished.
// Closing c does not affect f, and closing f does not affect c.
func FilePacketConn(f *os.File) (c PacketConn, err error) {
	// Wrap any failure in an OpError that names the originating file.
	if c, err = filePacketConn(f); err != nil {
		err = &OpError{Op: "file", Net: "file+net", Source: nil, Addr: fileAddr(f.Name()), Err: err}
	}
	return c, err
}
| {
"pile_set_name": "Github"
} |
'use strict';
const { contract, web3 } = require('@nomiclabs/buidler');
const { assert, addSnapshotBeforeRestoreAfterEach } = require('./common');
const { setupContract } = require('./setup');
const {
constants: { inflationStartTimestampInSecs, ZERO_ADDRESS },
} = require('../..');
const {
toUnit,
divideDecimal,
fastForwardTo,
multiplyDecimal,
powerToDecimal,
} = require('../utils')();
const { onlyGivenAddressCanInvoke, ensureOnlyExpectedMutativeFunctions } = require('./helpers');
const BN = require('bn.js');
contract('SupplySchedule', async accounts => {
const initialWeeklySupply = divideDecimal(75000000, 52); // 75,000,000 / 52 weeks
const inflationStartDate = inflationStartTimestampInSecs;
const [, owner, synthetix, account1, account2] = accounts;
let supplySchedule, synthetixProxy, decayRate;
// Expected decayed issuance for a given decay week:
// supply(week) = initialAmount * (1 - DECAY_RATE) ** week
function getDecaySupplyForWeekNumber(initialAmount, weekNumber) {
	const retainedPerWeek = toUnit(1).sub(decayRate);
	const compoundedRetention = powerToDecimal(retainedPerWeek, weekNumber);
	return multiplyDecimal(compoundedRetention, initialAmount);
}
addSnapshotBeforeRestoreAfterEach(); // ensure EVM timestamp resets to inflationStartDate
beforeEach(async () => {
supplySchedule = await setupContract({ accounts, contract: 'SupplySchedule' });
synthetixProxy = await setupContract({ accounts, contract: 'ProxyERC20' });
await supplySchedule.setSynthetixProxy(synthetixProxy.address, { from: owner });
await synthetixProxy.setTarget(synthetix, { from: owner });
decayRate = await supplySchedule.DECAY_RATE();
});
it('only expected functions should be mutative', () => {
ensureOnlyExpectedMutativeFunctions({
abi: supplySchedule.abi,
ignoreParents: ['Owned'],
expected: ['recordMintEvent', 'setMinterReward', 'setSynthetixProxy'],
});
});
it('should set constructor params on deployment', async () => {
// constructor(address _owner, uint _lastMintEvent, uint _currentWeek) //
const lastMintEvent = 0;
const weekCounter = 0;
const instance = await setupContract({
accounts,
contract: 'SupplySchedule',
args: [account1, lastMintEvent, weekCounter],
});
const weeklyIssuance = divideDecimal(75e6, 52);
assert.equal(await instance.owner(), account1);
assert.bnEqual(await instance.lastMintEvent(), 0);
assert.bnEqual(await instance.weekCounter(), 0);
assert.bnEqual(await instance.INITIAL_WEEKLY_SUPPLY(), weeklyIssuance);
});
describe('linking synthetix', async () => {
it('should have set synthetix proxy', async () => {
	// Fix: the original declared a local `synthetixProxy` that shadowed the
	// suite-level contract and then asserted it equal to itself, so the
	// test could never fail. Compare the recorded address against the
	// actual proxy contract's address instead.
	const recordedProxyAddress = await supplySchedule.synthetixProxy();
	assert.equal(recordedProxyAddress, synthetixProxy.address);
});
it('should revert when setting synthetix proxy to ZERO_ADDRESS', async () => {
await assert.revert(supplySchedule.setSynthetixProxy(ZERO_ADDRESS, { from: owner }));
});
it('should emit an event when setting synthetix proxy', async () => {
const txn = await supplySchedule.setSynthetixProxy(account2, { from: owner });
assert.eventEqual(txn, 'SynthetixProxyUpdated', {
newAddress: account2,
});
});
it('should disallow a non-owner from setting the synthetix proxy', async () => {
await onlyGivenAddressCanInvoke({
fnc: supplySchedule.setSynthetixProxy,
args: [account2],
address: owner,
accounts,
});
});
});
describe('functions and modifiers', async () => {
it('should allow owner to update the minter reward amount', async () => {
const existingReward = await supplySchedule.minterReward();
const newReward = existingReward.sub(toUnit('10'));
const minterRewardUpdatedEvent = await supplySchedule.setMinterReward(newReward, {
from: owner,
});
assert.eventEqual(minterRewardUpdatedEvent, 'MinterRewardUpdated', {
newRewardAmount: newReward,
});
assert.bnEqual(await supplySchedule.minterReward(), newReward);
});
it('should disallow a non-owner from setting the minter reward amount', async () => {
await onlyGivenAddressCanInvoke({
fnc: supplySchedule.setMinterReward,
args: ['0'],
address: owner,
accounts,
});
});
describe('exponential decay supply with initial weekly supply of 1.44m', async () => {
it('check calculating week 1 of inflation decay is valid', async () => {
const decay = multiplyDecimal(decayRate, initialWeeklySupply);
const expectedIssuance = initialWeeklySupply.sub(decay);
// check expectedIssuance of week 1 is same as getDecaySupplyForWeekNumber
// bnClose as decimal multiplication has rounding
assert.bnClose(expectedIssuance, getDecaySupplyForWeekNumber(initialWeeklySupply, 1));
// bnClose as tokenDecaySupply is calculated using the decayRate (rounding down)
// and not subtraction from initialWeeklySupply.
assert.bnClose(await supplySchedule.tokenDecaySupplyForWeek(1), expectedIssuance);
});
it('should calculate Week 2 Supply of inflation decay from initial weekly supply', async () => {
const expectedIssuance = getDecaySupplyForWeekNumber(initialWeeklySupply, 2);
assert.bnEqual(await supplySchedule.tokenDecaySupplyForWeek(2), expectedIssuance);
});
it('should calculate Week 3 Supply of inflation decay from initial weekly supply', async () => {
	// Fix: copy-paste bug — this test was querying week 2, duplicating the
	// previous test instead of covering week 3 as its title states.
	const expectedIssuance = getDecaySupplyForWeekNumber(initialWeeklySupply, 3);
	const supply = await supplySchedule.tokenDecaySupplyForWeek(3);
	assert.bnEqual(supply, expectedIssuance);
});
it('should calculate Week 10 Supply of inflation decay from initial weekly supply', async () => {
const expectedIssuance = getDecaySupplyForWeekNumber(initialWeeklySupply, 10);
assert.bnEqual(await supplySchedule.tokenDecaySupplyForWeek(10), expectedIssuance);
});
it('should calculate Week 11 Supply of inflation decay from initial weekly supply', async () => {
const expectedIssuance = getDecaySupplyForWeekNumber(initialWeeklySupply, 11);
assert.bnEqual(await supplySchedule.tokenDecaySupplyForWeek(11), expectedIssuance);
});
it('should calculate last Week 195 Supply of inflation decay from initial weekly supply', async () => {
const expectedIssuance = getDecaySupplyForWeekNumber(initialWeeklySupply, 195);
const supply = await supplySchedule.tokenDecaySupplyForWeek(195);
assert.bnEqual(supply, expectedIssuance);
});
});
describe('terminal inflation supply with initial total supply of 1,000,000', async () => {
let weeklySupplyRate;
// Calculate the compound supply minted over numberOfPeriods (weeks), given
// the principal at the start of the periods:
// supply = P * ((1 + weeklyRate)^weeks - 1)
function getCompoundSupply(principal, weeklyRate, numberOfPeriods) {
	// effective compound growth factor over the period, 18-decimal precision
	const growthFactor = powerToDecimal(toUnit(1).add(weeklyRate), numberOfPeriods);
	return multiplyDecimal(growthFactor.sub(toUnit(1)), principal);
}
beforeEach(async () => {
const terminalAnnualSupplyRate = await supplySchedule.TERMINAL_SUPPLY_RATE_ANNUAL();
weeklySupplyRate = terminalAnnualSupplyRate.div(new BN(52));
});
// check initalAmount * weeklySupplyRate for 1 week is expected amount
it('should calculate weekly supply for 1 week at 1.25pa% with 1m principal', async () => {
const intialAmount = 1e6; // 1,000,000
const expectedAmount = multiplyDecimal(intialAmount, weeklySupplyRate); // 12,500
assert.bnEqual(
await supplySchedule.terminalInflationSupply(intialAmount, 1),
expectedAmount
);
});
it('should calculate compounded weekly supply for 2 weeks at 1.25pa%', async () => {
const intialAmount = toUnit(1e6); // 1,000,000
const expectedAmount = getCompoundSupply(intialAmount, weeklySupplyRate, 2);
const result = await supplySchedule.terminalInflationSupply(intialAmount, 2);
assert.bnClose(result, expectedAmount);
});
it('should calculate compounded weekly supply for 4 weeks at 1.25pa%', async () => {
const intialAmount = toUnit(1e6); // 1,000,000
const expectedAmount = getCompoundSupply(intialAmount, weeklySupplyRate, 4);
const result = await supplySchedule.terminalInflationSupply(intialAmount, 4);
assert.bnEqual(result, expectedAmount);
});
it('should calculate compounded weekly supply with principal 10m for 10 weeks at 1.25pa%', async () => {
const intialAmount = toUnit(10e6); // 10,000,000
const expectedAmount = getCompoundSupply(intialAmount, weeklySupplyRate, 10);
const result = await supplySchedule.terminalInflationSupply(intialAmount, 10);
assert.bnEqual(result, expectedAmount);
});
it('should calculate compounded weekly supply with principal 260,387,945 for 1 week at 1.25pa%', async () => {
const initialAmount = toUnit(260387945); // 260,387,945
const expectedAmount = getCompoundSupply(initialAmount, weeklySupplyRate, 1);
// check compound supply for 1 week is correct
assert.bnEqual(expectedAmount, multiplyDecimal(initialAmount, weeklySupplyRate)); // ~125,187
const result = await supplySchedule.terminalInflationSupply(initialAmount, 1);
assert.bnEqual(result, expectedAmount);
});
it('should calculate compounded weekly supply with principal 260,387,945 for 2 weeks at 1.25pa%', async () => {
const initialAmount = toUnit(260387945); // 260,387,945
const expectedAmount = getCompoundSupply(initialAmount, weeklySupplyRate, 2);
const result = await supplySchedule.terminalInflationSupply(initialAmount, 2);
assert.bnEqual(result, expectedAmount);
});
it('should calculate compounded weekly supply with principal 260,387,945 for 10 weeks at 1.25pa%', async () => {
const initialAmount = toUnit(260387945); // 260,387,945
const expectedAmount = getCompoundSupply(initialAmount, weeklySupplyRate, 10);
const result = await supplySchedule.terminalInflationSupply(initialAmount, 10);
assert.bnEqual(result, expectedAmount);
});
it('should calculate compounded weekly supply with principal 260,387,945 for 100 weeks at 1.25pa%', async () => {
const initialAmount = toUnit(260387945); // 260,387,945
const expectedAmount = getCompoundSupply(initialAmount, weeklySupplyRate, 100);
const result = await supplySchedule.terminalInflationSupply(initialAmount, 100);
assert.bnEqual(result, expectedAmount);
});
});
describe('mintable supply', async () => {
const DAY = 60 * 60 * 24;
const WEEK = 604800;
const weekOne = inflationStartDate + 3600 + 1 * DAY; // 1 day and 60 mins within first week of Inflation supply > Inflation supply as 1 day buffer is added to lastMintEvent
// Mimic Synthetix minting by calling recordMintEvent (from the `synthetix`
// address) and verify the schedule's bookkeeping afterwards:
//  - weekCounter advances by `weeksIssued`
//  - lastMintEvent equals inflation start + (weeks elapsed) + 1-day buffer
//  - a SupplyMinted event is emitted with the expected values
async function checkMintedValues(
	mintedSupply = new BN(0),
	weeksIssued,
	instance = supplySchedule
) {
	const weekCounterBefore = await instance.weekCounter();
	// call updateMintValues to mimic synthetix issuing tokens
	const transaction = await instance.recordMintEvent(mintedSupply, {
		from: synthetix,
	});
	const weekCounterAfter = weekCounterBefore.add(new BN(weeksIssued));
	const lastMintEvent = await instance.lastMintEvent();
	assert.bnEqual(await instance.weekCounter(), weekCounterAfter);
	// lastMintEvent is updated to number of weeks after inflation start date + 1 DAY buffer
	assert.ok(
		lastMintEvent.toNumber() === inflationStartDate + weekCounterAfter * WEEK + 1 * DAY
	);
	// check event emitted has correct amounts of supply
	assert.eventEqual(transaction, 'SupplyMinted', {
		supplyMinted: mintedSupply,
		numberOfWeeksIssued: new BN(weeksIssued),
		lastMintEvent: lastMintEvent,
	});
}
it('should calculate the mintable supply as 0 within 1st week in year 2 ', async () => {
const expectedIssuance = web3.utils.toBN(0);
// fast forward EVM to Week 1 in Year 2 schedule starting at UNIX 1552435200+
await fastForwardTo(new Date(weekOne * 1000));
assert.bnEqual(await supplySchedule.mintableSupply(), expectedIssuance);
});
it('should calculate the mintable supply for 1 weeks in year 2 in week 2 - 75M supply', async () => {
const expectedIssuance = initialWeeklySupply;
const inWeekTwo = weekOne + WEEK;
// fast forward EVM to Week 2 in Year 2 schedule starting at UNIX 1552435200+
await fastForwardTo(new Date(inWeekTwo * 1000));
assert.bnEqual(await supplySchedule.mintableSupply(), expectedIssuance);
});
it('should calculate the mintable supply for 2 weeks in year 2 in week 3 - 75M supply', async () => {
const expectedIssuance = initialWeeklySupply.mul(new BN(2));
const inWeekThree = weekOne + 2 * WEEK;
// fast forward EVM to within Week 3 in Year 2 schedule starting at UNIX 1552435200+
await fastForwardTo(new Date(inWeekThree * 1000));
assert.bnEqual(await supplySchedule.mintableSupply(), expectedIssuance);
});
it('should calculate the mintable supply for 3 weeks in year 2 in week 4 - 75M supply', async () => {
const expectedIssuance = initialWeeklySupply.mul(new BN(3));
const inWeekFour = weekOne + 3 * WEEK;
// fast forward EVM to within Week 4 in Year 2 schedule starting at UNIX 1552435200+
await fastForwardTo(new Date(inWeekFour * 1000));
assert.bnEqual(await supplySchedule.mintableSupply(), expectedIssuance);
});
it('should calculate the mintable supply for 39 weeks without decay in Year 2 - 75M supply', async () => {
const expectedIssuance = initialWeeklySupply.mul(new BN(39));
const weekFourty = weekOne + 39 * WEEK;
// fast forward EVM to within Week 40 starting at UNIX 1552435200+
await fastForwardTo(new Date(weekFourty * 1000));
// bnClose as weeklyIssuance.mul(new BN(3)) rounding
assert.bnClose(await supplySchedule.mintableSupply(), expectedIssuance);
});
it('should calculate the mintable supply for 39 weeks without decay, 1 week with decay in week 41', async () => {
// add 39 weeks of inflationary supply
let expectedIssuance = initialWeeklySupply.mul(new BN(39));
// add Week 40 of decay supply
expectedIssuance = expectedIssuance.add(
getDecaySupplyForWeekNumber(initialWeeklySupply, 1)
);
const weekFourtyOne = weekOne + 40 * WEEK;
// fast forward EVM to within Week 41 schedule starting at UNIX 1552435200+
await fastForwardTo(new Date(weekFourtyOne * 1000));
assert.bnClose(await supplySchedule.mintableSupply(), expectedIssuance);
});
it('should calculate the mintable supply for 39 weeks without decay, 2 weeks with decay in week 42', async () => {
// add 39 weeks of inflationary supply
let expectedIssuance = initialWeeklySupply.mul(new BN(39));
// add Week 40 & 41 of decay supply
const week40Supply = getDecaySupplyForWeekNumber(initialWeeklySupply, 1);
const week41Supply = getDecaySupplyForWeekNumber(initialWeeklySupply, 2);
expectedIssuance = expectedIssuance.add(week40Supply).add(week41Supply);
const weekFourtyTwo = weekOne + 41 * WEEK;
// fast forward EVM to within Week 41 schedule starting at UNIX 1552435200+
await fastForwardTo(new Date(weekFourtyTwo * 1000));
assert.bnClose(await supplySchedule.mintableSupply(), expectedIssuance);
});
it('should calculate mintable supply of 1x week after minting', async () => {
// fast forward EVM to Week 2 after UNIX 1552435200+
const weekTwo = weekOne + 1 * WEEK;
await fastForwardTo(new Date(weekTwo * 1000));
const mintableSupply = await supplySchedule.mintableSupply();
// fake updateMintValues
await checkMintedValues(mintableSupply, 1);
// Fast forward to week 2
const weekThree = weekTwo + WEEK + 1 * DAY;
// Expect only 1 extra week is mintable after first week minted
await fastForwardTo(new Date(weekThree * 1000));
assert.bnEqual(await supplySchedule.mintableSupply(), initialWeeklySupply);
});
it('should calculate mintable supply of 2 weeks if 2+ weeks passed, after minting', async () => {
// fast forward EVM to Week 2 in Year 2 schedule starting at UNIX 1552435200+
const weekTwo = weekOne + 1 * WEEK;
await fastForwardTo(new Date(weekTwo * 1000));
// Mint the first week of supply
const mintableSupply = await supplySchedule.mintableSupply();
// fake updateMintValues
await checkMintedValues(mintableSupply, 1);
// fast forward 2 weeks to within week 4
const weekFour = weekTwo + 2 * WEEK + 1 * DAY; // Sometime within week four
// // Expect 2 week is mintable after first week minted
const expectedIssuance = initialWeeklySupply.mul(new BN(2));
await fastForwardTo(new Date(weekFour * 1000));
// fake minting 2 weeks again
await checkMintedValues(expectedIssuance, 2);
});
describe('rounding down lastMintEvent to number of weeks issued since inflation start date', async () => {
it('should have 0 mintable supply, only after 1 day, if minting was 5 days late', async () => {
// fast forward EVM to Week 2 in
const weekTwoAndFiveDays = weekOne + 1 * WEEK + 5 * DAY;
await fastForwardTo(new Date(weekTwoAndFiveDays * 1000));
// Mint the first week of supply
const mintableSupply = await supplySchedule.mintableSupply();
// fake updateMintValues
await checkMintedValues(mintableSupply, 1);
// fast forward +1 day, should not be able to mint again
const weekTwoAndSixDays = weekTwoAndFiveDays + 1 * DAY; // Sometime within week two
// Expect no supply is mintable as still within weekTwo
await fastForwardTo(new Date(weekTwoAndSixDays * 1000));
assert.bnEqual(await supplySchedule.mintableSupply(), new BN(0));
});
it('should be 1 week of mintable supply, after 2+ days, if minting was 5 days late', async () => {
// fast forward EVM to Week 2 in
const weekTwoAndFiveDays = weekOne + 1 * WEEK + 5 * DAY;
await fastForwardTo(new Date(weekTwoAndFiveDays * 1000));
// Mint the first week of supply
const mintableSupply = await supplySchedule.mintableSupply();
// fake updateMintValues
await checkMintedValues(mintableSupply, 1);
// fast forward +2 days, should be able to mint again
const weekThree = weekTwoAndFiveDays + 2 * DAY; // Sometime within week three
// Expect 1 week is mintable after first week minted
const expectedIssuance = initialWeeklySupply.mul(new BN(1));
await fastForwardTo(new Date(weekThree * 1000));
// fake minting 1 week again
await checkMintedValues(expectedIssuance, 1);
});
it('should calculate 2 weeks of mintable supply after 1 week and 2+ days, if minting was 5 days late in week 2', async () => {
// fast forward EVM to Week 2 but not whole week 2
const weekTwoAndFiveDays = weekOne + 1 * WEEK + 5 * DAY;
await fastForwardTo(new Date(weekTwoAndFiveDays * 1000));
// Mint the first week of supply
const mintableSupply = await supplySchedule.mintableSupply();
// fake updateMintValues
await checkMintedValues(mintableSupply, 1);
// fast forward 1 week and +2 days, should be able to mint again
const withinWeekFour = weekTwoAndFiveDays + 1 * WEEK + 2 * DAY; // Sometime within week three
// Expect 1 week is mintable after first week minted
const expectedIssuance = initialWeeklySupply.mul(new BN(2));
await fastForwardTo(new Date(withinWeekFour * 1000));
// fake minting 1 week again
await checkMintedValues(expectedIssuance, 2);
});
});
describe('setting weekCounter and lastMintEvent on supplySchedule to week 39', async () => {
	let instance, lastMintEvent;
	beforeEach(async () => {
		// constructor(address _owner, uint _lastMintEvent, uint _currentWeek) //
		lastMintEvent = 1575552876; // Thursday, 5 December 2019 13:34:36
		const weekCounter = 39; // latest week
		instance = await setupContract({
			accounts,
			contract: 'SupplySchedule',
			args: [owner, lastMintEvent, weekCounter],
		});
		// setup new instance
		await instance.setSynthetixProxy(synthetixProxy.address, { from: owner });
		await synthetixProxy.setTarget(synthetix, { from: owner });
	});
	it('should calculate week 40 as week 1 of decay ', async () => {
		// First decay week: issuance is the initial weekly supply less one decay step.
		const decay = multiplyDecimal(decayRate, initialWeeklySupply);
		const expectedIssuance = initialWeeklySupply.sub(decay);
		// fast forward EVM by 1 WEEK (plus a small buffer) past the last mint event
		const oneWeekAfterMint = lastMintEvent + 1 * WEEK + 500;
		await fastForwardTo(new Date(oneWeekAfterMint * 1000));
		// Mint the single unminted week of supply
		const mintableSupply = await instance.mintableSupply();
		assert.bnClose(expectedIssuance, mintableSupply);
		// call recordMintEvent
		await checkMintedValues(mintableSupply, 1, instance);
	});
	it('should calculate week 41 as week 2 of decay ', async () => {
		const weeks = 2;
		// Sum the decayed issuance for decay weeks 1 and 2.
		let expectedIssuance = new BN();
		for (let i = 1; i <= weeks; i++) {
			expectedIssuance = expectedIssuance.add(
				getDecaySupplyForWeekNumber(initialWeeklySupply, new BN(i))
			);
		}
		// fast forward EVM by 2 WEEKS (plus a small buffer) past the last mint event
		const twoWeeksAfterMint = lastMintEvent + 2 * WEEK + 500;
		await fastForwardTo(new Date(twoWeeksAfterMint * 1000));
		// Mint the two unminted weeks of supply
		const mintableSupply = await instance.mintableSupply();
		assert.bnClose(expectedIssuance, mintableSupply);
		// call recordMintEvent
		await checkMintedValues(mintableSupply, weeks, instance);
	});
	it('should calculate week 45 as week 6 of decay ', async () => {
		const weeks = 6;
		// Sum the decayed issuance for decay weeks 1 through 6.
		let expectedIssuance = new BN();
		for (let i = 1; i <= weeks; i++) {
			expectedIssuance = expectedIssuance.add(
				// fix: wrap the week number in a BN for consistency with the 2-week test above
				getDecaySupplyForWeekNumber(initialWeeklySupply, new BN(i))
			);
		}
		// fast forward EVM by 6 WEEKS (plus a small buffer) past the last mint event
		// (was misleadingly named `inWeek42` — a copy-paste from the 2-week test)
		const sixWeeksAfterMint = lastMintEvent + 6 * WEEK + 500;
		await fastForwardTo(new Date(sixWeeksAfterMint * 1000));
		// Mint the six unminted weeks of supply
		const mintableSupply = await instance.mintableSupply();
		assert.bnClose(expectedIssuance, mintableSupply);
		// call recordMintEvent
		await checkMintedValues(mintableSupply, weeks, instance);
	});
});
describe('setting weekCounter and lastMintEvent on supplySchedule to week 233', async () => {
	let instance, lastMintEvent;
	beforeEach(async () => {
		// constructor(address _owner, uint _lastMintEvent, uint _currentWeek) //
		lastMintEvent = inflationStartDate + 233 * WEEK; // 2019-03-06 + 233 weeks = 23 August 2023 00:00:00
		const weekCounter = 233; // latest week
		instance = await setupContract({
			accounts,
			contract: 'SupplySchedule',
			args: [owner, lastMintEvent, weekCounter],
		});
		// wire the fresh schedule up to the synthetix proxy
		await instance.setSynthetixProxy(synthetixProxy.address, { from: owner });
		await synthetixProxy.setTarget(synthetix, { from: owner });
	});
	it('should calculate week 234 as last week of decay (195th) ', async () => {
		const mintWeeks = 1;
		// Decay week 195 is the final decayed week of issuance.
		const expectedIssuance = getDecaySupplyForWeekNumber(initialWeeklySupply, 195);
		// advance the EVM clock one week (plus a small buffer) past the last mint event
		const fastForwardTarget = lastMintEvent + mintWeeks * WEEK + 500;
		await fastForwardTo(new Date(fastForwardTarget * 1000));
		const supply = await instance.mintableSupply();
		assert.bnClose(expectedIssuance, supply);
		// record the mint event and verify the minted amounts
		await checkMintedValues(supply, mintWeeks, instance);
	});
});
});
});
});
| {
"pile_set_name": "Github"
} |
index_echeck=$1 Возможно, он не установлен или <a href='$2'>конфигурация модуля</a> неверна.
index_edb=Не удалось подключиться к базе данных Bacula: $1 Возможно, она не настроена или <a href='$2'>конфигурация модуля</a> неверна.
index_eng=Не удалось подключиться к базе данных групп Bacula: $1. Возможно, его не существует или <a href='$2'>конфигурация модуля</a> неверна.
index_econsole=Команде консоли Bacula $1 не удалось связаться с директором Bacula. Убедитесь, что пароль в $2 правильный.
index_econsole2=Команда консоли Bacula $1 не настроена с допустимым хостом директора Bacula. В настоящее время используется $2, которого не существует.
index_fixpass=Нажмите здесь, чтобы исправить пароль консоли
index_fixaddr=Нажмите здесь, чтобы исправить Bacula Director Host
index_stop=Стоп Бакула
index_stopdesc=Нажмите эту кнопку, чтобы закрыть процессы демона Bacula, перечисленные выше.
index_start=Старт Bacula
index_startdesc=Нажмите эту кнопку, чтобы запустить процессы демона Bacula, перечисленные выше.
index_restart=Перезапустите Bacula
index_restartdesc=Нажмите эту кнопку, чтобы остановить и перезапустить процессы демона Bacula, перечисленные выше. Это может быть необходимо для активации конфигурации устройства хранения.
index_apply=Применить конфигурацию
index_applydesc=Нажмите эту кнопку, чтобы активировать конфигурацию директора Bacula, показанную выше.
index_boot=Начать при загрузке
index_bootdesc=Измените эту опцию, чтобы контролировать, запускается ли Bacula во время загрузки системы.
index_status=Статусы процесса:
index_up=вверх
index_down=вниз
index_return=индекс модуля
index_versionbacula=Bacula $1
index_versionbareos=Bareos $1
index_notrun=Резервное копирование и другие операции не могут быть выполнены, так как демон Bacula Directory не работает.
index_eversion=Ваша система использует Bacula версии $2, но этот модуль Webmin поддерживает только версии $1 и выше.
index_dir=Конфигурация директора
index_sd=Конфигурация демона хранилища
index_fd=Конфигурация File Daemon
index_groups=Конфигурация группы Bacula
index_actions=Резервное копирование и восстановление действий
index_ocmin=Предоставлено <a href=$2 target=_new>Linmin</a> </a>
connect_emysql=Не удалось загрузить драйвер DBI базы данных $1
connect_elogin=Не удалось войти в базу данных $1:$2.
connect_equery=База данных $1 не содержит таблиц Bacula.
connect_equery2=Это может быть связано с тем, что установленный модуль Perl SQLite слишком новый и не поддерживает более старый формат базы данных SQLite, используемый Bacula.
connect_equery3=База данных $1 не содержит таблицы групп OC Bacula.
esql=Ошибка SQL : $1
check_edir=Каталог конфигурации Bacula $1 не найден в вашей системе.
check_ebacula=Управляющая команда Bacula $1 не найдена.
check_econsole=Команда консоли Bacula $1 не найдена.
check_edirector=Файл конфигурации директора Bacula $1 не найден.
check_eclient=Эта система выглядит скорее как <a href='$2'>клиент Bacula</a>, а не как директор.
check_econfigs=Файлы конфигурации Bacula не найдены в $1
check_eservers=Группы серверов Webmin не определены
check_engmod=Модуль групп OpenCountry Bacula не установлен
proc_bacula-sd=Демон хранения
proc_bacula-fd=Файловый демон
proc_bacula-dir=Демон-директор Bacula
proc_bareos-sd=Демон хранения
proc_bareos-fd=Файловый демон
proc_bareos-dir=Демон-директор Bacula
stop_err=Не удалось остановить Bacula
start_err=Не удалось запустить Bacula
start_einit=Не найден сценарий инициализации для $1
start_erun=Не удалось запустить $1 : $2
restart_err=Не удалось перезапустить Bacula
apply_err=Не удалось применить конфигурацию
apply_failed=Обнаружена ошибка конфигурации
apply_problem=Не удалось применить конфигурацию : $1
jobs_title=Резервное копирование
jobs_none=Задания резервного копирования еще не определены.
jobs_name=Название работы
jobs_deftype=Значения по умолчанию?
jobs_type=Тип вакансии
jobs_client=Клиент для резервного копирования
jobs_fileset=Файл установлен для резервного копирования
jobs_schedule=Расписание резервного копирования
jobs_add=Добавьте новое задание резервного копирования.
jobs_delete=Удалить выбранные вакансии
jobs_return=список вакансий
jobs_derr=Не удалось удалить задания
filesets_title=Наборы файлов
filesets_none=Наборы резервных файлов еще не определены.
filesets_name=Имя набора файлов
filesets_files=Включенные файлы
filesets_add=Добавьте новый набор файлов резервных копий.
filesets_delete=Удалить выбранные наборы файлов
filesets_return=список наборов файлов
filesets_derr=Не удалось удалить наборы файлов
filesets_ednone=Не выбрано, ничего не выбрано
fileset_title1=Создать набор файлов
fileset_title2=Изменить набор файлов
fileset_header=Подробности набора резервных файлов
fileset_egone=Набор файлов больше не существует!
fileset_name=Имя набора файлов
fileset_include=Файлы и каталоги для резервного копирования
fileset_exclude=Файлы и каталоги, чтобы пропустить
fileset_sig=Тип подписи файла
fileset_none=Никто
fileset_md5=MD5
fileset_err=Не удалось сохранить набор файлов
fileset_ename=Отсутствует имя набора файлов
fileset_eclash=Набор файлов с таким именем уже существует
fileset_echild=Этот набор файлов не может быть удален, так как он используется $1
fileset_comp=Тип сжатия
fileset_gzipdef=<Уровень сжатия по умолчанию>
fileset_lzo=Компрессия LZO
fileset_gzip=Уровень Gzip $1
fileset_onefs=Ограничить резервное копирование одной файловой системой?
clients_title=Резервные клиенты
clients_none=Резервные клиенты еще не определены.
clients_name=Имя клиента
clients_address=Имя хоста или адрес
clients_catalog=Каталог
clients_add=Добавьте новый клиент резервного копирования.
clients_delete=Удалить выбранных клиентов
clients_return=список клиентов
clients_derr=Не удалось удалить клиентов
client_title1=Создать резервный клиент
client_title2=Изменить резервный клиент
client_header=Детали клиента, подлежащего резервному копированию
client_egone=Клиент больше не существует!
client_name=Имя клиента FD
client_address=Имя хоста или IP-адрес
client_port=Порт Bacula FD
client_pass=Bacula FD пароль
client_catalog=Каталог для использования
client_prune=Удалить устаревшие задания и файлы?
client_fileret=Сохраняйте резервные файлы для
client_jobret=Сохранять задания резервного копирования для
client_err=Не удалось сохранить резервную копию клиента
client_ename=Отсутствующее или неверное имя клиента
client_eclash=Клиент с таким именем уже существует
client_epass=Пароль отсутствует
client_eaddress=Отсутствует или неверное имя хоста или адрес
client_eport=Отсутствует или неверный порт FD
client_efileret=Отсутствует или неверный срок хранения файла
client_ejobret=Отсутствует или недействительный срок сохранения работы
client_echild=Этот клиент не может быть удален, так как он используется $1
client_status=Показать статус
job_title1=Создать задание резервного копирования
job_title2=Изменить задание резервного копирования
job_header=Подробности задания резервного копирования
job_name=Имя задания резервного копирования
job_enabled=Задание резервного копирования включено?
job_def=Тип по умолчанию
job_def0=Определение по умолчанию
job_def1=Автономная работа
job_def2=Наследовать по умолчанию от $1
job_type=Тип вакансии
job_level=Уровень резервного копирования
job_client=Клиент для резервного копирования
job_fileset=Файл установлен для резервного копирования
job_schedule=Резервное копирование по расписанию
job_storage=Устройство хранения назначения
job_pool=Объем пула
job_messages=Направление для сообщений
job_prority=Приоритет резервного копирования
job_err=Не удалось сохранить задание резервного копирования
job_ename=Отсутствует или неверное имя работы
job_eclash=Работа с таким именем уже существует
job_epriority=Отсутствует или неверный номер приоритета
job_echild=Это определение задания по умолчанию не может быть удалено, так как оно используется $1
job_run=Запустить сейчас
job_before=Команда перед работой
job_after=Команда за работой
job_cbefore=Команда перед работой (на клиенте)
job_cafter=Команда за работой (на клиенте)
schedules_title=Расписание резервного копирования
schedules_none=Графики резервного копирования еще не определены.
schedules_name=Название расписания
schedules_sched=Запускать уровни и время
schedules_add=Добавьте новое расписание резервного копирования.
schedules_delete=Удалить выбранные расписания
schedules_return=список расписаний
schedules_derr=Не удалось удалить расписания
schedule_title1=Создать расписание резервного копирования
schedule_title2=Изменить расписание резервного копирования
schedule_header=Подробности расписания резервного копирования
schedule_name=Имя расписания резервного копирования
schedule_runs=Запускать уровни и время
schedule_level=Уровень резервного копирования
schedule_pool=объем
schedule_times=Запустить время от времени
schedule_err=Не удалось сохранить расписание резервного копирования
schedule_ename=Отсутствует или неверное имя расписания
schedule_eclash=Расписание с таким именем уже существует
schedule_etimes=Отсутствует время резервного копирования в строке $1
schedule_echild=Это расписание не может быть удалено, так как оно используется $1
backup_title=Запустить задание резервного копирования
backup_header=Подробности задания резервного копирования
backup_job=Задание для запуска
backup_jd=$1 (набор файлов $2 на $3)
backup_wait=Ждать результатов?
backup_ok=Резервное копирование сейчас
backup_run=Запуск задания резервного копирования $1 ..
backup_return=резервная форма
backup_ejob=.. не смог найти работу!
backup_eok=.. работа не может быть начата
backup_running=.. задание резервного копирования запущено По завершении результаты будут показаны ниже.
backup_running2=.. задание резервного копирования было запущено в фоновом режиме.
backup_done=.. резервное копирование завершено.
backup_failed=.. резервное копирование не завершено успешно. Проверьте сообщение об ошибке выше для деталей.
gbackup_title=Запустить задание резервного копирования группы Bacula
gbackup_run=Запуск задания резервного копирования $1 на клиентах $2.
gbackup_on=Запуск задания резервного копирования на клиенте $1 :
gbackup_header=Детали задания резервного копирования группы Bacula
gbackup_jd=$1 (набор файлов $2 для группы $3)
dirstatus_title=Статус директора
dirstatus_sched=Задания резервного копирования по расписанию
dirstatus_name=Название работы
dirstatus_type=Тип
dirstatus_level=уровень
dirstatus_date=Запуск в
dirstatus_date2=Началось с
dirstatus_volume=объем
dirstatus_schednone=Задания резервного копирования в настоящее время не запланированы.
dirstatus_id=ID запуска
dirstatus_status=Текущее состояние
dirstatus_run=Запуск резервных заданий
dirstatus_runnone=Задания резервного копирования не выполняются.
dirstatus_done=Завершенные задания резервного копирования
dirstatus_bytes=Размер
dirstatus_files=файлы
dirstatus_status2=Положение дел
dirstatus_donenone=Задания резервного копирования не выполнялись.
dirstatus_cancel=Отменить выбранные вакансии
dirstatus_refresh=Обновить список
clientstatus_title=Статус клиента
clientstatus_err=Не удалось получить состояние из $1 : $2
clientstatus_msg=Статус от $1 : $2
clientstatus_show=Показать статус клиента:
clientstatus_ok=Ok
clientstatus_on=$1 (на $2)
storages_title=Демоны хранения
storages_none=Демоны хранения еще не определены.
storages_name=Имя хранилища
storages_address=Имя хоста или адрес
storages_device=Накопитель
storages_type=Тип носителя
storages_add=Добавьте новый демон хранения.
storages_delete=Удалить выбранные демоны хранения
storages_return=список демонов хранилища
storages_derr=Не удалось удалить демоны хранилища
storage_title1=Создать демона хранилища
storage_title2=Редактировать демона хранилища
storage_header=Подробная информация о демоне удаленного хранения
storage_egone=Демон хранения больше не существует!
storage_name=Имя демона хранилища
storage_address=Имя хоста или IP-адрес
storage_port=Порт Bacula SD
storage_pass=Bacula SD пароль
storage_device=Имя устройства хранения
storage_media=Название типа носителя
storage_maxjobs=Максимальное количество одновременных заданий
storage_other=Другой ..
storage_err=Не удалось сохранить демон хранения
storage_ename=Отсутствует имя демона хранилища
storage_eclash=Демон хранения с таким именем уже существует
storage_epass=Пароль отсутствует
storage_eaddress=Отсутствует или неверное имя хоста или адрес
storage_eport=Отсутствует или неверный порт SD
storage_edevice=Отсутствует имя устройства хранения
storage_emedia=Отсутствует название типа носителя
storage_emaxjobs=Отсутствует максимальное количество одновременных заданий
storage_echild=Этот клиент не может быть удален, так как он используется $1
storage_status=Показать статус
devices_title=Устройства хранения данных
devices_none=Устройства хранения еще не определены.
devices_name=Имя устройства
devices_device=Файл устройства или каталог
devices_type=Тип носителя
devices_add=Добавьте новое устройство хранения.
devices_delete=Удалить выбранные устройства хранения
devices_return=список запоминающих устройств
devices_derr=Не удалось удалить устройства хранения
device_title1=Создать устройство хранения
device_title2=Изменить устройство хранения
device_header=Детали устройства хранения файлов
device_egone=Запоминающее устройство больше не существует!
device_name=Имя устройства хранения
device_device=Архивное устройство или каталог
device_media=Название типа носителя
device_label=Автоматически маркировать носитель?
device_random=Среда произвольного доступа?
device_auto=Монтировать автоматически?
device_removable=Съемные медиа?
device_always=Всегда держать открытым?
device_err=Не удалось сохранить устройство хранения
device_ename=Отсутствует имя устройства хранения
device_eclash=Устройство хранения с таким именем уже существует
device_emedia=Отсутствует название типа носителя
device_edevice=Отсутствует или недействительно архивное устройство или каталог
device_echild=Этот клиент не может быть удален, так как он используется $1
storagestatus_title=Состояние демона хранилища
storagestatus_err=Не удалось получить состояние из $1 : $2
storagestatus_msg=Статус от $1 : $2
storagestatus_show=Показать статус демона хранилища:
storagestatus_ok=Ok
label_title=Объем метки
label_header=Детали тома для маркировки
label_storage=Демон хранения для метки
label_pool=Создать в пуле
label_label=Новое название лейбла
label_ok=Ярлык сейчас
label_return=форма этикетки
label_run=Маркировка тома с $2 на демоне хранения $1 ..
label_estorage=.. демон хранения не найден!
label_eexists=.. указанный ярлык уже существует.
label_efailed=.. маркировка не удалась! Проверьте сообщение об ошибке выше по причине.
label_done=.. маркировка успешна.
label_epool=.. не смог найти бассейн!
label_err=Сбой ярлыка
label_elabel=Метка не введена
pools_title=Объемные бассейны
pools_none=Пулы томов еще не определены.
pools_name=Название бассейна
pools_type=Тип бассейна
pools_reten=Срок хранения
pools_add=Добавьте новый пул томов.
pools_delete=Удалить выбранные пулы томов
pools_return=список пулов томов
pools_derr=Не удалось удалить пулы томов
pool_title1=Создать пул томов
pool_title2=Изменить пул томов
pool_header=Подробная информация о пуле резервных томов
pool_egone=Пул томов больше не существует!
pool_name=Имя пула томов
pool_recycle=Автоматически перерабатывать тома?
pool_auto=Обрезать просроченные тома?
pool_any=Резервное копирование на любой том в пуле?
pool_reten=Срок хранения
pool_type=Тип пула томов
pool_max=Максимальное количество заданий на объем
pool_unlimited=неограниченный
pool_err=Не удалось сохранить устройство хранения
pool_ename=Отсутствует имя устройства хранения
pool_eclash=Устройство хранения с таким именем уже существует
pool_echild=Этот клиент не может быть удален, так как он используется $1
pool_emax=Отсутствует или недействительно максимальное количество заданий на том
pool_ereten=Отсутствует или недействительный срок хранения
pool_status=Показать объемы
pool_autolabel=Автоматически помечать префиксы томов
pool_maxvolsize=Максимальный размер тома (например, 5 ГБ для 5 гигабайт)
poolstatus_title=Объемы в бассейне
poolstatus_show=Показывать объемы в пуле:
poolstatus_ok=Ok
poolstatus_volumes=Объемы в выбранном пуле
poolstatus_name=Название тома
poolstatus_type=Тип носителя
poolstatus_first=Первый использованный
poolstatus_last=Последний раз был использован
poolstatus_bytes=Написано байтов
poolstatus_status=Режим резервного копирования
poolstatus_none=В этом пуле резервных копий нет томов.
poolstatus_never=Никогда
poolstatus_delete=Удалить выбранные тома
dvolumes_err=Не удалось удалить тома
dvolumes_enone=Не выбрано, ничего не выбрано
dvolumes_ebacula=Ошибка Bacula : $1
mount_title=Смонтировать или размонтировать
mount_header=Варианты подключения или отключения хранилища
mount_storage=Накопитель
mount_slot=Слот для автозагрузчика
mount_noslot=Никто
mount_slotno=Номер слота
mount_mount=Смонтировать Хранилище
mount_unmount=Un-Mount Storage
mount_run=Объем монтирования на устройстве хранения $1 ..
unmount_run=Отключение тома на устройстве хранения $1 ..
mount_done=.. успешно смонтирован.
unmount_done=.. успешно размонтирован.
mount_failed=.. монтировать не удалось! См. Сообщение об ошибке выше по причине.
unmount_failed=.. размонтировать не удалось! См. Сообщение об ошибке выше по причине.
mount_return=форма крепления
mount_err=Не удалось смонтировать устройство хранения
mount_eslot=Отсутствует или неверный номер слота
cancel_err=Не удалось отменить работу
cancel_enone=Не выбрано, ничего не выбрано
gjobs_title=Резервное копирование Bacula Group
gjobs_none=Задания резервного копирования группы Bacula еще не определены.
gjobs_add=Добавьте новое задание резервного копирования группы Bacula.
gjobs_delete=Удалить выбранные вакансии
gjobs_return=список групповых рабочих мест Bacula
gjobs_derr=Не удалось удалить задания группы Bacula
gjobs_client=Bacula группа для резервного копирования
gjob_title1=Создать задание резервного копирования группы Bacula
gjob_title2=Изменить задание резервного копирования группы Bacula
gjob_header=Детали задания резервного копирования группы Bacula
gjob_client=Bacula группа для резервного копирования
groups_title=Группы Bacula
groups_none=Группы Bacula еще не выбраны для резервного копирования.
groups_name=Имя группы
groups_port=FD порт
groups_add=Добавьте группу Bacula:
groups_ok=добавлять
groups_catalog=Каталог
groups_delete=Удалить выбранные группы Bacula
groups_return=список групп
groups_derr=Не удалось удалить группы
groups_noadd=Нет групп Bacula, которые можно было бы выбрать для резервного копирования.
groups_info=$1 ($2 участников)
groups_already=Все группы Bacula уже добавлены.
group_title1=Создать Bacula Group
group_title2=Редактировать Bacula Group
group_header=Подробная информация о группе Bacula для резервного копирования
group_egone=Группа больше не существует!
group_egone2=Группа Bacula больше не существует!
group_name=Название группы Bacula
group_port=Порт Bacula FD
group_err=Не удалось сохранить группу Bacula
group_eclash=Группа с таким именем уже существует
group_members=Хозяева в группе Bacula
sync_title=Синхронизация группы Bacula
sync_header=Параметры автоматической синхронизации клиента группы Bacula
sync_sched=Синхронизировать по расписанию?
sync_schedyes=Да, время от времени выбирается ниже ..
sync_err=Не удалось сохранить синхронизацию группы Bacula
log_create_client=Создан резервный клиент $1
log_modify_client=Модифицированный резервный клиент $1
log_delete_client=Удаленный резервный клиент $1
log_delete_clients=Удаленные клиенты резервного копирования $1
log_create_fileset=Создан набор файлов $1
log_modify_fileset=Модифицированный набор файлов $1
log_delete_fileset=Удаленный набор файлов $1
log_delete_filesets=Удаленные наборы файлов $1
log_create_job=Создано задание резервного копирования $1
log_modify_job=Измененное задание резервного копирования $1
log_delete_job=Удаленное задание резервного копирования $1
log_delete_jobs=Удалено $1 заданий резервного копирования
log_create_schedule=Создано расписание резервного копирования $1
log_modify_schedule=Изменено расписание резервного копирования $1
log_delete_schedule=Удалено расписание резервного копирования $1
log_delete_schedules=Удалено $1 расписаний резервного копирования
log_create_pool=Создан пул томов $1
log_modify_pool=Модифицированный пул томов $1
log_delete_pool=Удаленный пул томов $1
log_delete_pools=Удалены $1 тома пулов
log_create_storage=Создан демон хранения $1
log_modify_storage=Модифицированный демон хранения $1
log_delete_storage=Удаленный демон хранения $1
log_delete_storages=Удалено $1 демонов хранилища
log_create_device=Создано устройство хранения $1
log_modify_device=Модифицированное устройство хранения $1
log_delete_device=Удаленное устройство хранения $1
log_delete_devices=Удаленные устройства хранения $1
log_create_group=Создана группа Bacula $1
log_modify_group=Модифицированная группа Bacula $1
log_delete_group=Удаленная группа Bacula $1
log_delete_groups=Удаленные группы $1 Bacula
log_create_gjob=Создано задание резервного копирования группы Bacula $1
log_modify_gjob=Измененное задание резервного копирования группы Bacula $1
log_delete_gjob=Удаленное задание резервного копирования группы Bacula $1
log_delete_gjobs=Удалено $1 заданий резервного копирования группы Bacula
log_create_fdirector=Создан файл демона директора $1
log_modify_fdirector=Модифицированный файловый директор демона $1
log_delete_fdirector=Удаленный директор демона файлов $1
log_delete_fdirectors=Удалены директоры демонов файлов $1
log_create_sdirector=Создан директор демона хранилища $1
log_modify_sdirector=Модифицированный директор демона хранилища $1
log_delete_sdirector=Удаленный директор демона хранилища $1
log_delete_sdirectors=Удалены директоры демонов хранения $1
log_stop=Остановленные демоны Bacula
log_start=Запущены демоны Bacula
log_apply=Прикладная конфигурация
log_restart=Перезапущенные демоны Bacula
log_backup=Запущено задание резервного копирования $1
log_gbackup=Запущено задание резервного копирования группы Bacula $1
log_label=Помеченный демон хранения $1
log_mount=Установленное устройство хранения $1
log_unmount=Несмонтированное устройство хранения $1
log_sync=Сохраненная синхронизация группы Bacula
log_director=Сохранена глобальная конфигурация Bacula Director
log_file=Сохраненная конфигурация демона файла Bacula
log_storagec=Сохраненная конфигурация демона хранения Bacula
log_fixpass=Исправлен пароль консольной программы Bacula
director_title=Конфигурация директора
director_header=Варианты директора Global Bacula
director_name=Имя директора
director_port=Слушай на порт
director_jobs=Максимальное количество одновременных заданий
director_messages=Направление для сообщений
director_enone=Конфигурация директора не найдена!
director_dir=Рабочий каталог Bacula
director_err=Не удалось сохранить конфигурацию директора
director_ename=Отсутствует или неверное имя директора
director_eport=Отсутствует или неверный номер порта
director_ejobs=Отсутствует или неверное количество одновременных заданий
director_edir=Отсутствует или не существует рабочий каталог
tls_enable=Включить шифрование TLS?
tls_require=Принимать только TLS-соединения?
tls_verify=Проверять клиентов TLS?
tls_cert=Файл сертификата TLS PEM
tls_key=Файл ключей TLS PEM
tls_cacert=Файл центра сертификации TLS PEM
tls_none=Никто
tls_ecert=Отсутствует или не существует файл сертификата TLS
tls_ekey=Отсутствует или не существует файл ключа TLS
tls_ecacert=Отсутствует или не существует файл сертификата TLS CA
tls_ecerts=Для включения TLS необходимо указать файлы сертификатов, ключей и CA.
file_title=Конфигурация File Daemon
file_header=Опции демона файла Bacula
file_name=Имя файла-демона
file_port=Слушай на порт
file_jobs=Максимальное количество одновременных заданий
file_dir=Рабочий каталог Bacula
file_enone=Не найдена конфигурация файлового демона!
file_err=Не удалось сохранить конфигурацию файлового демона
file_ename=Отсутствует или неверно указано имя демона файла
file_eport=Отсутствует или неверный номер порта
file_ejobs=Отсутствует или неверное количество одновременных заданий
file_edir=Отсутствует или не существует рабочий каталог
fdirectors_title=File Daemon Director
fdirectors_none=Директора еще не определены.
fdirectors_name=Имя директора
fdirectors_pass=Принятый пароль
fdirectors_add=Добавить нового директора.
fdirectors_delete=Удалить выбранных директоров
fdirectors_return=список директоров
fdirectors_derr=Не удалось удалить директоров
fdirector_title1=Создать файл Daemon Director
fdirector_title2=Редактировать файл Daemon Director
fdirector_header=Подробности управления удаленным директором
fdirector_egone=Директора больше не существует!
fdirector_name=Имя директора
fdirector_pass=Принятый пароль
fdirector_monitor=Разрешить только мониторинг соединения?
fdirector_err=Не удалось сохранить файл с директором демона
fdirector_ename=Отсутствует имя директора
fdirector_eclash=Директор с таким именем уже существует
fdirector_epass=Пароль отсутствует
sdirectors_title=Директора хранилища
sdirectors_none=Директора еще не определены.
sdirectors_name=Имя директора
sdirectors_pass=Принятый пароль
sdirectors_add=Добавить нового директора.
sdirectors_delete=Удалить выбранных директоров
sdirectors_return=список директоров
sdirectors_derr=Не удалось удалить директоров
sdirector_title1=Создать Storage Daemon Director
sdirector_title2=Редактор Storage Daemon Director
sdirector_header=Подробности управления удаленным директором
sdirector_egone=Директора больше не существует!
sdirector_name=Имя директора
sdirector_pass=Принятый пароль
sdirector_monitor=Разрешить только мониторинг соединения?
sdirector_err=Не удалось сохранить директорию демона хранилища
sdirector_ename=Отсутствует имя директора
sdirector_eclash=Директор с таким именем уже существует
sdirector_epass=Пароль отсутствует
storagec_title=Конфигурация демона хранилища
storagec_header=Опции демона хранения Bacula
storagec_name=Имя демона
storagec_port=Слушай на порт
storagec_jobs=Максимальное количество одновременных заданий
storagec_enone=Не найдена конфигурация демона хранилища!
storagec_dir=Рабочий каталог Bacula
storagec_err=Не удалось сохранить конфигурацию демона хранилища
storagec_ename=Отсутствует или неверное имя демона хранилища
storagec_eport=Отсутствует или неверный номер порта
storagec_ejobs=Отсутствует или неверное количество одновременных заданий
storagec_edir=Отсутствует или не существует рабочий каталог
chooser_title=Выберите расписание
chooser_monthsh=Месяцы для выполнения
chooser_months=Месяцы года
chooser_all=Все
chooser_sel=Выбрано ниже ..
chooser_ok=Ok
chooser_timeh=Время дня для выполнения
chooser_time=Час и минута
chooser_weekdaysh=Дни недели для выполнения
chooser_weekdays=Дни недели
chooser_weekdaynums=Числа в месяц
chooser_daysh=Дни месяца для выполнения
chooser_days=Даты
chooser_err=Не удалось выбрать расписание
chooser_emonths=Месяцы не выбраны
chooser_eweekdays=Будние дни не выбраны
chooser_eweekdaynums=Не выбраны номера дня недели
chooser_edays=Не выбраны дни месяца
chooser_ehour=Отсутствует или неверный час дня
chooser_eminute=Недостающая или недействительная минута
chooser_emonthsrange=Выбранные месяцы должны быть смежными
chooser_eweekdaysrange=Выбранные будни должны быть смежными
chooser_eweekdaynumsrange=Выбранные номера будних дней должны быть смежными
chooser_edaysrange=Выбранные дни месяца должны быть смежными
weekdaynum_1=Первый
weekdaynum_2=второй
weekdaynum_3=Третий
weekdaynum_4=четвертый
weekdaynum_5=пятый
restore_title=Восстановление резервной копии
restore_title2=Восстановить резервную копию в Bacula Group
restore_title3=Восстановление Bacula Group Backup
restore_header=Параметры для восстановления предыдущего задания резервного копирования
restore_job=Работа для восстановления
restore_files=Файлы для восстановления
restore_client=Восстановить клиенту или группе
restore_storage=Восстановить с устройства хранения
restore_where=Восстановить в каталог
restore_where2=Другой корневой каталог
restore_ewhere=Отсутствующий каталог для восстановления в
restore_ok=Восстановить сейчас
restore_err=Не удалось восстановить резервную копию
restore_efiles=Файлы не введены
restore_ejob=Неверный идентификатор работы
restore_ejobfiles=Нет файлов для работы
restore_run=Запуск восстановления задания $1 на клиент $2 из хранилища $3 ..
restore_return=восстановить форму
restore_eok=.. работа не может быть начата
restore_running=.. восстановление сейчас запущено. По завершении результаты будут показаны ниже.
restore_running2=.. восстановление было начато в фоновом режиме.
restore_done=.. восстановление завершено.
restore_failed=.. восстановление не завершилось успешно. Проверьте сообщение об ошибке выше для деталей.
restore_clist=--Clients--
restore_glist=--Bacula Groups--
restore_eclient=Не выбран ни клиент, ни группа Bacula
restore_egroup=Bacula группа не существует
restore_jlist=Работа в одиночной системе
restore_njlist=--Bacula Groups Jobs--
restore_all=- Все клиенты в группе--
restore_eclients=Не найдены участники резервного копирования группы Bacula!
restore_eall1=Параметр <b>Все клиенты в резервной копии</b> должен быть выбран для <b>Восстановить клиент или группу</b> при выполнении группового восстановления задания Bacula.
restore_eall2=Параметр <b>Все клиенты в резервной копии</b> можно выбрать только для <b>Восстановить клиенту или группе</b> при выполнении восстановления групповой работы Bacula.
restore_enofiles=Ни один из выбранных файлов не находится в резервной копии
restore_level_F=Полный
restore_level_D=дифференцированный
restore_level_I=дополнительный
| {
"pile_set_name": "Github"
} |
package consul
import (
"fmt"
"net"
"os"
"sync"
"testing"
"time"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/hashicorp/serf/serf"
)
// testClientConfig returns a fresh temporary data directory and a client
// Config suitable for tests: all listeners bind to loopback on unique ports
// (via getPort), and the serf probe/gossip intervals are shortened so that
// membership converges quickly inside a test run.
//
// The caller owns the returned directory and should os.RemoveAll it.
func testClientConfig(t *testing.T, nodeName string) (string, *Config) {
	dir := tmpDir(t)
	config := DefaultConfig()
	config.Datacenter = "dc1"
	config.DataDir = dir
	// Go convention: parameters are lowerCamelCase (was `NodeName`, which
	// also shadowed the struct field name confusingly).
	config.NodeName = nodeName
	config.RPCAddr = &net.TCPAddr{
		IP:   []byte{127, 0, 0, 1},
		Port: getPort(),
	}
	config.SerfLANConfig.MemberlistConfig.BindAddr = "127.0.0.1"
	config.SerfLANConfig.MemberlistConfig.BindPort = getPort()
	config.SerfLANConfig.MemberlistConfig.ProbeTimeout = 200 * time.Millisecond
	config.SerfLANConfig.MemberlistConfig.ProbeInterval = time.Second
	config.SerfLANConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond
	return dir, config
}
// testClient creates a test client in the default datacenter "dc1" and
// returns its temporary data directory along with the running client.
func testClient(t *testing.T) (string, *Client) {
	dataDir, c := testClientDC(t, "dc1")
	return dataDir, c
}
// testClientDC creates a test client configured for the given datacenter.
// It fails the test immediately if the client cannot be constructed.
func testClientDC(t *testing.T, dc string) (string, *Client) {
	dataDir, conf := testClientConfig(t, "testco.internal")
	conf.Datacenter = dc

	c, err := NewClient(conf)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return dataDir, c
}
// testClientWithConfig creates a test client after giving the caller a
// chance to mutate the generated Config through the cb callback.
func testClientWithConfig(t *testing.T, cb func(c *Config)) (string, *Client) {
	nodeName := fmt.Sprintf("Client %d", getPort())
	dataDir, conf := testClientConfig(t, nodeName)
	cb(conf)

	c, err := NewClient(conf)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return dataDir, c
}
// TestClient_StartStop verifies that a freshly created client can be shut
// down cleanly without error.
func TestClient_StartStop(t *testing.T) {
	dataDir, c := testClient(t)
	defer os.RemoveAll(dataDir)

	err := c.Shutdown()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
}
// TestClient_JoinLAN verifies that a client can join a server's LAN Serf
// pool, that the client discovers exactly one consul server, and that both
// sides converge on a two-node member list.
func TestClient_JoinLAN(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	dir2, c1 := testClient(t)
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	// Try to join the server's LAN Serf port.
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfLANConfig.MemberlistConfig.BindPort)
	if _, err := c1.JoinLAN([]string{addr}); err != nil {
		t.Fatalf("err: %v", err)
	}
	testutil.WaitForResult(func() (bool, error) {
		return c1.servers.NumServers() == 1, nil
	}, func(err error) {
		t.Fatalf("expected consul server")
	})

	// Check the members on both sides. (Renamed locals to Go-idiomatic
	// MixedCaps; underscores in names are flagged by golint.)
	testutil.WaitForResult(func() (bool, error) {
		serverCheck := len(s1.LANMembers()) == 2
		clientCheck := len(c1.LANMembers()) == 2
		return serverCheck && clientCheck, nil
	}, func(err error) {
		t.Fatalf("bad len")
	})

	// Check we still track exactly one consul server after convergence.
	testutil.WaitForResult(func() (bool, error) {
		return c1.servers.NumServers() == 1, nil
	}, func(err error) {
		t.Fatalf("expected consul server")
	})
}
// TestClient_JoinLAN_Invalid starts a dc1 server and a client configured
// for a different datacenter ("other"), then verifies that the LAN join is
// rejected and that neither side's member list grows beyond itself.
func TestClient_JoinLAN_Invalid(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	dir2, c1 := testClientDC(t, "other")
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	// Try to join; the datacenter mismatch is expected to produce an error.
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfLANConfig.MemberlistConfig.BindPort)
	if _, err := c1.JoinLAN([]string{addr}); err == nil {
		t.Fatalf("should error")
	}

	// Give Serf a moment to settle, then confirm each side only sees itself.
	time.Sleep(50 * time.Millisecond)
	if len(s1.LANMembers()) != 1 {
		t.Fatalf("should not join")
	}
	if len(c1.LANMembers()) != 1 {
		t.Fatalf("should not join")
	}
}
// TestClient_JoinWAN_Invalid points a dc2 client's LAN join at a server's
// WAN Serf port, then verifies that the join fails and that neither the
// server's WAN pool nor the client's LAN pool gains a member.
func TestClient_JoinWAN_Invalid(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	dir2, c1 := testClientDC(t, "dc2")
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	// Try to join: note this deliberately dials the WAN port via JoinLAN,
	// which is expected to be rejected.
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfWANConfig.MemberlistConfig.BindPort)
	if _, err := c1.JoinLAN([]string{addr}); err == nil {
		t.Fatalf("should error")
	}

	// Give Serf a moment to settle, then confirm each pool only sees itself.
	time.Sleep(50 * time.Millisecond)
	if len(s1.WANMembers()) != 1 {
		t.Fatalf("should not join")
	}
	if len(c1.LANMembers()) != 1 {
		t.Fatalf("should not join")
	}
}
// TestClient_RPC verifies that an RPC issued before any server is known
// fails with structs.ErrNoServers, and that the same RPC succeeds once the
// client has joined a server's LAN pool.
func TestClient_RPC(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	dir2, c1 := testClient(t)
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	// Try an RPC before joining: no servers are known yet, so this must
	// fail with ErrNoServers.
	var out struct{}
	if err := c1.RPC("Status.Ping", struct{}{}, &out); err != structs.ErrNoServers {
		t.Fatalf("err: %v", err)
	}

	// Try to join
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfLANConfig.MemberlistConfig.BindPort)
	if _, err := c1.JoinLAN([]string{addr}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check the members
	if len(s1.LANMembers()) != 2 {
		t.Fatalf("bad len")
	}
	if len(c1.LANMembers()) != 2 {
		t.Fatalf("bad len")
	}

	// RPC should succeed once the client has discovered the server.
	testutil.WaitForResult(func() (bool, error) {
		err := c1.RPC("Status.Ping", struct{}{}, &out)
		return err == nil, err
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
// TestClient_RPC_Pool fires many concurrent RPCs at a single server to
// exercise contention when the connection pool opens new connections.
func TestClient_RPC_Pool(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	dir2, c1 := testClient(t)
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	// Try to join.
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfLANConfig.MemberlistConfig.BindPort)
	if _, err := c1.JoinLAN([]string{addr}); err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(s1.LANMembers()) != 2 || len(c1.LANMembers()) != 2 {
		t.Fatalf("Server has %v of %v expected members; Client has %v of %v expected members.", len(s1.LANMembers()), 2, len(c1.LANMembers()), 2)
	}

	// Blast out a bunch of RPC requests at the same time to try to get
	// contention opening new connections.
	var wg sync.WaitGroup
	for i := 0; i < 150; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var out struct{}
			testutil.WaitForResult(func() (bool, error) {
				err := c1.RPC("Status.Ping", struct{}{}, &out)
				return err == nil, err
			}, func(err error) {
				// Fatalf must only be called from the goroutine running
				// the test function (testing package docs); use Errorf in
				// worker goroutines instead.
				t.Errorf("err: %v", err)
			})
		}()
	}
	wg.Wait()
}
// TestClient_RPC_ConsulServerPing spins up several servers, joins a client
// to all of them, and then pings each server exactly once by rotating the
// client's server list via NotifyFailedServer after each ping.
func TestClient_RPC_ConsulServerPing(t *testing.T) {
	var servers []*Server
	const numServers = 5

	for n := numServers; n > 0; n-- {
		// Only the first server created bootstraps the cluster.
		var bootstrap bool
		if n == numServers {
			bootstrap = true
		}
		dir, s := testServerDCBootstrap(t, "dc1", bootstrap)
		defer os.RemoveAll(dir)
		defer s.Shutdown()

		servers = append(servers, s)
	}

	const numClients = 1
	clientDir, c := testClient(t)
	defer os.RemoveAll(clientDir)
	defer c.Shutdown()

	// Join all servers.
	for _, s := range servers {
		addr := fmt.Sprintf("127.0.0.1:%d",
			s.config.SerfLANConfig.MemberlistConfig.BindPort)
		if _, err := c.JoinLAN([]string{addr}); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Sleep to allow Serf to sync, shuffle, and let the shuffle complete
	time.Sleep(1 * time.Second)
	c.servers.ResetRebalanceTimer()
	time.Sleep(1 * time.Second)

	if len(c.LANMembers()) != numServers+numClients {
		t.Errorf("bad len: %d", len(c.LANMembers()))
	}
	for _, s := range servers {
		if len(s.LANMembers()) != numServers+numClients {
			t.Errorf("bad len: %d", len(s.LANMembers()))
		}
	}

	// Ping each server in the list
	var pingCount int
	for range servers {
		time.Sleep(1 * time.Second)
		s := c.servers.FindServer()
		ok, err := c.connPool.PingConsulServer(s)
		if !ok {
			t.Errorf("Unable to ping server %v: %s", s.String(), err)
		}
		pingCount++

		// Artificially fail the server in order to rotate the server
		// list
		c.servers.NotifyFailedServer(s)
	}

	if pingCount != numServers {
		t.Errorf("bad len: %d/%d", pingCount, numServers)
	}
}
// TestClient_RPC_TLS verifies that RPC works end-to-end when the server
// requires verified incoming/outgoing TLS and the client verifies its
// outgoing connections.
func TestClient_RPC_TLS(t *testing.T) {
	// Server with full TLS verification enabled.
	dir1, conf1 := testServerConfig(t, "a.testco.internal")
	conf1.VerifyIncoming = true
	conf1.VerifyOutgoing = true
	configureTLS(conf1)
	s1, err := NewServer(conf1)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	// Client that verifies its outgoing TLS connections.
	dir2, conf2 := testClientConfig(t, "b.testco.internal")
	conf2.VerifyOutgoing = true
	configureTLS(conf2)
	c1, err := NewClient(conf2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	// Try an RPC before joining: no servers known yet, must fail.
	var out struct{}
	if err := c1.RPC("Status.Ping", struct{}{}, &out); err != structs.ErrNoServers {
		t.Fatalf("err: %v", err)
	}

	// Try to join
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfLANConfig.MemberlistConfig.BindPort)
	if _, err := c1.JoinLAN([]string{addr}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check the members
	if len(s1.LANMembers()) != 2 {
		t.Fatalf("bad len")
	}
	if len(c1.LANMembers()) != 2 {
		t.Fatalf("bad len")
	}

	// RPC should succeed over the TLS connection.
	testutil.WaitForResult(func() (bool, error) {
		err := c1.RPC("Status.Ping", struct{}{}, &out)
		return err == nil, err
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
// TestClientServer_UserEvent fires a Serf user event through the server's
// Internal.EventFire RPC and verifies that both the client and the server
// receive it via their UserEventHandler callbacks.
func TestClientServer_UserEvent(t *testing.T) {
	// Buffered channels capture events delivered to each side's handler.
	clientOut := make(chan serf.UserEvent, 2)
	dir1, c1 := testClientWithConfig(t, func(conf *Config) {
		conf.UserEventHandler = func(e serf.UserEvent) {
			clientOut <- e
		}
	})
	defer os.RemoveAll(dir1)
	defer c1.Shutdown()

	serverOut := make(chan serf.UserEvent, 2)
	dir2, s1 := testServerWithConfig(t, func(conf *Config) {
		conf.UserEventHandler = func(e serf.UserEvent) {
			serverOut <- e
		}
	})
	defer os.RemoveAll(dir2)
	defer s1.Shutdown()

	// Try to join
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfLANConfig.MemberlistConfig.BindPort)
	if _, err := c1.JoinLAN([]string{addr}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait for the leader
	testutil.WaitForLeader(t, s1.RPC, "dc1")

	// Check the members
	testutil.WaitForResult(func() (bool, error) {
		return len(c1.LANMembers()) == 2 && len(s1.LANMembers()) == 2, nil
	}, func(err error) {
		t.Fatalf("bad len")
	})

	// Fire the user event through the server's internal RPC endpoint.
	codec := rpcClient(t, s1)
	event := structs.EventFireRequest{
		Name:       "foo",
		Datacenter: "dc1",
		Payload:    []byte("baz"),
	}
	if err := msgpackrpc.CallWithCodec(codec, "Internal.EventFire", &event, nil); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait for both deliveries (one per side), with a 10s overall timeout.
	var clientReceived, serverReceived bool
	for i := 0; i < 2; i++ {
		select {
		case e := <-clientOut:
			switch e.Name {
			case "foo":
				clientReceived = true
			default:
				t.Fatalf("Bad: %#v", e)
			}
		case e := <-serverOut:
			switch e.Name {
			case "foo":
				serverReceived = true
			default:
				t.Fatalf("Bad: %#v", e)
			}
		case <-time.After(10 * time.Second):
			t.Fatalf("timeout")
		}
	}
	if !serverReceived || !clientReceived {
		t.Fatalf("missing events")
	}
}
// TestClient_Encrypted checks the Encrypted() accessor: a client without a
// Serf secret key reports unencrypted, while one configured with a 16-byte
// key reports encrypted.
func TestClient_Encrypted(t *testing.T) {
	dir1, plainClient := testClient(t)
	defer os.RemoveAll(dir1)
	defer plainClient.Shutdown()

	key := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
	dir2, keyedClient := testClientWithConfig(t, func(c *Config) {
		c.SerfLANConfig.MemberlistConfig.SecretKey = key
	})
	defer os.RemoveAll(dir2)
	defer keyedClient.Shutdown()

	if plainClient.Encrypted() {
		t.Fatalf("should not be encrypted")
	}
	if !keyedClient.Encrypted() {
		t.Fatalf("should be encrypted")
	}
}
| {
"pile_set_name": "Github"
} |
<?php
/*
* Opulence
*
* @link https://www.opulencephp.com
* @copyright Copyright (C) 2017 David Young
* @license https://github.com/opulencephp/Opulence/blob/master/LICENSE.md
*/
namespace Opulence\Authentication;
/**
* Defines the current authentication context
*/
/**
 * Defines the current authentication context: the subject being
 * authenticated (if any) together with its authentication status
 */
class AuthenticationContext implements IAuthenticationContext
{
    /** @var ISubject|null The subject currently held by this context */
    private $subject = null;
    /** @var string One of the AuthenticationStatusTypes constants */
    private $status = AuthenticationStatusTypes::UNAUTHENTICATED;

    /**
     * @param ISubject|null $subject The subject to store, if any
     * @param string $status The initial authentication status
     */
    public function __construct(
        ISubject $subject = null,
        string $status = AuthenticationStatusTypes::UNAUTHENTICATED
    ) {
        if (null !== $subject) {
            $this->setSubject($subject);
        }

        $this->setStatus($status);
    }

    /**
     * @inheritdoc
     */
    public function getStatus() : string
    {
        return $this->status;
    }

    /**
     * @inheritdoc
     */
    public function getSubject()
    {
        return $this->subject;
    }

    /**
     * @inheritdoc
     */
    public function isAuthenticated() : bool
    {
        return AuthenticationStatusTypes::AUTHENTICATED === $this->status;
    }

    /**
     * @inheritdoc
     */
    public function setStatus(string $status)
    {
        $this->status = $status;
    }

    /**
     * @inheritdoc
     */
    public function setSubject(ISubject $subject)
    {
        $this->subject = $subject;
    }
}
| {
"pile_set_name": "Github"
} |
---
title: Custom Mechanism
order: 9
---
This document will introduce custom mechanism in G6, including custom node, custom edge, custom behavior, custom layout. All of them are mounted on global G6, called by `G6.registerXXX`.
## G6.registerNode(nodeName, options, extendedNodeName)
When the built-in nodes cannot satisfy your requirements, you can customize a node type with `G6.registerNode(nodeName, options, extendedNodeName)`.
### Parameters
| Name | Type | Required | Description |
| --- | --- | --- | --- |
| nodeName | String | true | The unique name of the custom node. |
| options | Object | true | The configurations of custom node, include functions of complete life cycles. Please refer to [Shape Doc](/en/docs/manual/middle/elements/shape/shape-keyshape) and [Custom Item API](/en/docs/api/CustomItem). |
| extendedNodeName | String | false | Specifies the inherited node type of the custom node. Declare this property if you want to extend a built-in node. [Built-in Nodes](/en/docs/manual/middle/elements/nodes/defaultNode) document. |
### Usage
```javascript
G6.registerNode(
'nodeName',
{
/**
* Draw this type of node with label
* @param {Object} cfg The configurations of this type of node
* @param {G.Group} group Graphics group, the container of the shapes of the node
* @return {G.Shape} The keyShape of the type of node. The keyShape can be obtained by node.get('keyShape')
*/
draw(cfg, group) {},
/**
* Operations to be executed after drawing. No operation by default
* @param {Object} cfg The configurations of this type of node
* @param {G.Group} group Graphics group, the container of the shapes of the node
*/
afterDraw(cfg, group) {},
/**
* Update the node with label
* @override
* @param {Object} cfg The configurations of this type of node
* @param {Node} node The node
*/
update(cfg, node) {},
/**
* Operations to be executed after updating.
* @override
* @param {Object} cfg The configurations of this type of node
* @param {Node} node The node
*/
afterUpdate(cfg, node) {},
/**
* After graph.setItemState(item, state, value) is called, this function will do some responses.
* @param {String} name The name of state
* @param {Object} value The value of the state
* @param {Node} node The node
*/
setState(name, value, node) {},
/**
* Get the anchor points
* @param {Object} cfg The configurations of this type of node
* @return {Array|null} The array of anchor points. There is no anchor points if it is null.
*/
getAnchorPoints(cfg) {},
},
'extendedNodeName',
);
```
## G6.registerEdge(edgeName, options, extendedEdgeName)
When the built-in edges cannot satisfy your requirements, you can customize an edge type with `G6.registerEdge(edgeName, options, extendedEdgeName)`.
### Parameters
| Name | Type | Required | Description |
| --- | --- | --- | --- |
| edgeName | String | true | The unique name of the custom edge. |
| options | Object | true | The configurations of custom edge, include functions of complete life cycles. Please refer to [Shape Doc](/en/docs/manual/middle/elements/shape/shape-keyshape) and [Custom Item API](/en/docs/api/CustomItem). |
| extendedEdgeName | String | false | Specifies the inherited edge type of the custom edge. Declare this property if you want to extend a built-in edge. [Built-in Edges](/en/docs/manual/middle/elements/edges/defaultEdge) document. |
### Usage
```javascript
G6.registerEdge(
'edgeName',
{
/**
* Draw this type of edge with label
* @param {Object} cfg The configurations of this type of edge
* @param {G.Group} group Graphics group, the container of the shapes of the edge
* @return {G.Shape} The keyShape of the type of edge. The keyShape can be obtained by edge.get('keyShape')
*/
draw(cfg, group) {},
/**
* Operations to be executed after drawing. No operation by default
* @param {Object} cfg The configurations of this type of edge
* @param {G.Group} group Graphics group, the container of the shapes of the edge
*/
afterDraw(cfg, group) {},
/**
* Update the edge with label
* @override
* @param {Object} cfg The configurations of this type of edge
* @param {Edge} edge The edge
*/
update(cfg, edge) {},
/**
* Operations to be executed after updating.
* @override
* @param {Object} cfg The configurations of this type of edge
* @param {Edge} edge The edge
*/
afterUpdate(cfg, edge) {},
/**
* After [`graph.setItemState(item, state, value)`] is called, this function will do some responses.
* @param {String} name The name of state
* @param {Object} value The value of the state
* @param {Edge} edge The edge
*/
setState(name, value, edge) {},
},
'extendedEdgeName',
);
```
## G6.registerCombo(comboName, options, extendedComboName)
When the built-in combos cannot satisfy your requirements, you can customize a combo type with `G6.registerCombo(comboName, options, extendedComboName)`.
### Parameters
| Name | Type | Required | Description |
| --- | --- | --- | --- |
| comboName | String | true | The unique name of the custom combo. |
| options | Object | true | The configurations of custom combo, include functions of complete life cycles. Please refer to [Shape Doc](/en/docs/manual/middle/elements/shape/shape-keyshape) and [Custom Item API](/en/docs/api/CustomItem). |
| extendedComboName | String | false | Specifies the inherited combo type of the custom combo. Declare this property if you want to extend a built-in combo. [Built-in Combos](/en/docs/manual/middle/elements/combos/defaultCombo) document. |
### Usage
```javascript
G6.registerCombo(
'comboName',
{
/**
* Draw this type of combo with label
* @param {Object} cfg The configurations of this type of combo
* @param {G.Group} group Graphics group, the container of the shapes in the combo
* @return {G.Shape} The keyShape of the type of combo. The keyShape can be obtained by combo.get('keyShape')
*/
draw(cfg, group) {},
/**
* Operations to be executed after drawing. No operation by default
* @param {Object} cfg The configurations of this type of combo
* @param {G.Group} group Graphics group, the container of the shapes in the combo
*/
afterDraw(cfg, group) {},
/**
* Update the combo with label
* @override
* @param {Object} cfg The configurations of this type of combo
* @param {Combo} combo The combo
*/
update(cfg, combo) {},
/**
* Operations to be executed after updating.
* @override
* @param {Object} cfg The configurations of this type of combo
* @param {Combo} combo The combo
*/
afterUpdate(cfg, combo) {},
/**
* After graph.setItemState(item, state, value) is called, this function will do some responses.
* @param {String} name The name of state
* @param {Object} value The value of the state
* @param {Combo} combo The combo
*/
setState(name, value, combo) {},
/**
* Get the anchor points
* @param {Object} cfg The configurations of this type of combo
* @return {Array|null} The array of anchor points. There is no anchor points if it is null.
*/
getAnchorPoints(cfg) {},
},
'extendedComboName',
);
```
## G6.registerBehavior(behaviorName, behavior)
When the [built-in Behaviors](/en/docs/manual/middle/states/defaultBehavior) cannot satisfy your requirements, you can customize a Behavior with `G6.registerBehavior(behaviorName, behavior)`. See [Behavior API](/en/docs/api/Behavior) for detail.
### Parameters
| Name | Type | Required | Description |
| --- | --- | --- | --- |
| behaviorName | String | true | The name of custom Behavior. |
| behavior | Object | true | The configurations of custom Behavior. For more information, please refer to [Behavior API](/en/docs/api/Behavior). |
### Usage
```javascript
// Custom a type of Behavior
G6.registerBehavior('behaviorName', {
// Bind the event and its callback
getEvents() {
return {
'node:click': 'onClick',
mousemove: 'onMousemove',
'edge:click': 'onEdgeClick',
};
},
/**
* Handle the callback for node:click
* @override
* @param {Object} evt The handler
*/
onClick(evt) {
const node = evt.item;
const graph = this.graph;
const point = { x: evt.x, y: evt.y };
const model = node.getModel();
// TODO
},
/**
* Handle the callback for mousemove
* @override
* @param {Object} evt The handler
*/
onMousemove(evt) {
// TODO
},
/**
* Handle the callback for :click
* @override
* @param {Object} evt The handler
*/
onEdgeClick(evt) {
// TODO
},
});
```
## G6.registerLayout(layoutName, layout)
When the built-in Layouts cannot satisfy your requirements, you can customize a Layout with `G6.registerLayout(layoutName, layout)`.
### Parameters
| Name | Type | Required | Description |
| --- | --- | --- | --- |
| layoutName | String | true | The name of the custom layout. |
| layout | Object | true | The configurations of the custom layout. For more information, please refer to [Layout API](/en/docs/manual/middle/layout/custom-layout). |
### Usage
```javascript
G6.registerLayout('layoutName', {
/**
* The default configurations will be mixed by configurations from user
*/
getDefaultCfg() {
return {};
},
/**
* Initialize
* @param {Object} data The data
*/
init(data) {
const self = this;
self.nodes = data.nodes;
self.edges = data.edges;
},
/**
* Execute the layout
*/
execute() {
// TODO
},
/**
* Layout with the data
* @param {Object} data The data
*/
layout(data) {
const self = this;
self.init(data);
self.execute();
},
/**
* Update the configurations of the layout, but it does not execute the layout
* @param {Object} cfg The new configurations
*/
updateCfg(cfg) {
const self = this;
Util.mix(self, cfg);
},
/**
* Destroy the layout
*/
destroy() {
const self = this;
self.positions = null;
self.nodes = null;
self.edges = null;
self.destroyed = true;
},
});
```
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.internal.cache;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.logging.log4j.Logger;
import org.apache.geode.SystemFailure;
import org.apache.geode.distributed.internal.ClusterDistributionManager;
import org.apache.geode.distributed.internal.HighPriorityDistributionMessage;
import org.apache.geode.distributed.internal.MessageWithReply;
import org.apache.geode.distributed.internal.ReplyMessage;
import org.apache.geode.internal.serialization.DeserializationContext;
import org.apache.geode.internal.serialization.SerializationContext;
import org.apache.geode.logging.internal.log4j.api.LogService;
/**
 * High-priority distribution message sent when a member closes its cache.
 * On the receiving side it cleans up partitioned-region meta data recorded
 * for the sender and then acknowledges via a {@link ReplyMessage}.
 */
public class CloseCacheMessage extends HighPriorityDistributionMessage implements MessageWithReply {
  private static final Logger logger = LogService.getLogger();

  // Id of the reply processor on the sending member that awaits the ack.
  private int processorId;

  @Override
  public int getProcessorId() {
    return this.processorId;
  }

  @Override
  public boolean sendViaUDP() {
    return true;
  }

  @Override
  protected void process(ClusterDistributionManager dm) {
    // Now that Cache.close calls close on each region we don't need
    // any of the following code so we can just do an immediate ack.
    boolean systemError = false;
    try {
      try {
        PartitionedRegionHelper.cleanUpMetaDataOnNodeFailure(dm.getCache(), getSender());
      } catch (VirtualMachineError err) {
        // On a VM error, record it and re-throw; no ack is sent in this case
        // (see the systemError flag below).
        systemError = true;
        SystemFailure.initiateFailure(err);
        // If this ever returns, rethrow the error. We're poisoned
        // now, so don't let this thread continue.
        throw err;
      } catch (Throwable t) {
        // Whenever you catch Error or Throwable, you must also
        // catch VirtualMachineError (see above). However, there is
        // _still_ a possibility that you are dealing with a cascading
        // error condition, so you also need to check to see if the JVM
        // is still usable:
        SystemFailure.checkFailure();
        if (logger.isDebugEnabled()) {
          logger.debug("Throwable caught while processing cache close message from:{}", getSender(),
              t);
        }
      }
    } finally {
      // Ack the sender unless the JVM is failing; cleanup errors other than
      // VM errors are logged and still acknowledged.
      if (!systemError) {
        ReplyMessage.send(getSender(), processorId, null, dm, false, false, true);
      }
    }
  }

  public void setProcessorId(int id) {
    this.processorId = id;
  }

  @Override
  public String toString() {
    return super.toString() + " (processorId=" + processorId + ")";
  }

  @Override
  public int getDSFID() {
    return CLOSE_CACHE_MESSAGE;
  }

  @Override
  public void fromData(DataInput in,
      DeserializationContext context) throws IOException, ClassNotFoundException {
    super.fromData(in, context);
    this.processorId = in.readInt();
  }

  @Override
  public void toData(DataOutput out,
      SerializationContext context) throws IOException {
    super.toData(out, context);
    out.writeInt(this.processorId);
  }
}
| {
"pile_set_name": "Github"
} |
.. apidocs file containing the API Documentation
.. _metricsdocs:
Metrics Module (API Reference)
==============================
.. automodule:: scikitplot.metrics
:members: plot_confusion_matrix, plot_roc, plot_ks_statistic, plot_precision_recall, plot_silhouette, plot_calibration_curve, plot_cumulative_gain, plot_lift_curve | {
"pile_set_name": "Github"
} |
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011-2013 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::fvPatch
Description
A finiteVolume patch using a polyPatch and a fvBoundaryMesh
SourceFiles
fvPatch.C
fvPatchNew.C
\*---------------------------------------------------------------------------*/
#ifndef fvPatch_H
#define fvPatch_H
#include "polyPatch.H"
#include "labelList.H"
#include "SubList.H"
#include "typeInfo.H"
#include "tmp.H"
#include "primitiveFields.H"
#include "SubField.H"
#include "fvPatchFieldsFwd.H"
#include "autoPtr.H"
#include "runTimeSelectionTables.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
class fvBoundaryMesh;
class surfaceInterpolation;
/*---------------------------------------------------------------------------*\
Class fvPatch Declaration
\*---------------------------------------------------------------------------*/
class fvPatch
{
    // Private data

        //- Reference to the underlying polyPatch
        const polyPatch& polyPatch_;

        //- Reference to boundary mesh
        const fvBoundaryMesh& boundaryMesh_;


    // Private Member Functions

        //- Disallow construct as copy
        fvPatch(const fvPatch&);

        //- Disallow assignment
        void operator=(const fvPatch&);


protected:

    // Protected Member Functions

        //- Make patch weighting factors
        virtual void makeWeights(scalargpuField&) const;

        //- Initialise the patches for moving points
        virtual void initMovePoints();

        //- Correct patches after moving points
        virtual void movePoints();


public:

    typedef fvBoundaryMesh BoundaryMesh;

    friend class fvBoundaryMesh;
    friend class surfaceInterpolation;

    //- Runtime type information
    TypeName(polyPatch::typeName_());


    // Declare run-time constructor selection tables

        declareRunTimeSelectionTable
        (
            autoPtr,
            fvPatch,
            polyPatch,
            (const polyPatch& patch, const fvBoundaryMesh& bm),
            (patch, bm)
        );


    // Constructors

        //- Construct from polyPatch and fvBoundaryMesh
        fvPatch(const polyPatch&, const fvBoundaryMesh&);


    // Selectors

        //- Return a pointer to a new patch created on freestore from polyPatch
        static autoPtr<fvPatch> New
        (
            const polyPatch&,
            const fvBoundaryMesh&
        );


    //- Destructor
    virtual ~fvPatch();


    // Member Functions

        // Access

            //- Return the polyPatch
            const polyPatch& patch() const
            {
                return polyPatch_;
            }

            //- Return name
            const word& name() const
            {
                return polyPatch_.name();
            }

            //- Return start label of this patch in the polyMesh face list
            label start() const
            {
                return polyPatch_.start();
            }

            //- Return size
            virtual label size() const
            {
                return polyPatch_.size();
            }

            //- Return true if this patch is coupled
            virtual bool coupled() const
            {
                return polyPatch_.coupled();
            }

            //- Return true if the given type is a constraint type
            static bool constraintType(const word& pt);

            //- Return a list of all the constraint patch types
            static wordList constraintTypes();

            //- Return the index of this patch in the fvBoundaryMesh
            label index() const
            {
                return polyPatch_.index();
            }

            //- Return boundaryMesh reference
            const fvBoundaryMesh& boundaryMesh() const
            {
                return boundaryMesh_;
            }

            //- Return faceCells (GPU list)
            virtual const labelgpuList& faceCells() const;

            //- Return faceCells (host-side list)
            virtual const labelList& faceCellsHost() const;


        // Access functions for geometrical data

            //- Return face centres
            const vectorgpuField& Cf() const;

            //- Return neighbour cell centres
            tmp<vectorgpuField> Cn() const;

            //- Return face area vectors
            const vectorgpuField& Sf() const;

            //- Return face area magnitudes
            const scalargpuField& magSf() const;

            //- Return face normals
            tmp<vectorgpuField> nf() const;

            //- Return cell-centre to face-centre vector
            //  except for coupled patches for which the cell-centre
            //  to coupled-cell-centre vector is returned
            virtual tmp<vectorgpuField> delta() const;


        // Access functions for demand driven data

            //- Return patch weighting factors
            const scalargpuField& weights() const;

            //- Return the face - cell distance coefficient
            //  except for coupled patches for which the cell-centre
            //  to coupled-cell-centre distance coefficient is returned
            const scalargpuField& deltaCoeffs() const;


        // Evaluation functions

            //- Return given internal field next to patch as patch field
            template<class Type>
            tmp<gpuField<Type> > patchInternalField(const gpuList<Type>&) const;

            //- Return given internal field next to patch as patch field
            template<class Type>
            void patchInternalField(const gpuList<Type>&, gpuField<Type>&) const;

            //- Return given internal field next to patch as patch field
            template<class Type>
            tmp<Field<Type> > patchInternalField(const UList<Type>&) const;

            //- Return given internal field next to patch as patch field
            template<class Type>
            void patchInternalField(const UList<Type>&, Field<Type>&) const;

            //- Return the corresponding patchField of the named field
            template<class GeometricField, class Type>
            const typename GeometricField::PatchFieldType& patchField
            (
                const GeometricField&
            ) const;

            //- Lookup and return the patchField of the named field from the
            //  local objectRegistry.
            //  N.B. The dummy pointer arguments are used if this function is
            //  instantiated within a templated function to avoid a bug in gcc.
            //  See inletOutletFvPatchField.C and outletInletFvPatchField.C
            template<class GeometricField, class Type>
            const typename GeometricField::PatchFieldType& lookupPatchField
            (
                const word& name,
                const GeometricField* = NULL,
                const Type* = NULL
            ) const;
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#ifdef NoRepository
# include "fvPatchTemplates.C"
#endif
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
*/
package akka.grpc.internal
import akka.util.ByteString
/**
 * The gRPC "identity" message encoding: a no-op codec whose compress and
 * uncompress both return the input bytes unchanged.
 */
object Identity extends Codec {
  override val name = "identity"
  override def compress(bytes: ByteString): ByteString = bytes
  override def uncompress(bytes: ByteString): ByteString = bytes
}
| {
"pile_set_name": "Github"
} |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Tegra host1x Command DMA
*
* Copyright (c) 2010-2013, NVIDIA Corporation.
*/
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include "../cdma.h"
#include "../channel.h"
#include "../dev.h"
#include "../debug.h"
/*
 * Put the restart at the end of pushbuffer memory.
 *
 * The word just past the pushbuffer contents is patched with a RESTART
 * opcode so command DMA wraps back to offset 0 when it reaches the end.
 */
static void push_buffer_init(struct push_buffer *pb)
{
	*(u32 *)(pb->mapped + pb->size) = host1x_opcode_restart(0);
}
/*
 * Increment timedout buffer's syncpt via CPU.
 *
 * getptr, syncval and nr_slots are unused here; presumably kept to match
 * the shared cdma timeout callback signature -- TODO confirm against the
 * cdma_ops declaration.
 */
static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
				  u32 syncpt_incrs, u32 syncval, u32 nr_slots)
{
	unsigned int i;

	/* apply the increments the hung job never performed */
	for (i = 0; i < syncpt_incrs; i++)
		host1x_syncpt_incr(cdma->timeout.syncpt);

	/* after CPU incr, ensure shadow is up to date */
	host1x_syncpt_load(cdma->timeout.syncpt);
}
/*
 * Start channel DMA.
 *
 * Programs the channel's DMASTART/DMAPUT/DMAEND registers from the
 * pushbuffer state, resets DMAGET, and then releases the DMASTOP bit to
 * let the command DMA run. No-op if DMA is already running.
 */
static void cdma_start(struct host1x_cdma *cdma)
{
	struct host1x_channel *ch = cdma_to_channel(cdma);
	u64 start, end;

	if (cdma->running)
		return;

	cdma->last_pos = cdma->push_buffer.pos;

	start = cdma->push_buffer.dma;
	/* NOTE(review): end is size + 4 without adding the dma base --
	 * looks like DMAEND takes a value relative to DMASTART here; confirm
	 * against the host1x channel register documentation. */
	end = cdma->push_buffer.size + 4;

	/* keep DMA stopped while the pointers are reprogrammed */
	host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
			 HOST1X_CHANNEL_DMACTRL);

	/* set base, put and end pointer */
	host1x_ch_writel(ch, lower_32_bits(start), HOST1X_CHANNEL_DMASTART);
#if HOST1X_HW >= 6
	host1x_ch_writel(ch, upper_32_bits(start), HOST1X_CHANNEL_DMASTART_HI);
#endif
	host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
#if HOST1X_HW >= 6
	host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMAPUT_HI);
#endif
	host1x_ch_writel(ch, lower_32_bits(end), HOST1X_CHANNEL_DMAEND);
#if HOST1X_HW >= 6
	host1x_ch_writel(ch, upper_32_bits(end), HOST1X_CHANNEL_DMAEND_HI);
#endif

	/* reset GET */
	host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP |
			 HOST1X_CHANNEL_DMACTRL_DMAGETRST |
			 HOST1X_CHANNEL_DMACTRL_DMAINITGET,
			 HOST1X_CHANNEL_DMACTRL);

	/* start the command DMA */
	host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL);

	cdma->running = true;
}
/*
 * Similar to cdma_start(), but rather than starting from an idle
 * state (where DMA GET is set to DMA PUT), on a timeout we restore
 * DMA GET from an explicit value (so DMA may again be pending).
 */
static void cdma_timeout_restart(struct host1x_cdma *cdma, u32 getptr)
{
	struct host1x *host1x = cdma_to_host1x(cdma);
	struct host1x_channel *ch = cdma_to_channel(cdma);
	u64 start, end;

	if (cdma->running)
		return;

	cdma->last_pos = cdma->push_buffer.pos;

	/* stop DMA while the channel is reprogrammed */
	host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
			 HOST1X_CHANNEL_DMACTRL);

	start = cdma->push_buffer.dma;
	/* one extra opcode slot past the buffer (see push_buffer_init()) */
	end = cdma->push_buffer.size + 4;

	/* set base, end pointer (all of memory) */
	host1x_ch_writel(ch, lower_32_bits(start), HOST1X_CHANNEL_DMASTART);
#if HOST1X_HW >= 6
	host1x_ch_writel(ch, upper_32_bits(start), HOST1X_CHANNEL_DMASTART_HI);
#endif
	host1x_ch_writel(ch, lower_32_bits(end), HOST1X_CHANNEL_DMAEND);
#if HOST1X_HW >= 6
	host1x_ch_writel(ch, upper_32_bits(end), HOST1X_CHANNEL_DMAEND_HI);
#endif

	/* set GET, by loading the value in PUT (then reset GET) */
	host1x_ch_writel(ch, getptr, HOST1X_CHANNEL_DMAPUT);
	host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP |
			 HOST1X_CHANNEL_DMACTRL_DMAGETRST |
			 HOST1X_CHANNEL_DMACTRL_DMAINITGET,
			 HOST1X_CHANNEL_DMACTRL);

	dev_dbg(host1x->dev,
		"%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n", __func__,
		host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
		host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT),
		cdma->last_pos);

	/* deassert GET reset and set PUT */
	host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
			 HOST1X_CHANNEL_DMACTRL);
	host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);

	/* start the command DMA */
	host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL);

	cdma->running = true;
}
/*
* Kick channel DMA into action by writing its PUT offset (if it has changed)
*/
static void cdma_flush(struct host1x_cdma *cdma)
{
struct host1x_channel *ch = cdma_to_channel(cdma);
if (cdma->push_buffer.pos != cdma->last_pos) {
host1x_ch_writel(ch, cdma->push_buffer.pos,
HOST1X_CHANNEL_DMAPUT);
cdma->last_pos = cdma->push_buffer.pos;
}
}
/*
 * Stop channel DMA, first draining any work still queued on the channel.
 */
static void cdma_stop(struct host1x_cdma *cdma)
{
	struct host1x_channel *ch = cdma_to_channel(cdma);

	mutex_lock(&cdma->lock);

	if (cdma->running) {
		/* wait for outstanding submissions to complete first */
		host1x_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
		host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
				 HOST1X_CHANNEL_DMACTRL);
		cdma->running = false;
	}

	mutex_unlock(&cdma->lock);
}
/*
 * Stop (stop == true) or resume (stop == false) the channel's command
 * processor. On HOST1X_HW >= 6 each channel has its own CMDPROC_STOP
 * register; older hardware packs one stop bit per channel into a shared
 * sync register, so we read-modify-write the channel's bit.
 */
static void cdma_hw_cmdproc_stop(struct host1x *host, struct host1x_channel *ch,
				 bool stop)
{
#if HOST1X_HW >= 6
	host1x_ch_writel(ch, stop ? 0x1 : 0x0, HOST1X_CHANNEL_CMDPROC_STOP);
#else
	u32 cmdproc_stop = host1x_sync_readl(host, HOST1X_SYNC_CMDPROC_STOP);
	if (stop)
		cmdproc_stop |= BIT(ch->id);
	else
		cmdproc_stop &= ~BIT(ch->id);
	host1x_sync_writel(host, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
#endif
}
/*
 * Trigger a hardware teardown of the channel. Newer hardware has a
 * per-channel TEARDOWN register; older hardware uses a per-channel bit
 * in a shared sync register.
 */
static void cdma_hw_teardown(struct host1x *host, struct host1x_channel *ch)
{
#if HOST1X_HW >= 6
	host1x_ch_writel(ch, 0x1, HOST1X_CHANNEL_TEARDOWN);
#else
	host1x_sync_writel(host, BIT(ch->id), HOST1X_SYNC_CH_TEARDOWN);
#endif
}
/*
 * Stops both channel's command processor and CDMA immediately.
 * Also, tears down the channel and resets corresponding module.
 */
static void cdma_freeze(struct host1x_cdma *cdma)
{
	struct host1x *host = cdma_to_host1x(cdma);
	struct host1x_channel *ch = cdma_to_channel(cdma);

	if (cdma->torndown && !cdma->running) {
		dev_warn(host->dev, "Already torn down\n");
		return;
	}

	dev_dbg(host->dev, "freezing channel (id %d)\n", ch->id);

	/* halt command processing before touching the DMA state */
	cdma_hw_cmdproc_stop(host, ch, true);

	dev_dbg(host->dev, "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
		__func__, host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
		host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT),
		cdma->last_pos);

	host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
			 HOST1X_CHANNEL_DMACTRL);

	cdma_hw_teardown(host, ch);

	cdma->running = false;
	cdma->torndown = true;
}
/*
 * Undo a freeze: re-enable the command processor and restart DMA with
 * GET restored to the given pushbuffer offset.
 */
static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
{
	struct host1x *host1x = cdma_to_host1x(cdma);
	struct host1x_channel *ch = cdma_to_channel(cdma);

	dev_dbg(host1x->dev,
		"resuming channel (id %u, DMAGET restart = 0x%x)\n",
		ch->id, getptr);

	cdma_hw_cmdproc_stop(host1x, ch, false);

	cdma->torndown = false;
	cdma_timeout_restart(cdma, getptr);
}
/*
 * If this timeout fires, it indicates the current sync_queue entry has
 * exceeded its TTL and the userctx should be timed out and remaining
 * submits already issued cleaned up (future submits return an error).
 */
static void cdma_timeout_handler(struct work_struct *work)
{
	u32 syncpt_val;
	struct host1x_cdma *cdma;
	struct host1x *host1x;
	struct host1x_channel *ch;

	/* recover the CDMA this delayed work item is embedded in */
	cdma = container_of(to_delayed_work(work), struct host1x_cdma,
			    timeout.wq);
	host1x = cdma_to_host1x(cdma);
	ch = cdma_to_channel(cdma);

	host1x_debug_dump(cdma_to_host1x(cdma));

	mutex_lock(&cdma->lock);

	if (!cdma->timeout.client) {
		/* the job was already cleaned up before the timer fired */
		dev_dbg(host1x->dev,
			"cdma_timeout: expired, but has no clientid\n");
		mutex_unlock(&cdma->lock);
		return;
	}

	/* stop processing to get a clean snapshot */
	cdma_hw_cmdproc_stop(host1x, ch, true);

	syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);

	/*
	 * has buffer actually completed? the signed difference handles
	 * syncpoint value wraparound
	 */
	if ((s32)(syncpt_val - cdma->timeout.syncpt_val) >= 0) {
		dev_dbg(host1x->dev,
			"cdma_timeout: expired, but buffer had completed\n");
		/* restore */
		cdma_hw_cmdproc_stop(host1x, ch, false);
		mutex_unlock(&cdma->lock);
		return;
	}

	dev_warn(host1x->dev, "%s: timeout: %u (%s), HW thresh %d, done %d\n",
		 __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
		 syncpt_val, cdma->timeout.syncpt_val);

	/* stop HW, resetting channel/module */
	host1x_hw_cdma_freeze(host1x, cdma);

	host1x_cdma_update_sync_queue(cdma, ch->dev);

	mutex_unlock(&cdma->lock);
}
/*
 * Init timeout resources
 *
 * Sets up the delayed work item that fires cdma_timeout_handler().
 * Always returns 0.
 */
static int cdma_timeout_init(struct host1x_cdma *cdma, unsigned int syncpt)
{
	INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
	cdma->timeout.initialized = true;
	/*
	 * NOTE(review): the syncpt argument is unused here; presumably it is
	 * part of the timeout_init op signature — confirm before removing.
	 */
	return 0;
}
/*
* Clean up timeout resources
*/
static void cdma_timeout_destroy(struct host1x_cdma *cdma)
{
if (cdma->timeout.initialized)
cancel_delayed_work(&cdma->timeout.wq);
cdma->timeout.initialized = false;
}
/* HW-level CDMA operations exposed to the core CDMA code. */
static const struct host1x_cdma_ops host1x_cdma_ops = {
	.start = cdma_start,
	.stop = cdma_stop,
	.flush = cdma_flush,
	.timeout_init = cdma_timeout_init,
	.timeout_destroy = cdma_timeout_destroy,
	.freeze = cdma_freeze,
	.resume = cdma_resume,
	.timeout_cpu_incr = cdma_timeout_cpu_incr,
};
/* HW-level push buffer operations exposed to the core CDMA code. */
static const struct host1x_pushbuffer_ops host1x_pushbuffer_ops = {
	.init = push_buffer_init,
};
| {
"pile_set_name": "Github"
} |
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.lang.actionscript.highlighting;
import com.intellij.openapi.fileTypes.SingleLazyInstanceSyntaxHighlighterFactory;
import com.intellij.openapi.fileTypes.SyntaxHighlighter;
import org.jetbrains.annotations.NotNull;
/**
 * Factory producing the syntax highlighter for ActionScript (ECMA Level 4)
 * files; the base class lazily creates and caches a single instance.
 *
 * @author yole
 */
public class ECMAL4SyntaxHighlighterFactory extends SingleLazyInstanceSyntaxHighlighterFactory {
  @Override
  @NotNull
  protected SyntaxHighlighter createHighlighter() {
    return new ECMAL4Highlighter();
  }
}
| {
"pile_set_name": "Github"
} |
WiFi Access Point
=================
.. doxygengroup:: wifi_ap
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<Workspace
version = "1.0">
<FileRef
location = "self:CNGridView Example.xcodeproj">
</FileRef>
</Workspace>
| {
"pile_set_name": "Github"
} |
# This properties file is used to create a PropertyResourceBundle
# It contains Locale specific strings used be the Synth Look and Feel.
# Currently, the following components need this for support:
#
# FileChooser
#
# When this file is read in, the strings are put into the
# defaults table. This is an implementation detail of the current
# workings of Swing. DO NOT DEPEND ON THIS.
# This may change in future versions of Swing as we improve localization
# support.
#
# Refer to the note in basic.properties for a description as to what
# the mnemonics correspond to and how to calculate them.
#
# @author Steve Wilson
############ FILE CHOOSER STRINGS #############
FileChooser.lookInLabel.textAndMnemonic=Rechercher &dans :
FileChooser.saveInLabel.textAndMnemonic=Enregistrer dans :
FileChooser.fileNameLabel.textAndMnemonic=&Nom du fichier :
FileChooser.folderNameLabel.textAndMnemonic=&Nom du dossier :
FileChooser.filesOfTypeLabel.textAndMnemonic=&Type de fichier :
FileChooser.upFolderToolTip.textAndMnemonic=Remonte d'un niveau.
FileChooser.upFolderAccessibleName=Monter
FileChooser.homeFolderToolTip.textAndMnemonic=R\u00E9pertoire de base
FileChooser.homeFolderAccessibleName=R\u00E9pertoire de base
FileChooser.newFolderToolTip.textAndMnemonic=Cr\u00E9e un dossier.
FileChooser.newFolderAccessibleName=Nouveau dossier
FileChooser.newFolderActionLabel.textAndMnemonic=Nouveau dossier
FileChooser.listViewButtonToolTip.textAndMnemonic=Liste
FileChooser.listViewButtonAccessibleName=Liste
FileChooser.listViewActionLabel.textAndMnemonic=Liste
FileChooser.detailsViewButtonToolTip.textAndMnemonic=D\u00E9tails
FileChooser.detailsViewButtonAccessibleName=D\u00E9tails
FileChooser.detailsViewActionLabel.textAndMnemonic=D\u00E9tails
FileChooser.refreshActionLabel.textAndMnemonic=Actualiser
FileChooser.viewMenuLabel.textAndMnemonic=Affichage
FileChooser.fileNameHeader.textAndMnemonic=Nom
FileChooser.fileSizeHeader.textAndMnemonic=Taille
FileChooser.fileTypeHeader.textAndMnemonic=Type
FileChooser.fileDateHeader.textAndMnemonic=Modifi\u00E9
FileChooser.fileAttrHeader.textAndMnemonic=Attributs
| {
"pile_set_name": "Github"
} |
// This is a manifest file that'll be compiled into application.js, which will include all the files
// listed below.
//
// Any JavaScript/Coffee file within this directory, lib/assets/javascripts, or any plugin's
// vendor/assets/javascripts directory can be referenced here using a relative path.
//
// It's not advisable to add code directly here, but if you do, it'll appear at the bottom of the
// compiled file. JavaScript code in this file should be added after the last require_* statement.
//
// Read Sprockets README (https://github.com/rails/sprockets#sprockets-directives) for details
// about supported directives.
//
//= require rails-ujs
//= require activestorage
//= require_tree .
| {
"pile_set_name": "Github"
} |
using System;
namespace Utf8Json.Formatters
{
    /// <summary>
    /// A formatter driven entirely by caller-supplied delegates, for types
    /// that cannot carry their own formatter (e.g. anonymous types).
    /// Either delegate may be null; the corresponding direction then throws
    /// <see cref="InvalidOperationException"/> when invoked.
    /// </summary>
    public sealed class AnonymousFormatter<T> : IJsonFormatter<T>
    {
        readonly JsonSerializeAction<T> serialize;
        readonly JsonDeserializeFunc<T> deserialize;

        public AnonymousFormatter(JsonSerializeAction<T> serialize, JsonDeserializeFunc<T> deserialize)
        {
            this.serialize = serialize;
            this.deserialize = deserialize;
        }

        /// <summary>Writes <paramref name="value"/> via the serialize delegate.</summary>
        public void Serialize(ref JsonWriter writer, T value, IJsonFormatterResolver formatterResolver)
        {
            if (serialize == null) throw new InvalidOperationException(this.GetType().Name + " does not support Serialize.");
            serialize(ref writer, value, formatterResolver);
        }

        /// <summary>Reads a value via the deserialize delegate.</summary>
        public T Deserialize(ref JsonReader reader, IJsonFormatterResolver formatterResolver)
        {
            if (deserialize == null) throw new InvalidOperationException(this.GetType().Name + " does not support Deserialize.");
            return deserialize(ref reader, formatterResolver);
        }
    }
| {
"pile_set_name": "Github"
} |
/* Copyright 2005-2006 Tim Fennell
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.sourceforge.stripes.tag;
import net.sourceforge.stripes.util.Log;
import java.beans.PropertyDescriptor;
import java.beans.SimpleBeanInfo;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
/**
* <p>
* Describes the properties supported by the HtmlTagSupport class which is the
* parent of all the HTML Form/Input tags in Stripes. Exists to provide some
* flexibility in the naming of methods and primarily to provide support for the
* "class" tag attribute in JSP containers that demand a javabean
* compliant getter and setter method. Since getClass() is rather special in
* Java and cannot (and should not) be overridden, containers may not like
* calling setClass(String) without there being a corresponding
* getClass():String method. So the PropertyDescriptor for the "class"
* property specifies the methods getCssClass() and setCssClass.</p>
*
* @author Tim Fennell
*/
public class HtmlTagSupportBeanInfo extends SimpleBeanInfo {
    private static final Log log = Log.getInstance(HtmlTagSupportBeanInfo.class);

    /**
     * Names of the vanilla javabean properties exposed by HtmlTagSupport.
     * Each is introspected with the standard getter/setter naming convention;
     * only "class" (handled separately below) needs explicit methods.
     */
    private static final String[] VANILLA_PROPERTIES = {
            "id", "title", "style", "dir", "lang", "tabindex", "accesskey",
            "onfocus", "onblur", "onselect", "onchange", "onclick",
            "ondblclick", "onmousedown", "onmouseup", "onmouseover",
            "onmousemove", "onmouseout", "onkeypress", "onkeydown", "onkeyup"
    };

    /**
     * Generates a simple set of PropertyDescriptors for the HtmlTagSupport
     * class.
     *
     * @return the descriptors for "class" plus all vanilla properties, or
     *         null if introspection fails (which should never happen)
     */
    @Override
    public PropertyDescriptor[] getPropertyDescriptors() {
        try {
            List<PropertyDescriptor> descriptors = new ArrayList<PropertyDescriptor>();

            // Add the tricky one first: "class" cannot follow the standard
            // convention because getClass() cannot be overridden, so map it
            // to getCssClass()/setCssClass(String) explicitly.
            Method getter = HtmlTagSupport.class.getMethod("getCssClass");
            Method setter = HtmlTagSupport.class.getMethod("setCssClass", String.class);
            descriptors.add(new PropertyDescriptor("class", getter, setter));

            // Now do all the vanilla properties, driven by the name table
            // above instead of one hand-written line per property.
            for (String property : VANILLA_PROPERTIES) {
                descriptors.add(new PropertyDescriptor(property, HtmlTagSupport.class));
            }

            PropertyDescriptor[] array = new PropertyDescriptor[descriptors.size()];
            return descriptors.toArray(array);
        } catch (Exception e) {
            // This is crazy talk, we're only doing things that should always succeed
            log.fatal(e, "Could not construct bean info for HtmlTagSupport. This is very bad.");
            return null;
        }
    }
}
| {
"pile_set_name": "Github"
} |
这次去泉州有三个国外客人一起同行,还是入住了泉酒. 大堂有水仙花装饰,有一股清香.环境很好.以390元的价格入住,超值. 客人称赞酒店很不错,我想这是每个入住客人的同感.
| {
"pile_set_name": "Github"
} |
package sdkrand
import (
"math/rand"
"sync"
"time"
)
// lockedSource is a thread-safe implementation of rand.Source
type lockedSource struct {
	lk  sync.Mutex  // guards src
	src rand.Source // underlying, non-thread-safe source
}
// Int63 returns the next value from the underlying source, serializing
// access with the mutex so concurrent callers are safe.
func (r *lockedSource) Int63() (n int64) {
	r.lk.Lock()
	defer r.lk.Unlock()
	return r.src.Int63()
}
// Seed re-seeds the underlying source, serializing access with the mutex.
func (r *lockedSource) Seed(seed int64) {
	r.lk.Lock()
	defer r.lk.Unlock()
	r.src.Seed(seed)
}
// SeededRand is a new RNG using a thread safe implementation of rand.Source
var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
| {
"pile_set_name": "Github"
} |
<?xml version='1.0'?>
<gl_extension name="GLES_EXT_shader_non_constant_global_initializers" reg_no="264">
</gl_extension>
| {
"pile_set_name": "Github"
} |
{-# LANGUAGE OverloadedLists #-}
main = print (length (['a'..(10 :: Int)] :: [Int]))
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Food Tracker | Home</title>
<link href="https://fonts.googleapis.com/css?family=Oswald|Ubuntu:400,700" rel="stylesheet">
<!-- Bootstrap CSS -->
<link rel="stylesheet" href="./bs/css/united.min.css" />
<link rel="stylesheet" href="./styles.css" />
<!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/libs/html5shiv/3.7.2/html5shiv.min.js"></script>
<script src="https://oss.maxcdn.com/libs/respond.js/1.4.2/respond.min.js"></script>
<![endif]-->
</head>
<body>
<div class="container-fluid" id="wrapper">
<div class="row" id="homeJumbo">
<div class="overlay"></div>
<div class="col-lg-12">
<h1>
<a href="./">FOOD TRACKER</a>
</h1>
<div class="row t20">
<div class="col-lg-4 col-lg-offset-4">
<ul class="nav nav-pills nav-justified">
<li role="presentation" class="active">
<a href="./" title="Home">Home</a>
</li>
<li role="presentation">
<a href="./add.html" title="Add Food Item">Add Food Item</a>
</li>
</ul>
</div>
</div>
</div>
</div>
<div class="row t20">
<div class="col-lg-4 col-lg-offset-4">
<div class="panel panel-default">
<div class="panel-body">
<form action="" method="POST" role="form">
<div class="form-group">
<label><i class="glyphicon glyphicon-plus-sign"></i> Add New Date</label>
<div class="input-group">
<input type="date" class="form-control" placeholder="Select New Date" aria-label="date" aria-describedby="add-btn">
<span class="input-group-btn">
<button type="submit" class="btn btn-primary" id="add-btn">
<i class="glyphicon glyphicon-plus-sign"></i> Add Date
</button>
</span>
</div>
</div>
</form>
</div>
</div>
</div>
</div>
<div class="row t20">
<div class="col-lg-12">
<h4 class="text-center text-white"><strong>Existing Records (30)</strong></h4>
</div>
</div>
<div class="row t20">
<div class="col-lg-3">
<div class="panel panel-warning">
<div class="panel-body text-bold">
<i class="glyphicon glyphicon-calendar"></i> December 14, 2017
<a class="btn btn-primary btn-xs pull-right" href="./view.html">
View <i class="glyphicon glyphicon-arrow-right"></i>
</a>
</div>
<ul class="list-group">
<li class="list-group-item text-bold"><span class="badge">60</span> Protein</li>
<li class="list-group-item text-bold"><span class="badge">49</span> Carbohydrate</li>
<li class="list-group-item text-bold"><span class="badge">55</span> Fat</li>
<li class="list-group-item text-bold"><span class="badge">955</span> Calories</li>
</ul>
</div>
</div>
<div class="col-lg-3">
<div class="panel panel-warning">
<div class="panel-body text-bold">
<i class="glyphicon glyphicon-calendar"></i> December 14, 2017
<a class="btn btn-primary btn-xs pull-right" href="./view.html">
View <i class="glyphicon glyphicon-arrow-right"></i>
</a>
</div>
<ul class="list-group">
<li class="list-group-item text-bold"><span class="badge">60</span> Protein</li>
<li class="list-group-item text-bold"><span class="badge">49</span> Carbohydrate</li>
<li class="list-group-item text-bold"><span class="badge">55</span> Fat</li>
<li class="list-group-item text-bold"><span class="badge">955</span> Calories</li>
</ul>
</div>
</div>
<div class="col-lg-3">
<div class="panel panel-warning">
<div class="panel-body text-bold">
<i class="glyphicon glyphicon-calendar"></i> December 14, 2017
<a class="btn btn-primary btn-xs pull-right" href="./view.html">
View <i class="glyphicon glyphicon-arrow-right"></i>
</a>
</div>
<ul class="list-group">
<li class="list-group-item text-bold"><span class="badge">60</span> Protein</li>
<li class="list-group-item text-bold"><span class="badge">49</span> Carbohydrate</li>
<li class="list-group-item text-bold"><span class="badge">55</span> Fat</li>
<li class="list-group-item text-bold"><span class="badge">955</span> Calories</li>
</ul>
</div>
</div>
<div class="col-lg-3">
<div class="panel panel-warning">
<div class="panel-body text-bold">
<i class="glyphicon glyphicon-calendar"></i> December 14, 2017
<a class="btn btn-primary btn-xs pull-right" href="./view.html">
View <i class="glyphicon glyphicon-arrow-right"></i>
</a>
</div>
<ul class="list-group">
<li class="list-group-item text-bold"><span class="badge">60</span> Protein</li>
<li class="list-group-item text-bold"><span class="badge">49</span> Carbohydrate</li>
<li class="list-group-item text-bold"><span class="badge">55</span> Fat</li>
<li class="list-group-item text-bold"><span class="badge">955</span> Calories</li>
</ul>
</div>
</div>
</div>
<footer>
<div class="row pad">
<div class="col-lg-12 text-center">
Copyright © 2017 <strong>Food Tracker</strong>
</div>
</div>
</footer>
</div>
<!-- jQuery -->
<script src="./bs/js/jquery.min.js"></script>
<!-- Bootstrap JavaScript -->
<script src="./bs/js/bootstrap.min.js"></script>
</body>
</html> | {
"pile_set_name": "Github"
} |
using System;
using System.Collections.Generic;
using System.Linq;
using GraphQL.Types;
using GraphQL.Utilities;
using GraphQL.Validation;
using Xunit;
namespace GraphQL.Tests.Execution
{
public class UnionInterfaceTests : QueryTestBase<UnionSchema>
{
private readonly Person _john;
public UnionInterfaceTests()
{
Services.Register<DogType>();
Services.Register<CatType>();
Services.Register<PetType>();
Services.Register<PersonType>();
Services.Singleton(new UnionSchema(new SimpleContainerAdapter(Services)));
var garfield = new Cat
{
Name = "Garfield",
Meows = false
};
var odie = new Dog
{
Name = "Odie",
Barks = true
};
var liz = new Person
{
Name = "Liz",
Pets = new List<IPet>(),
Friends = new List<INamed>()
};
_john = new Person
{
Name = "John",
Pets = new List<IPet>
{
garfield,
odie
},
Friends = new List<INamed>
{
liz,
odie
}
};
}
[Fact]
public void can_introspect_on_union_and_intersection_types()
{
var query = @"
query AQuery {
Named: __type(name: ""Named"") {
kind
name
fields { name }
interfaces { name }
possibleTypes { name }
enumValues { name }
inputFields { name }
}
Pet: __type(name: ""Pet"") {
kind
name
fields { name }
interfaces { name }
possibleTypes { name }
enumValues { name }
inputFields { name }
}
}
";
var expected = @"{
""Named"": {
""kind"": ""INTERFACE"",
""name"": ""Named"",
""fields"": [
{ ""name"": ""name"" }
],
""interfaces"": null,
""possibleTypes"": [
{ ""name"": ""Dog"" },
{ ""name"": ""Cat"" },
{ ""name"": ""Person"" }
],
""enumValues"": null,
""inputFields"": null
},
""Pet"": {
""kind"": ""UNION"",
""name"": ""Pet"",
""fields"": null,
""interfaces"": null,
""possibleTypes"": [
{ ""name"": ""Dog"" },
{ ""name"": ""Cat"" }
],
""enumValues"": null,
""inputFields"": null
}
}";
AssertQuerySuccess(query, expected);
}
[Fact]
public void executes_using_union_types()
{
// NOTE: This is an *invalid* query, but it should be an *executable* query.
var query = @"
query AQuery {
__typename
name
pets {
__typename
name
barks
meows
}
}
";
var expected = @"
{
""__typename"": ""Person"",
""name"": ""John"",
""pets"": [
{ ""__typename"": ""Cat"", ""name"": ""Garfield"", ""meows"": false },
{ ""__typename"": ""Dog"", ""name"": ""Odie"", ""barks"": true }
]
}
";
AssertQuerySuccess(query, expected, root: _john, rules: Enumerable.Empty<IValidationRule>());
}
[Fact]
public void executes_union_types_with_inline_fragments()
{
// This is the valid version of the query in the above test.
var query = @"
query AQuery {
__typename
name
pets {
__typename
... on Dog {
name
barks
},
... on Cat {
name
meows
}
}
}
";
var expected = @"
{
""__typename"": ""Person"",
""name"": ""John"",
""pets"": [
{ ""__typename"": ""Cat"", ""name"": ""Garfield"", ""meows"": false },
{ ""__typename"": ""Dog"", ""name"": ""Odie"", ""barks"": true }
]
}
";
AssertQuerySuccess(query, expected, root: _john);
}
[Fact]
public void executes_using_interface_types()
{
// NOTE: This is an *invalid* query, but it should be an *executable* query.
var query = @"
query AQuery {
__typename
name
friends {
__typename
name
barks
meows
}
}
";
var expected = @"
{
""__typename"": ""Person"",
""name"": ""John"",
""friends"": [
{ ""__typename"": ""Person"", ""name"": ""Liz"" },
{ ""__typename"": ""Dog"", ""name"": ""Odie"", ""barks"": true }
]
}
";
AssertQuerySuccess(query, expected, root: _john, rules: Enumerable.Empty<IValidationRule>());
}
[Fact]
public void allows_fragment_conditions_to_be_abstract_types()
{
var query = @"
query AQuery {
__typename
name
pets { ...PetFields }
friends { ...FriendFields }
}
fragment PetFields on Pet {
__typename
... on Dog {
name
barks
},
... on Cat {
name
meows
}
}
fragment FriendFields on Named {
__typename
name
... on Dog {
barks
},
... on Cat {
meows
}
}
";
var expected = @"
{
""__typename"": ""Person"",
""name"": ""John"",
""pets"": [
{ ""__typename"": ""Cat"", ""name"": ""Garfield"", ""meows"": false },
{ ""__typename"": ""Dog"", ""name"": ""Odie"", ""barks"": true }
],
""friends"": [
{ ""__typename"": ""Person"", ""name"": ""Liz"" },
{ ""__typename"": ""Dog"", ""name"": ""Odie"", ""barks"": true }
]
}
";
AssertQuerySuccess(query, expected, root: _john);
}
}
    /// <summary>Test model: anything that has a name.</summary>
    public interface INamed
    {
        string Name { get; set; }
    }
    /// <summary>Test model: marker interface for pets, which are named.</summary>
    public interface IPet : INamed
    {
    }
    /// <summary>Test model: a pet dog.</summary>
    public class Dog : IPet
    {
        public string Name { get; set; }
        public bool Barks { get; set; }
    }
    /// <summary>Test model: a pet cat.</summary>
    public class Cat : IPet
    {
        public string Name { get; set; }
        public bool Meows { get; set; }
    }
    /// <summary>Test model: a person with pets and friends (people or pets).</summary>
    public class Person : INamed
    {
        public string Name { get; set; }
        public List<IPet> Pets { get; set; }
        public List<INamed> Friends { get; set; }
    }
    /// <summary>GraphQL interface type "Named" exposing a single "name" field.</summary>
    public class NamedType : InterfaceGraphType
    {
        public NamedType()
        {
            Name = "Named";
            Field<StringGraphType>("name");
        }
    }
    /// <summary>GraphQL object type for <see cref="Dog"/>; implements the Named interface.</summary>
    public class DogType : ObjectGraphType<Dog>
    {
        public DogType()
        {
            Name = "Dog";
            Field<StringGraphType>("name");
            Field<BooleanGraphType>("barks");
            Interface<NamedType>();
        }
    }
    /// <summary>GraphQL object type for <see cref="Cat"/>; implements the Named interface.</summary>
    public class CatType : ObjectGraphType<Cat>
    {
        public CatType()
        {
            Name = "Cat";
            Field<StringGraphType>("name");
            Field<BooleanGraphType>("meows");
            Interface<NamedType>();
        }
    }
    /// <summary>GraphQL union type "Pet" whose possible types are Dog and Cat.</summary>
    public class PetType : UnionGraphType
    {
        public PetType()
        {
            Name = "Pet";
            Type<DogType>();
            Type<CatType>();
        }
    }
    /// <summary>GraphQL object type for <see cref="Person"/>, with list fields over the union and interface types.</summary>
    public class PersonType : ObjectGraphType<Person>
    {
        public PersonType()
        {
            Name = "Person";
            Field<StringGraphType>("name");
            Field<ListGraphType<PetType>>("pets");
            Field<ListGraphType<NamedType>>("friends");
            Interface<NamedType>();
        }
    }
    /// <summary>Schema for the union/interface tests; the query root is <see cref="PersonType"/>.</summary>
    public class UnionSchema : Schema
    {
        public UnionSchema(IServiceProvider resolver)
            : base(resolver)
        {
            Query = resolver.GetRequiredService<PersonType>();
        }
    }
}
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import csv
import re
import json
import os
import random
import subprocess
import sys
import time
import urllib2
import zlib
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
OWNERS_PATH = os.path.abspath(
os.path.join(BASE_DIR, '..', 'test', 'test_owners.csv'))
OWNERS_JSON_PATH = OWNERS_PATH.replace('.csv', '.json')
GCS_URL_BASE = 'https://storage.googleapis.com/kubernetes-test-history/'
SKIP_MAINTAINERS = {
'a-robinson', 'aronchick', 'bgrant0607-nocc', 'david-mcmahon',
'goltermann', 'sarahnovotny'}
def normalize(name):
    """Canonicalize a test name: drop [...] and {...} tags, collapse whitespace."""
    without_tags = re.sub(r'\[.*?\]|\{.*?\}', '', name)
    # split()/join collapses runs of whitespace and strips the ends
    return ' '.join(without_tags.split())
def get_test_history(days_ago):
    """Fetch and decode the GCS test-history JSON blob for the UTC date
    `days_ago` days in the past."""
    url = time.strftime(GCS_URL_BASE + 'logs/%Y-%m-%d.json',
                        time.gmtime(time.time() - days_ago * 24 * 60 * 60))
    resp = urllib2.urlopen(url)
    content = resp.read()
    # GCS may serve the blob gzip-encoded; 15|16 tells zlib to expect a gzip header
    if resp.headers.get('content-encoding') == 'gzip':
        content = zlib.decompress(content, 15 | 16)
    return json.loads(content)
def get_test_names_from_test_history():
    """Return the union of normalized test names seen in the last 4 days of
    uploaded test history."""
    test_names = set()
    for days_ago in range(4):
        test_history = get_test_history(days_ago)
        test_names.update(normalize(name) for name in test_history['test_names'])
    return test_names
def get_test_names_from_local_files():
    """Return normalized test names by running the in-repo lister
    (test/list/main.go) and parsing its JSON output."""
    tests_json = subprocess.check_output(['go', 'run', 'test/list/main.go', '-json'])
    tests = json.loads(tests_json)
    # Names containing 'k8s.io/' are Go package tests and are used as-is;
    # others get the suite name and per-test name joined with a space.
    return {normalize(t['Name'] + (' ' + t['TestName'] if 'k8s.io/' not in t['Name'] else ''))
            for t in tests}
def load_owners(fname):
    """Read a test_owners CSV into {normalized name: (owner, auto_assigned, sig)}."""
    owners = {}
    with open(fname) as f:
        for row_number, cols in enumerate(csv.reader(f)):
            if row_number == 0:
                continue  # header
        # rows written before the sig column existed have only 3 fields
            if len(cols) == 3:
                name, owner, random_assignment = cols
                sig = ""
            else:
                name, owner, random_assignment, sig = cols
            owners[normalize(name)] = (owner, int(random_assignment), sig)
    return owners
def write_owners(fname, owners):
    """Write the owners mapping back out as CSV, sorted by test name."""
    with open(fname, 'w') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerow(['name', 'owner', 'auto-assigned', 'sig'])
        for name in sorted(owners):
            owner, random_assignment, sig = owners[name]
            writer.writerow([name, owner, int(random_assignment), sig])
def get_maintainers():
    """Return the hard-coded maintainer list, minus SKIP_MAINTAINERS, sorted."""
    # Github doesn't seem to support team membership listing without a key with
    # org admin privileges. Instead, we do it manually:
    # Open https://github.com/orgs/kubernetes/teams/kubernetes-maintainers
    # Run this in the js console:
    # [].slice.call(document.querySelectorAll('.team-member-username a')).map(
    #     e => e.textContent.trim())
    ret = {"alex-mohr", "apelisse", "aronchick", "bgrant0607", "bgrant0607-nocc",
           "bprashanth", "brendandburns", "caesarxuchao", "childsb", "cjcullen",
           "david-mcmahon", "davidopp", "dchen1107", "deads2k", "derekwaynecarr",
           "eparis", "erictune", "fabioy", "fejta", "fgrzadkowski", "freehan",
           "gmarek", "grodrigues3", "ingvagabund", "ixdy", "janetkuo", "jbeda",
           "jessfraz", "jingxu97", "jlowdermilk", "jsafrane", "jszczepkowski",
           "justinsb", "Kashomon", "kevin-wangzefeng", "krousey",
           "lavalamp", "liggitt", "luxas", "madhusudancs", "maisem", "matchstick",
           "mbohlool", "mikedanese", "mml", "mtaufen", "mwielgus", "ncdc",
           "nikhiljindal", "piosz", "pmorie", "pwittrock", "Q-Lee", "quinton-hoole",
           "Random-Liu", "rmmh", "roberthbailey", "saad-ali", "smarterclayton",
           "soltysh", "spxtr", "sttts", "thelinuxfoundation", "thockin",
           "timothysc", "tallclair", "vishh", "wojtek-t", "xiang90", "yifan-gu",
           "yujuhong", "zmerlynn"}
    return sorted(ret - SKIP_MAINTAINERS)
def detect_github_username():
    """Infer the user's GitHub login from `git config remote.origin.url`.

    Raises ValueError when the origin URL does not look like a personal fork
    (i.e. no match, or the org is 'kubernetes').
    """
    origin_url = subprocess.check_output(['git', 'config', 'remote.origin.url'])
    m = re.search(r'github.com[:/](.*)/', origin_url)
    if m and m.group(1) != 'kubernetes':
        return m.group(1)
    raise ValueError('unable to determine GitHub user from '
                     '`git config remote.origin.url` output, run with --user instead')
def sig_prefixes(owners):
    # Collapse per-test SIG ownership into the shortest unambiguous name
    # prefixes, returned as a JSON string mapping sig -> [prefixes].
    #
    # TODO(rmmh): make sig prefixes the only thing in test_owners!
    # Precise test names aren't very interesting.
    #
    # owners: dict mapping test name -> (owner, random_assignment, sig).
    # Entries without a sig, or that look like Go package paths
    # ('k8s.io/' in the name), are excluded from prefix computation.
    owns = []
    for test, (owner, random_assignment, sig) in owners.iteritems():
        if 'k8s.io/' in test or not sig:
            continue
        owns.append([test, sig])
    # Fixed-point loop: repeatedly shorten one name by a word whenever every
    # test sharing the shortened prefix belongs to the same sig.  Each pass
    # re-sorts and restarts scanning after a successful collapse.
    while True:
        owns.sort()
        for name, sig in owns:
            # try removing the last word in the name, use it if all tests beginning
            # with this shorter name share the same sig.
            maybe_prefix = ' '.join(name.split()[:-1])
            # NOTE(review): for a single-word name maybe_prefix is '' and
            # startswith('') matches every entry — presumably acceptable
            # only when all remaining sigs agree; confirm intent.
            matches = [other_sig == sig for other_name, other_sig in owns if other_name.startswith(maybe_prefix)]
            if matches and all(matches):
                # Replace every entry under this prefix with the single
                # collapsed prefix entry, then restart the scan.
                owns = [[n, s] for n, s in owns if not n.startswith(maybe_prefix)]
                owns.append([maybe_prefix, sig])
                break
        else:  # iterated completely through owns without any changes
            break
    # Group the surviving prefixes by sig for JSON output.
    sigs = {}
    for name, sig in owns:
        sigs.setdefault(sig, []).append(name)
    return json.dumps(sigs, sort_keys=True, indent=True)
def main():
    # Entry point: reconcile the test-owners CSV with the current set of
    # test names, report outdated/new tests, and (unless --check) assign
    # owners to new tests and rewrite the owners files.
    parser = argparse.ArgumentParser()
    parser.add_argument('--history', action='store_true', help='Generate test list from result history.')
    parser.add_argument('--user', help='User to assign new tests to (or RANDOM, default: current GitHub user).')
    parser.add_argument('--addonly', action='store_true', help='Only add missing tests, do not change existing.')
    parser.add_argument('--check', action='store_true', help='Exit with a nonzero status if the test list has changed.')
    parser.add_argument('--print_sig_prefixes', action='store_true', help='Emit SIG prefixes for matching.')
    options = parser.parse_args()
    # Source of truth for test names: result history or the local tree.
    if options.history:
        test_names = get_test_names_from_test_history()
    else:
        test_names = get_test_names_from_local_files()
    test_names = sorted(test_names)
    owners = load_owners(OWNERS_PATH)
    # Always regenerate the sig-prefix JSON alongside the owners CSV.
    prefixes = sig_prefixes(owners)
    with open(OWNERS_JSON_PATH, 'w') as f:
        f.write(prefixes + '\n')
    if options.print_sig_prefixes:
        print prefixes
        return
    # Tests present in the CSV but no longer existing, and vice versa.
    outdated_tests = sorted(set(owners) - set(test_names))
    new_tests = sorted(set(test_names) - set(owners))
    maintainers = get_maintainers()
    print '# OUTDATED TESTS (%d):' % len(outdated_tests)
    print '\n'.join('%s -- %s%s' %
                    (t, owners[t][0], ['', ' (random)'][owners[t][1]])
                    for t in outdated_tests)
    print '# NEW TESTS (%d):' % len(new_tests)
    print '\n'.join(new_tests)
    if options.check:
        # CI mode: fail if the list drifted, never modify files.
        if new_tests or outdated_tests:
            print
            print 'ERROR: the test list has changed'
            sys.exit(1)
        sys.exit(0)
    if not options.user:
        options.user = detect_github_username()
    # Drop entries for tests that no longer exist.
    for name in outdated_tests:
        owners.pop(name)
    if not options.addonly:
        # Evict random assignments whose owner left the maintainers team so
        # those tests get reassigned below.  sorted() materializes a list,
        # so popping during iteration is safe.
        print '# UNEXPECTED MAINTAINERS ',
        print '(randomly assigned, but not in kubernetes-maintainers)'
        for name, (owner, random_assignment, _) in sorted(owners.iteritems()):
            if random_assignment and owner not in maintainers:
                print '%-16s %s' % (owner, name)
                owners.pop(name)
        print
    # Current per-maintainer load, used to balance random assignment.
    owner_counts = collections.Counter(
        owner for name, (owner, random, sig) in owners.iteritems()
        if owner in maintainers)
    for test_name in set(test_names) - set(owners):
        random_assignment = True
        if options.user.lower() == 'random':
            # Pick among the four least-loaded maintainers.
            new_owner, _count = random.choice(owner_counts.most_common()[-4:])
        else:
            new_owner = options.user
            random_assignment = False
        owner_counts[new_owner] += 1
        # New assignments start with an empty sig.
        owners[test_name] = (new_owner, random_assignment, "")
    if options.user.lower() == 'random':
        print '# Tests per maintainer:'
        for owner, count in owner_counts.most_common():
            print '%-20s %3d' % (owner, count)
    write_owners(OWNERS_PATH, owners)
| {
"pile_set_name": "Github"
} |
package com.hadihariri.kotlincourse.interop;
import com.hadihariri.kotlincourse.classes.CustomerKotlin;
import com.hadihariri.kotlincourse.classes.Status;
import java.io.IOException;
/**
* Created by hadihariri on 25/08/16.
*/
/**
 * Demonstrates calling Kotlin-declared classes and top-level functions
 * from Java code.
 */
public class TalkingToKotlin {

    /**
     * Placeholder for exercising a Kotlin method that declares a checked
     * exception; the call is left disabled.
     *
     * @param customerKotlin the Kotlin-defined customer instance
     */
    public void loadStats(CustomerKotlin customerKotlin) {
        /* try {
            // customerKotlin.loadStatistics("filename");
        } catch (IOException e) {
            e.printStackTrace();
        }
        */
    }

    public static void main(String[] args) {
        // Instantiate a Kotlin class and exercise its members from Java.
        CustomerKotlin customer = new CustomerKotlin(1, "Hadi", "[email protected]");
        customer.setEmail("[email protected]");
        customer.changeStatus(Status.Current);
        customer.preferential();

        // Kotlin top-level function and property, exposed via UtilityClass.
        UtilityClass.prefix("some", "value");
        int year = UtilityClass.CopyrightYear;
    }
}
| {
"pile_set_name": "Github"
} |
*NERD_tree.txt* A tree explorer plugin that owns your momma!
omg its ... ~
________ ________ _ ____________ ____ __________ ____________~
/_ __/ / / / ____/ / | / / ____/ __ \/ __ \ /_ __/ __ \/ ____/ ____/~
/ / / /_/ / __/ / |/ / __/ / /_/ / / / / / / / /_/ / __/ / __/ ~
/ / / __ / /___ / /| / /___/ _, _/ /_/ / / / / _, _/ /___/ /___ ~
/_/ /_/ /_/_____/ /_/ |_/_____/_/ |_/_____/ /_/ /_/ |_/_____/_____/ ~
Reference Manual~
==============================================================================
CONTENTS *NERDTree-contents*
1.Intro...................................|NERDTree|
2.Functionality provided..................|NERDTreeFunctionality|
2.1.Global commands...................|NERDTreeGlobalCommands|
2.2.Bookmarks.........................|NERDTreeBookmarks|
2.2.1.The bookmark table..........|NERDTreeBookmarkTable|
2.2.2.Bookmark commands...........|NERDTreeBookmarkCommands|
2.2.3.Invalid bookmarks...........|NERDTreeInvalidBookmarks|
2.3.NERD tree mappings................|NERDTreeMappings|
2.4.The NERD tree menu................|NERDTreeMenu|
3.Options.................................|NERDTreeOptions|
3.1.Option summary....................|NERDTreeOptionSummary|
3.2.Option details....................|NERDTreeOptionDetails|
4.The NERD tree API.......................|NERDTreeAPI|
4.1.Key map API.......................|NERDTreeKeymapAPI|
4.2.Menu API..........................|NERDTreeMenuAPI|
    4.3.Path Filter API...................|NERDTreeAddPathFilter()|
4.4.Path Listener API.................|NERDTreePathListenerAPI|
5.About...................................|NERDTreeAbout|
6.License.................................|NERDTreeLicense|
==============================================================================
1. Intro *NERDTree*
What is this "NERD tree"??
The NERD tree allows you to explore your filesystem and to open files and
directories. It presents the filesystem to you in the form of a tree which you
manipulate with the keyboard and/or mouse. It also allows you to perform
simple filesystem operations.
The following features and functionality are provided by the NERD tree:
* Files and directories are displayed in a hierarchical tree structure
* Different highlighting is provided for the following types of nodes:
* files
* directories
* sym-links
* windows .lnk files
* read-only files
* executable files
* Many (customisable) mappings are provided to manipulate the tree:
* Mappings to open/close/explore directory nodes
* Mappings to open files in new/existing windows/tabs
* Mappings to change the current root of the tree
* Mappings to navigate around the tree
* ...
* Directories and files can be bookmarked.
* Most NERD tree navigation can also be done with the mouse
* Filtering of tree content (can be toggled at runtime)
* custom file filters to prevent e.g. vim backup files being displayed
* optional displaying of hidden files (. files)
* files can be "turned off" so that only directories are displayed
* The position and size of the NERD tree window can be customised
* The order in which the nodes in the tree are listed can be customised.
* A model of your filesystem is created/maintained as you explore it. This
has several advantages:
* All filesystem information is cached and is only re-read on demand
* If you revisit a part of the tree that you left earlier in your
session, the directory nodes will be opened/closed as you left them
* The script remembers the cursor position and window position in the NERD
tree so you can toggle it off (or just close the tree window) and then
reopen it (with NERDTreeToggle) the NERD tree window will appear exactly
as you left it
* You can have a separate NERD tree for each tab, share trees across tabs,
or a mix of both.
* By default the script overrides the default file browser (netrw), so if
you :edit a directory a (slightly modified) NERD tree will appear in the
current window
* A programmable menu system is provided (simulates right clicking on a
node)
* one default menu plugin is provided to perform basic filesystem
operations (create/delete/move/copy files/directories)
* There's an API for adding your own keymappings
==============================================================================
2. Functionality provided *NERDTreeFunctionality*
------------------------------------------------------------------------------
2.1. Global Commands *NERDTreeGlobalCommands*
:NERDTree [<start-directory> | <bookmark>] *:NERDTree*
Opens a fresh NERD tree. The root of the tree depends on the argument
given. There are 3 cases: If no argument is given, the current directory
will be used. If a directory is given, that will be used. If a bookmark
name is given, the corresponding directory will be used. For example: >
:NERDTree /home/marty/vim7/src
:NERDTree foo (foo is the name of a bookmark)
<
:NERDTreeFromBookmark <bookmark> *:NERDTreeFromBookmark*
Opens a fresh NERD tree with the root initialized to the dir for
<bookmark>. The only reason to use this command over :NERDTree is for
the completion (which is for bookmarks rather than directories).
:NERDTreeToggle [<start-directory> | <bookmark>] *:NERDTreeToggle*
If a NERD tree already exists for this tab, it is reopened and rendered
again. If no NERD tree exists for this tab then this command acts the
same as the |:NERDTree| command.
:NERDTreeMirror *:NERDTreeMirror*
Shares an existing NERD tree, from another tab, in the current tab.
Changes made to one tree are reflected in both as they are actually the
same buffer.
If only one other NERD tree exists, that tree is automatically mirrored. If
more than one exists, the script will ask which tree to mirror.
:NERDTreeClose *:NERDTreeClose*
Close the NERD tree in this tab.
:NERDTreeFind *:NERDTreeFind*
Find the current file in the tree.
If no tree exists and the current file is under vim's CWD, then init a
tree at the CWD and reveal the file. Otherwise init a tree in the current
file's directory.
In any case, the current file is revealed and the cursor is placed on it.
:NERDTreeCWD *:NERDTreeCWD*
Change tree root to current directory. If no NERD tree exists for this
tab, a new tree will be opened.
------------------------------------------------------------------------------
2.2. Bookmarks *NERDTreeBookmarks*
Bookmarks in the NERD tree are a way to tag files or directories of interest.
For example, you could use bookmarks to tag all of your project directories.
------------------------------------------------------------------------------
2.2.1. The Bookmark Table *NERDTreeBookmarkTable*
If the bookmark table is active (see |NERDTree-B| and
|'NERDTreeShowBookmarks'|), it will be rendered above the tree. You can double
click bookmarks or use the |NERDTree-o| mapping to activate them. See also,
|NERDTree-t| and |NERDTree-T|
------------------------------------------------------------------------------
2.2.2. Bookmark commands *NERDTreeBookmarkCommands*
Note that the following commands are only available in the NERD tree buffer.
:Bookmark [<name>]
Bookmark the current node as <name>. If there is already a <name>
bookmark, it is overwritten. <name> must not contain spaces.
If <name> is not provided, it defaults to the file or directory name.
For directories, a trailing slash is present.
:BookmarkToRoot <bookmark>
Make the directory corresponding to <bookmark> the new root. If a treenode
corresponding to <bookmark> is already cached somewhere in the tree then
the current tree will be used, otherwise a fresh tree will be opened.
Note that if <bookmark> points to a file then its parent will be used
instead.
:RevealBookmark <bookmark>
If the node is cached under the current root then it will be revealed
(i.e. directory nodes above it will be opened) and the cursor will be
placed on it.
:OpenBookmark <bookmark>
<bookmark> must point to a file. The file is opened as though |NERDTree-o|
was applied. If the node is cached under the current root then it will be
revealed and the cursor will be placed on it.
:ClearBookmarks [<bookmarks>]
Remove all the given bookmarks. If no bookmarks are given then remove all
bookmarks on the current node.
:ClearAllBookmarks
Remove all bookmarks.
:ReadBookmarks
Re-read the bookmarks in the |'NERDTreeBookmarksFile'|.
See also |:NERDTree| and |:NERDTreeFromBookmark|.
------------------------------------------------------------------------------
2.2.3. Invalid Bookmarks *NERDTreeInvalidBookmarks*
If invalid bookmarks are detected, the script will issue an error message and
the invalid bookmarks will become unavailable for use.
These bookmarks will still be stored in the bookmarks file (see
|'NERDTreeBookmarksFile'|), down the bottom. There will always be a blank line
after the valid bookmarks but before the invalid ones.
Each line in the bookmarks file represents one bookmark. The proper format is:
<bookmark name><space><full path to the bookmark location>
After you have corrected any invalid bookmarks, either restart vim, or go
:ReadBookmarks from the NERD tree window.
------------------------------------------------------------------------------
2.3. NERD tree Mappings *NERDTreeMappings*
Default Description~ help-tag~
Key~
o.......Open files, directories and bookmarks....................|NERDTree-o|
go......Open selected file, but leave cursor in the NERDTree.....|NERDTree-go|
t.......Open selected node/bookmark in a new tab.................|NERDTree-t|
T.......Same as 't' but keep the focus on the current tab........|NERDTree-T|
i.......Open selected file in a split window.....................|NERDTree-i|
gi......Same as i, but leave the cursor on the NERDTree..........|NERDTree-gi|
s.......Open selected file in a new vsplit.......................|NERDTree-s|
gs......Same as s, but leave the cursor on the NERDTree..........|NERDTree-gs|
O.......Recursively open the selected directory..................|NERDTree-O|
x.......Close the current nodes parent...........................|NERDTree-x|
X.......Recursively close all children of the current node.......|NERDTree-X|
e.......Edit the current dir.....................................|NERDTree-e|
<CR>...............same as |NERDTree-o|.
double-click.......same as the |NERDTree-o| map.
middle-click.......same as |NERDTree-i| for files, same as
|NERDTree-e| for dirs.
D.......Delete the current bookmark .............................|NERDTree-D|
P.......Jump to the root node....................................|NERDTree-P|
p.......Jump to current nodes parent.............................|NERDTree-p|
K.......Jump up inside directories at the current tree depth.....|NERDTree-K|
J.......Jump down inside directories at the current tree depth...|NERDTree-J|
<C-J>...Jump down to the next sibling of the current directory...|NERDTree-C-J|
<C-K>...Jump up to the previous sibling of the current directory.|NERDTree-C-K|
C.......Change the tree root to the selected dir.................|NERDTree-C|
u.......Move the tree root up one directory......................|NERDTree-u|
U.......Same as 'u' except the old root node is left open........|NERDTree-U|
r.......Recursively refresh the current directory................|NERDTree-r|
R.......Recursively refresh the current root.....................|NERDTree-R|
m.......Display the NERD tree menu...............................|NERDTree-m|
cd......Change the CWD to the dir of the selected node...........|NERDTree-cd|
CD......Change tree root to the CWD..............................|NERDTree-CD|
I.......Toggle whether hidden files displayed....................|NERDTree-I|
f.......Toggle whether the file filters are used.................|NERDTree-f|
F.......Toggle whether files are displayed.......................|NERDTree-F|
B.......Toggle whether the bookmark table is displayed...........|NERDTree-B|
q.......Close the NERDTree window................................|NERDTree-q|
A.......Zoom (maximize/minimize) the NERDTree window.............|NERDTree-A|
?.......Toggle the display of the quick help.....................|NERDTree-?|
------------------------------------------------------------------------------
*NERDTree-o*
Default key: o
Map option: NERDTreeMapActivateNode
Applies to: files and directories.
If a file node is selected, it is opened in the previous window.
If a directory is selected it is opened or closed depending on its current
state.
If a bookmark that links to a directory is selected then that directory
becomes the new root.
If a bookmark that links to a file is selected then that file is opened in the
previous window.
------------------------------------------------------------------------------
*NERDTree-go*
Default key: go
Map option: None
Applies to: files.
If a file node is selected, it is opened in the previous window, but the
cursor does not move.
The key combo for this mapping is always "g" + NERDTreeMapActivateNode (see
|NERDTree-o|).
------------------------------------------------------------------------------
*NERDTree-t*
Default key: t
Map option: NERDTreeMapOpenInTab
Applies to: files and directories.
Opens the selected file in a new tab. If a directory is selected, a fresh
NERD Tree for that directory is opened in a new tab.
If a bookmark which points to a directory is selected, open a NERD tree for
that directory in a new tab. If the bookmark points to a file, open that file
in a new tab.
------------------------------------------------------------------------------
*NERDTree-T*
Default key: T
Map option: NERDTreeMapOpenInTabSilent
Applies to: files and directories.
The same as |NERDTree-t| except that the focus is kept in the current tab.
------------------------------------------------------------------------------
*NERDTree-i*
Default key: i
Map option: NERDTreeMapOpenSplit
Applies to: files.
Opens the selected file in a new split window and puts the cursor in the new
window.
------------------------------------------------------------------------------
*NERDTree-gi*
Default key: gi
Map option: None
Applies to: files.
The same as |NERDTree-i| except that the cursor is not moved.
The key combo for this mapping is always "g" + NERDTreeMapOpenSplit (see
|NERDTree-i|).
------------------------------------------------------------------------------
*NERDTree-s*
Default key: s
Map option: NERDTreeMapOpenVSplit
Applies to: files.
Opens the selected file in a new vertically split window and puts the cursor in
the new window.
------------------------------------------------------------------------------
*NERDTree-gs*
Default key: gs
Map option: None
Applies to: files.
The same as |NERDTree-s| except that the cursor is not moved.
The key combo for this mapping is always "g" + NERDTreeMapOpenVSplit (see
|NERDTree-s|).
------------------------------------------------------------------------------
*NERDTree-O*
Default key: O
Map option: NERDTreeMapOpenRecursively
Applies to: directories.
Recursively opens the selected directory.
All files and directories are cached, but if a directory would not be
displayed due to file filters (see |'NERDTreeIgnore'| |NERDTree-f|) or the
hidden file filter (see |'NERDTreeShowHidden'|) then its contents are not
cached. This is handy, especially if you have .svn directories.
------------------------------------------------------------------------------
*NERDTree-x*
Default key: x
Map option: NERDTreeMapCloseDir
Applies to: files and directories.
Closes the parent of the selected node.
------------------------------------------------------------------------------
*NERDTree-X*
Default key: X
Map option: NERDTreeMapCloseChildren
Applies to: directories.
Recursively closes all children of the selected directory.
Tip: To quickly "reset" the tree, use |NERDTree-P| with this mapping.
------------------------------------------------------------------------------
*NERDTree-e*
Default key: e
Map option: NERDTreeMapOpenExpl
Applies to: files and directories.
|:edit|s the selected directory, or the selected file's directory. This could
result in a NERD tree or a netrw being opened, depending on
|'NERDTreeHijackNetrw'|.
------------------------------------------------------------------------------
*NERDTree-D*
Default key: D
Map option: NERDTreeMapDeleteBookmark
Applies to: lines in the bookmarks table
Deletes the currently selected bookmark.
------------------------------------------------------------------------------
*NERDTree-P*
Default key: P
Map option: NERDTreeMapJumpRoot
Applies to: no restrictions.
Jump to the tree root.
------------------------------------------------------------------------------
*NERDTree-p*
Default key: p
Map option: NERDTreeMapJumpParent
Applies to: files and directories.
Jump to the parent node of the selected node.
------------------------------------------------------------------------------
*NERDTree-K*
Default key: K
Map option: NERDTreeMapJumpFirstChild
Applies to: files and directories.
Jump to the first child of the current nodes parent.
If the cursor is already on the first node then do the following:
* loop back thru the siblings of the current nodes parent until we find an
open dir with children
* go to the first child of that node
------------------------------------------------------------------------------
*NERDTree-J*
Default key: J
Map option: NERDTreeMapJumpLastChild
Applies to: files and directories.
Jump to the last child of the current nodes parent.
If the cursor is already on the last node then do the following:
* loop forward thru the siblings of the current nodes parent until we find
an open dir with children
* go to the last child of that node
------------------------------------------------------------------------------
*NERDTree-C-J*
Default key: <C-J>
Map option: NERDTreeMapJumpNextSibling
Applies to: files and directories.
Jump to the next sibling of the selected node.
------------------------------------------------------------------------------
*NERDTree-C-K*
Default key: <C-K>
Map option: NERDTreeMapJumpPrevSibling
Applies to: files and directories.
Jump to the previous sibling of the selected node.
------------------------------------------------------------------------------
*NERDTree-C*
Default key: C
Map option: NERDTreeMapChangeRoot
Applies to: files and directories.
Make the selected directory node the new tree root. If a file is selected, its
parent is used.
------------------------------------------------------------------------------
*NERDTree-u*
Default key: u
Map option: NERDTreeMapUpdir
Applies to: no restrictions.
Move the tree root up a dir (like doing a "cd ..").
------------------------------------------------------------------------------
*NERDTree-U*
Default key: U
Map option: NERDTreeMapUpdirKeepOpen
Applies to: no restrictions.
Like |NERDTree-u| except that the old tree root is kept open.
------------------------------------------------------------------------------
*NERDTree-r*
Default key: r
Map option: NERDTreeMapRefresh
Applies to: files and directories.
If a dir is selected, recursively refresh that dir, i.e. scan the filesystem
for changes and represent them in the tree.
If a file node is selected then the above is done on its parent.
------------------------------------------------------------------------------
*NERDTree-R*
Default key: R
Map option: NERDTreeMapRefreshRoot
Applies to: no restrictions.
Recursively refresh the tree root.
------------------------------------------------------------------------------
*NERDTree-m*
Default key: m
Map option: NERDTreeMapMenu
Applies to: files and directories.
Display the NERD tree menu. See |NERDTreeMenu| for details.
------------------------------------------------------------------------------
*NERDTree-cd*
Default key: cd
Map option: NERDTreeMapChdir
Applies to: files and directories.
Change vim's current working directory to that of the selected node.
------------------------------------------------------------------------------
*NERDTree-CD*
Default key: CD
Map option: NERDTreeMapCWD
Applies to: no restrictions.
Change tree root to vim's current working directory.
------------------------------------------------------------------------------
*NERDTree-I*
Default key: I
Map option: NERDTreeMapToggleHidden
Applies to: no restrictions.
Toggles whether hidden files (i.e. "dot files") are displayed.
------------------------------------------------------------------------------
*NERDTree-f*
Default key: f
Map option: NERDTreeMapToggleFilters
Applies to: no restrictions.
Toggles whether file filters are used. See |'NERDTreeIgnore'| for details.
------------------------------------------------------------------------------
*NERDTree-F*
Default key: F
Map option: NERDTreeMapToggleFiles
Applies to: no restrictions.
Toggles whether file nodes are displayed.
------------------------------------------------------------------------------
*NERDTree-B*
Default key: B
Map option: NERDTreeMapToggleBookmarks
Applies to: no restrictions.
Toggles whether the bookmarks table is displayed.
------------------------------------------------------------------------------
*NERDTree-q*
Default key: q
Map option: NERDTreeMapQuit
Applies to: no restrictions.
Closes the NERDtree window.
------------------------------------------------------------------------------
*NERDTree-A*
Default key: A
Map option: NERDTreeMapToggleZoom
Applies to: no restrictions.
Maximize (zoom) and minimize the NERDtree window.
------------------------------------------------------------------------------
*NERDTree-?*
Default key: ?
Map option: NERDTreeMapHelp
Applies to: no restrictions.
Toggles whether the quickhelp is displayed.
------------------------------------------------------------------------------
2.4. The NERD tree menu *NERDTreeMenu*
The NERD tree has a menu that can be programmed via the an API (see
|NERDTreeMenuAPI|). The idea is to simulate the "right click" menus that most
file explorers have.
The script comes with two default menu plugins: exec_menuitem.vim and
fs_menu.vim. fs_menu.vim adds some basic filesystem operations to the menu for
creating/deleting/moving/copying files and dirs. exec_menuitem.vim provides a
menu item to execute executable files.
Related tags: |NERDTree-m| |NERDTreeAPI|
==============================================================================
3. Customisation *NERDTreeOptions*
------------------------------------------------------------------------------
3.1. Customisation summary *NERDTreeOptionSummary*
The script provides the following options that can customise the behaviour the
NERD tree. These options should be set in your vimrc.
|'loaded_nerd_tree'| Turns off the script.
|'NERDTreeAutoCenter'| Controls whether the NERD tree window centers
when the cursor moves within a specified
distance to the top/bottom of the window.
|'NERDTreeAutoCenterThreshold'| Controls the sensitivity of autocentering.
|'NERDTreeCaseSensitiveSort'| Tells the NERD tree whether to be case
sensitive or not when sorting nodes.
|'NERDTreeSortHiddenFirst'| Tells the NERD tree whether to take the dot
at the beginning of the hidden file names
into account when sorting nodes.
|'NERDTreeChDirMode'| Tells the NERD tree if/when it should change
vim's current working directory.
|'NERDTreeHighlightCursorline'| Tell the NERD tree whether to highlight the
current cursor line.
|'NERDTreeHijackNetrw'| Tell the NERD tree whether to replace the netrw
autocommands for exploring local directories.
|'NERDTreeIgnore'| Tells the NERD tree which files to ignore.
|'NERDTreeRespectWildIgnore'| Tells the NERD tree to respect |'wildignore'|.
|'NERDTreeBookmarksFile'| Where the bookmarks are stored.
|'NERDTreeBookmarksSort'| Whether the bookmarks list is sorted on
display.
|'NERDTreeMouseMode'| Tells the NERD tree how to handle mouse
clicks.
|'NERDTreeQuitOnOpen'| Closes the tree window after opening a file.
|'NERDTreeShowBookmarks'| Tells the NERD tree whether to display the
bookmarks table on startup.
|'NERDTreeShowFiles'| Tells the NERD tree whether to display files
in the tree on startup.
|'NERDTreeShowHidden'| Tells the NERD tree whether to display hidden
files on startup.
|'NERDTreeShowLineNumbers'| Tells the NERD tree whether to display line
numbers in the tree window.
|'NERDTreeSortOrder'| Tell the NERD tree how to sort the nodes in
the tree.
|'NERDTreeStatusline'| Set a statusline for NERD tree windows.
|'NERDTreeWinPos'| Tells the script where to put the NERD tree
window.
|'NERDTreeWinSize'| Sets the window size when the NERD tree is
opened.
|'NERDTreeMinimalUI'| Disables display of the 'Bookmarks' label and
'Press ? for help' text.
|'NERDTreeDirArrows'| Tells the NERD tree to use arrows instead of
+ ~ chars when displaying directories.
|'NERDTreeCascadeOpenSingleChildDir'|
Cascade open while selected directory has only
one child that also is a directory.
|'NERDTreeAutoDeleteBuffer'| Tells the NERD tree to automatically remove
a buffer when a file is being deleted or renamed
via a context menu command.
------------------------------------------------------------------------------
3.2. Customisation details *NERDTreeOptionDetails*
To enable any of the below options you should put the given line in your
~/.vimrc
*'loaded_nerd_tree'*
If this plugin is making you feel homicidal, it may be a good idea to turn it
off with this line in your vimrc: >
let loaded_nerd_tree=1
<
------------------------------------------------------------------------------
*'NERDTreeAutoCenter'*
Values: 0 or 1.
Default: 1
If set to 1, the NERD tree window will center around the cursor if it moves to
within |'NERDTreeAutoCenterThreshold'| lines of the top/bottom of the window.
This is ONLY done in response to tree navigation mappings,
i.e. |NERDTree-J| |NERDTree-K| |NERDTree-C-J| |NERDTree-C-K| |NERDTree-p|
|NERDTree-P|
The centering is done with a |zz| operation.
------------------------------------------------------------------------------
*'NERDTreeAutoCenterThreshold'*
Values: Any natural number.
Default: 3
This option controls the "sensitivity" of the NERD tree auto centering. See
|'NERDTreeAutoCenter'| for details.
------------------------------------------------------------------------------
*'NERDTreeCaseSensitiveSort'*
Values: 0 or 1.
Default: 0.
By default the NERD tree does not sort nodes case sensitively, i.e. nodes
could appear like this: >
bar.c
Baz.c
blarg.c
boner.c
Foo.c
<
But, if you set this option to 1 then the case of the nodes will be taken into
account. The above nodes would then be sorted like this: >
Baz.c
Foo.c
bar.c
blarg.c
boner.c
<
------------------------------------------------------------------------------
*'NERDTreeChDirMode'*
Values: 0, 1 or 2.
Default: 0.
Use this option to tell the script when (if at all) to change the current
working directory (CWD) for vim.
If it is set to 0 then the CWD is never changed by the NERD tree.
If set to 1 then the CWD is changed when the NERD tree is first loaded to the
directory it is initialized in. For example, if you start the NERD tree with >
:NERDTree /home/marty/foobar
<
then the CWD will be changed to /home/marty/foobar and will not be changed
again unless you init another NERD tree with a similar command.
If the option is set to 2 then it behaves the same as if set to 1 except that
the CWD is changed whenever the tree root is changed. For example, if the CWD
is /home/marty/foobar and you make the node for /home/marty/foobar/baz the new
root then the CWD will become /home/marty/foobar/baz.
------------------------------------------------------------------------------
*'NERDTreeHighlightCursorline'*
Values: 0 or 1.
Default: 1.
If set to 1, the current cursor line in the NERD tree buffer will be
highlighted. This is done using the |'cursorline'| option.
------------------------------------------------------------------------------
*'NERDTreeHijackNetrw'*
Values: 0 or 1.
Default: 1.
If set to 1, doing a >
:edit <some directory>
<
will open up a "secondary" NERD tree instead of a netrw in the target window.
Secondary NERD trees behaves slightly different from a regular trees in the
following respects:
1. 'o' will open the selected file in the same window as the tree,
replacing it.
2. you can have as many secondary tree as you want in the same tab.
------------------------------------------------------------------------------
*'NERDTreeIgnore'*
Values: a list of regular expressions.
Default: ['\~$'].
This option is used to specify which files the NERD tree should ignore. It
must be a list of regular expressions. When the NERD tree is rendered, any
files/dirs that match any of the regex's in 'NERDTreeIgnore' wont be
displayed.
For example if you put the following line in your vimrc: >
let NERDTreeIgnore=['\.vim$', '\~$']
<
then all files ending in .vim or ~ will be ignored.
There are 2 magic flags that can be appended to the end of each regular
expression to specify that the regex should match only files or only dirs.
These flags are "[[dir]]" and "[[file]]". Example: >
let NERDTreeIgnore=['.d$[[dir]]', '.o$[[file]]']
<
This will cause all dirs ending in ".d" to be ignored and all files ending in
".o" to be ignored.
Note: to tell the NERD tree not to ignore any files you must use the following
line: >
let NERDTreeIgnore=[]
<
The file filters can be turned on and off dynamically with the |NERDTree-f|
mapping.
------------------------------------------------------------------------------
*'NERDTreeRespectWildIgnore'*
Values: 0 or 1.
Default: 0.
If set to 1, the |'wildignore'| setting is respected.
------------------------------------------------------------------------------
*'NERDTreeBookmarksFile'*
Values: a path
Default: $HOME/.NERDTreeBookmarks
This is where bookmarks are saved. See |NERDTreeBookmarkCommands|.
------------------------------------------------------------------------------
*'NERDTreeBookmarksSort'*
Values: 0 or 1
Default: 1
If set to 0 then the bookmarks list is not sorted.
If set to 1 the bookmarks list is sorted.
------------------------------------------------------------------------------
*'NERDTreeMouseMode'*
Values: 1, 2 or 3.
Default: 1.
If set to 1 then a double click on a node is required to open it.
If set to 2 then a single click will open directory nodes, while a double
click will still be required for file nodes.
If set to 3 then a single click will open any node.
Note: a double click anywhere on a line that a tree node is on will
activate it, but all single-click activations must be done on name of the node
itself. For example, if you have the following node: >
| | |-application.rb
<
then (to single click activate it) you must click somewhere in
'application.rb'.
------------------------------------------------------------------------------
*'NERDTreeQuitOnOpen'*
Values: 0 or 1.
Default: 0
If set to 1, the NERD tree window will close after opening a file with the
|NERDTree-o|, |NERDTree-i|, |NERDTree-t| and |NERDTree-T| mappings.
------------------------------------------------------------------------------
*'NERDTreeShowBookmarks'*
Values: 0 or 1.
Default: 0.
If this option is set to 1 then the bookmarks table will be displayed.
This option can be toggled dynamically, per tree, with the |NERDTree-B|
mapping.
------------------------------------------------------------------------------
*'NERDTreeShowFiles'*
Values: 0 or 1.
Default: 1.
If this option is set to 1 then files are displayed in the NERD tree. If it is
set to 0 then only directories are displayed.
This option can be toggled dynamically, per tree, with the |NERDTree-F|
mapping and is useful for drastically shrinking the tree when you are
navigating to a different part of the tree.
------------------------------------------------------------------------------
*'NERDTreeShowHidden'*
Values: 0 or 1.
Default: 0.
This option tells vim whether to display hidden files by default. This option
can be dynamically toggled, per tree, with the |NERDTree-I| mapping. Use one
of the following lines to set this option: >
let NERDTreeShowHidden=0
let NERDTreeShowHidden=1
<
------------------------------------------------------------------------------
*'NERDTreeShowLineNumbers'*
Values: 0 or 1.
Default: 0.
This option tells vim whether to display line numbers for the NERD tree
window. Use one of the following lines to set this option: >
let NERDTreeShowLineNumbers=0
let NERDTreeShowLineNumbers=1
<
------------------------------------------------------------------------------
*'NERDTreeSortOrder'*
Values: a list of regular expressions.
Default: ['\/$', '*', '\.swp$', '\.bak$', '\~$']
This option is set to a list of regular expressions which are used to
specify the order of nodes under their parent.
For example, if the option is set to: >
['\.vim$', '\.c$', '\.h$', '*', 'foobar']
<
then all .vim files will be placed at the top, followed by all .c files then
all .h files. All files containing the string 'foobar' will be placed at the
end. The star is a special flag: it tells the script that every node that
doesn't match any of the other regexps should be placed here.
If no star is present in 'NERDTreeSortOrder' then one is automatically
appended to the array.
The regex '\/$' should be used to match directory nodes.
After this sorting is done, the files in each group are sorted alphabetically.
Other examples: >
(1) ['*', '\/$']
(2) []
(3) ['\/$', '\.rb$', '\.php$', '*', '\.swp$', '\.bak$', '\~$']
<
1. Directories will appear last, everything else will appear above.
2. Everything will simply appear in alphabetical order.
3. Dirs will appear first, then ruby and php. Swap files, bak files and vim
backup files will appear last with everything else preceding them.
------------------------------------------------------------------------------
*'NERDTreeStatusline'*
Values: Any valid statusline setting.
Default: %{b:NERDTreeRoot.path.strForOS(0)}
Tells the script what to use as the |'statusline'| setting for NERD tree
windows.
Note that the statusline is set using |:let-&| not |:set| so escaping spaces
isn't necessary.
Setting this option to -1 will deactivate it so that your global
statusline setting is used instead.
------------------------------------------------------------------------------
*'NERDTreeWinPos'*
Values: "left" or "right"
Default: "left".
This option is used to determine where NERD tree window is placed on the
screen.
This option makes it possible to use two different explorer plugins
simultaneously. For example, you could have the taglist plugin on the left of
the window and the NERD tree on the right.
------------------------------------------------------------------------------
*'NERDTreeWinSize'*
Values: a positive integer.
Default: 31.
This option is used to change the size of the NERD tree when it is loaded.
------------------------------------------------------------------------------
*'NERDTreeMinimalUI'*
Values: 0 or 1
Default: 0
This option disables the 'Bookmarks' label and the 'Press ? for help' text. Use one
of the following lines to set this option: >
let NERDTreeMinimalUI=0
let NERDTreeMinimalUI=1
<
------------------------------------------------------------------------------
*'NERDTreeDirArrows'*
Values: 0 or 1
Default: 0.
This option is used to change the default look of directory nodes displayed in
the tree. When set to 0 it shows old-school bars (|), + and ~ chars. If set to
1 it shows right and down arrows. Use one of the following lines to set this
option: >
let NERDTreeDirArrows=0
let NERDTreeDirArrows=1
<
------------------------------------------------------------------------------
*'NERDTreeCascadeOpenSingleChildDir'*
Values: 0 or 1
Default: 1.
When opening dir nodes, this option tells NERDTree to recursively open dirs
that have only one child which is also a dir. NERDTree will stop when it finds
a dir that contains anything but another single dir. This option also causes
the |NERDTree-x| mapping to close dirs in the same manner. This option may be
useful for Java projects. Use one of the following lines to set this option: >
let NERDTreeCascadeOpenSingleChildDir=0
let NERDTreeCascadeOpenSingleChildDir=1
<
------------------------------------------------------------------------------
*'NERDTreeAutoDeleteBuffer'*
Values: 0 or 1
Default: 0.
When using a context menu to delete or rename a file you may also want to delete
the buffer which is no longer valid. If the option is not set you will see a
confirmation asking whether you really want to delete an old buffer. If you
always press 'y', then it is worth setting this option to 1. Use one of the
following lines to set this
option: >
let NERDTreeAutoDeleteBuffer=0
let NERDTreeAutoDeleteBuffer=1
<
==============================================================================
4. The NERD tree API *NERDTreeAPI*
The NERD tree script allows you to add custom key mappings and menu items via
a set of API calls. Any scripts that use this API should be placed in
~/.vim/nerdtree_plugin/ (*nix) or ~/vimfiles/nerdtree_plugin (windows).
The script exposes some prototype objects that can be used to manipulate the
tree and/or get information from it: >
g:NERDTreePath
g:NERDTreeDirNode
g:NERDTreeFileNode
g:NERDTreeBookmark
<
See the code/comments in NERD_tree.vim to find how to use these objects. The
following code conventions are used:
* class members start with a capital letter
* instance members start with a lower case letter
* private members start with an underscore
See this blog post for more details:
http://got-ravings.blogspot.com/2008/09/vim-pr0n-prototype-based-objects.html
------------------------------------------------------------------------------
4.1. Key map API *NERDTreeKeymapAPI*
NERDTreeAddKeyMap({options}) *NERDTreeAddKeyMap()*
Adds a new keymapping for all NERD tree buffers.
{options} must be a dictionary, and must contain the following keys:
"key" - the trigger key for the new mapping
"callback" - the function the new mapping will be bound to
"quickhelpText" - the text that will appear in the quickhelp (see
|NERDTree-?|)
"override" - if 1 then this new mapping will override whatever previous
mapping was defined for the key/scope combo. Useful for overriding the
default mappings.
Additionally, a "scope" argument may be supplied. This constrains the
mapping so that it is only activated if the cursor is on a certain object.
That object is then passed into the handling method. Possible values are:
"FileNode" - a file node
"DirNode" - a directory node
"Node" - a file or directory node
"Bookmark" - A bookmark
"all" - the keymap is not constrained to any scope (default). When
      this is used, the handling function is not passed any arguments.
Example: >
call NERDTreeAddKeyMap({
\ 'key': 'foo',
\ 'callback': 'NERDTreeCDHandler',
\ 'quickhelpText': 'echo full path of current node',
\ 'scope': 'DirNode' })
function! NERDTreeCDHandler(dirnode)
call a:dirnode.changeToDir()
endfunction
<
This code should sit in a file like ~/.vim/nerdtree_plugin/mymapping.vim.
It adds a (redundant) mapping on 'foo' which changes vim's CWD to that of
the current dir node. Note this mapping will only fire when the cursor is
on a directory node.
------------------------------------------------------------------------------
4.2. Menu API *NERDTreeMenuAPI*
NERDTreeAddSubmenu({options}) *NERDTreeAddSubmenu()*
Creates and returns a new submenu.
{options} must be a dictionary and must contain the following keys:
"text" - the text of the submenu that the user will see
"shortcut" - a shortcut key for the submenu (need not be unique)
The following keys are optional:
"isActiveCallback" - a function that will be called to determine whether
this submenu item will be displayed or not. The callback function must return
0 or 1.
"parent" - the parent submenu of the new submenu (returned from a previous
invocation of NERDTreeAddSubmenu()). If this key is left out then the new
submenu will sit under the top level menu.
See below for an example.
NERDTreeAddMenuItem({options}) *NERDTreeAddMenuItem()*
Adds a new menu item to the NERD tree menu (see |NERDTreeMenu|).
{options} must be a dictionary and must contain the
following keys:
"text" - the text of the menu item which the user will see
"shortcut" - a shortcut key for the menu item (need not be unique)
"callback" - the function that will be called when the user activates the
menu item.
The following keys are optional:
"isActiveCallback" - a function that will be called to determine whether
this menu item will be displayed or not. The callback function must return
0 or 1.
"parent" - if the menu item belongs under a submenu then this key must be
specified. This value for this key will be the object that
was returned when the submenu was created with |NERDTreeAddSubmenu()|.
See below for an example.
NERDTreeAddMenuSeparator([{options}]) *NERDTreeAddMenuSeparator()*
Adds a menu separator (a row of dashes).
{options} is an optional dictionary that may contain the following keys:
"isActiveCallback" - see description in |NERDTreeAddMenuItem()|.
Below is an example of the menu API in action. >
call NERDTreeAddMenuSeparator()
call NERDTreeAddMenuItem({
\ 'text': 'a (t)op level menu item',
\ 'shortcut': 't',
\ 'callback': 'SomeFunction' })
let submenu = NERDTreeAddSubmenu({
\ 'text': 'a (s)ub menu',
\ 'shortcut': 's' })
call NERDTreeAddMenuItem({
\ 'text': '(n)ested item 1',
\ 'shortcut': 'n',
\ 'callback': 'SomeFunction',
\ 'parent': submenu })
call NERDTreeAddMenuItem({
\ 'text': '(n)ested item 2',
\ 'shortcut': 'n',
\ 'callback': 'SomeFunction',
\ 'parent': submenu })
<
This will create the following menu: >
--------------------
a (t)op level menu item
a (s)ub menu
<
Where selecting "a (s)ub menu" will lead to a second menu: >
(n)ested item 1
(n)ested item 2
<
When any of the 3 concrete menu items are selected the function "SomeFunction"
will be called.
------------------------------------------------------------------------------
4.3 NERDTreeAddPathFilter(callback) *NERDTreeAddPathFilter()*
Path filters are essentially a more powerful version of |NERDTreeIgnore|.
If the simple regex matching in |NERDTreeIgnore| is not enough then use
|NERDTreeAddPathFilter()| to add a callback function that paths will be
checked against when the decision to ignore them is made. Example >
call NERDTreeAddPathFilter('MyFilter')
function! MyFilter(params)
"params is a dict containing keys: 'nerdtree' and 'path' which are
"g:NERDTree and g:NERDTreePath objects
"return 1 to ignore params['path'] or 0 otherwise
endfunction
<
------------------------------------------------------------------------------
4.4 Path Listener API *NERDTreePathListenerAPI*
Use this API if you want to run a callback for events on Path objects. E.G >
call g:NERDTreePathNotifier.AddListener("init", "MyListener")
"....
function! MyListener(event)
"This function will be called whenever a Path object is created.
"a:event is an object that contains a bunch of relevant info -
"including the path in question. See lib/nerdtree/event.vim for details.
endfunction
<
Current events supported:
init ~
refresh ~
refreshFlags ~
------------------------------------------------------------------------------
NERDTreeRender() *NERDTreeRender()*
Re-renders the NERD tree buffer. Useful if you change the state of the
tree and you want it to be reflected in the UI.
==============================================================================
5. About *NERDTreeAbout*
The author of the NERD tree is a terrible terrible monster called Martyzilla
who gobbles up small children with milk and sugar for breakfast.
He can be reached at martin.grenfell at gmail dot com. He would love to hear
from you, so feel free to send him suggestions and/or comments about this
plugin. Don't be shy --- the worst he can do is slaughter you and stuff you in
the fridge for later ;)
The latest stable versions can be found at
http://www.vim.org/scripts/script.php?script_id=1658
The latest dev versions are on github
http://github.com/scrooloose/nerdtree
==============================================================================
6. License *NERDTreeLicense*
The NERD tree is released under the wtfpl.
See http://sam.zoy.org/wtfpl/COPYING.
| {
"pile_set_name": "Github"
} |
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.openapi.editor;
import com.intellij.openapi.Disposable;
import com.intellij.openapi.util.NlsSafe;
import com.intellij.openapi.util.UserDataHolderEx;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
/**
 * Represents a specific caret instance in the editor.
 * Provides methods to query and modify caret position and caret's associated selection.
 * <p>
 * Instances of this interface are supposed to be obtained from {@link CaretModel} instance, and not created explicitly.
 */
public interface Caret extends UserDataHolderEx, Disposable {
  /**
   * Returns an instance of Editor, current caret belongs to.
   */
  @NotNull
  Editor getEditor();
  /**
   * Returns an instance of CaretModel, current caret is associated with.
   */
  @NotNull
  CaretModel getCaretModel();
  /**
   * Tells whether this caret is valid, i.e. recognized by the caret model currently. Caret is valid since its creation till its
   * removal from caret model.
   *
   * @see CaretModel#addCaret(VisualPosition)
   * @see CaretModel#removeCaret(Caret)
   */
  boolean isValid();
  /**
   * Moves the caret by the specified number of lines and/or columns.
   *
   * @param columnShift    the number of columns to move the caret by.
   * @param lineShift      the number of lines to move the caret by.
   * @param withSelection  if true, the caret move should extend the selection in the document.
   * @param scrollToCaret  if true, the document should be scrolled so that the caret is visible after the move.
   */
  void moveCaretRelatively(int columnShift,
                           int lineShift,
                           boolean withSelection,
                           boolean scrollToCaret);
  /**
   * Moves the caret to the specified logical position.
   * If corresponding position is in the folded region currently, the region will be expanded.
   *
   * @param pos the position to move to.
   */
  void moveToLogicalPosition(@NotNull LogicalPosition pos);
  /**
   * Moves the caret to the specified visual position.
   *
   * @param pos the position to move to.
   */
  void moveToVisualPosition(@NotNull VisualPosition pos);
  /**
   * Shorthand for calling {@link #moveToOffset(int, boolean)} with {@code 'false'} as a second argument.
   *
   * @param offset the offset to move to
   */
  void moveToOffset(int offset);
  /**
   * Moves the caret to the specified offset in the document.
   * If corresponding position is in the folded region currently, the region will be expanded.
   *
   * @param offset                  the offset to move to.
   * @param locateBeforeSoftWrap    there is a possible case that there is a soft wrap at the given offset, hence, the same offset
   *                                corresponds to two different visual positions - just before soft wrap and just after soft wrap.
   *                                We may want to clearly indicate where to put the caret then. The given parameter allows
   *                                specifying that.
   *                                <b>Note:</b> it's ignored if there is no soft wrap at the given offset
   */
  void moveToOffset(int offset, boolean locateBeforeSoftWrap);
  /**
   * Tells whether caret is in consistent state currently. This might not be the case during document update, but client code can
   * observe such a state only in specific circumstances. So unless you're implementing very low-level editor logic (involving
   * {@code PrioritizedDocumentListener}), you don't need this method - you'll only see it return {@code true}.
   */
  boolean isUpToDate();
  /**
   * Returns the logical position of the caret.
   *
   * @return the caret position.
   */
  @NotNull
  LogicalPosition getLogicalPosition();
  /**
   * Returns the visual position of the caret.
   *
   * @return the caret position.
   */
  @NotNull
  VisualPosition getVisualPosition();
  /**
   * Returns the offset of the caret in the document. Returns 0 for a disposed (invalid) caret.
   *
   * @return the caret offset.
   *
   * @see #isValid()
   */
  int getOffset();
  /**
   * @return document offset for the start of the visual line where caret is located
   */
  int getVisualLineStart();
  /**
   * @return document offset that points to the first symbol shown at the next visual line after the one with caret on it
   */
  int getVisualLineEnd();
  /**
   * Returns the start offset in the document of the selected text range, or the caret
   * position if there is currently no selection.
   *
   * @return the selection start offset.
   */
  int getSelectionStart();
  /**
   * @return object that encapsulates information about visual position of selected text start if any
   */
  @NotNull
  VisualPosition getSelectionStartPosition();
  /**
   * Returns the end offset in the document of the selected text range, or the caret
   * position if there is currently no selection.
   *
   * @return the selection end offset.
   */
  int getSelectionEnd();
  /**
   * @return object that encapsulates information about visual position of selected text end if any;
   */
  @NotNull
  VisualPosition getSelectionEndPosition();
  /**
   * Returns the text selected in the editor.
   *
   * @return the selected text, or null if there is currently no selection.
   */
  @Nullable
  @NlsSafe String getSelectedText();
  /**
   * Returns the offset from which the user started to extend the selection (the selection start
   * if the selection was extended in forward direction, or the selection end if it was
   * extended backward).
   *
   * @return the offset from which the selection was started, or the caret offset if there is
   *         currently no selection.
   */
  int getLeadSelectionOffset();
  /**
   * @return object that encapsulates information about visual position from which the user started to extend the selection if any
   */
  @NotNull
  VisualPosition getLeadSelectionPosition();
  /**
   * Checks if a range of text is currently selected.
   *
   * @return true if a range of text is selected, false otherwise.
   */
  boolean hasSelection();
  /**
   * Selects the specified range of text.
   * <p>
   * System selection will be updated, if such feature is supported by current editor.
   *
   * @param startOffset the start offset of the text range to select.
   * @param endOffset   the end offset of the text range to select.
   */
  void setSelection(int startOffset, int endOffset);
  /**
   * Selects the specified range of text.
   *
   * @param startOffset the start offset of the text range to select.
   * @param endOffset   the end offset of the text range to select.
   * @param updateSystemSelection whether system selection should be updated (might not have any effect if current editor doesn't support such a feature)
   */
  void setSelection(int startOffset, int endOffset, boolean updateSystemSelection);
  /**
   * Selects target range providing information about visual boundary of selection end.
   * <p/>
   * That is the case for soft wraps-aware processing where the whole soft wraps virtual space is matched to the same offset.
   * <p/>
   * Also, in column mode this method allows to create selection spanning virtual space after the line end.
   * <p>
   * System selection will be updated, if such feature is supported by current editor.
   *
   * @param startOffset     start selection offset
   * @param endPosition     end visual position of the text range to select ({@code null} argument means that
   *                        no specific visual position should be used)
   * @param endOffset       end selection offset
   */
  void setSelection(int startOffset, @Nullable VisualPosition endPosition, int endOffset);
  /**
   * Selects target range based on its visual boundaries.
   * <p/>
   * That is the case for soft wraps-aware processing where the whole soft wraps virtual space is matched to the same offset.
   * <p/>
   * Also, in column mode this method allows to create selection spanning virtual space after the line end.
   * <p>
   * System selection will be updated, if such feature is supported by current editor.
   *
   * @param startPosition   start visual position of the text range to select ({@code null} argument means that
   *                        no specific visual position should be used)
   * @param endPosition     end visual position of the text range to select ({@code null} argument means that
   *                        no specific visual position should be used)
   * @param startOffset     start selection offset
   * @param endOffset       end selection offset
   */
  void setSelection(@Nullable VisualPosition startPosition, int startOffset, @Nullable VisualPosition endPosition, int endOffset);
  /**
   * Selects target range based on its visual boundaries.
   * <p/>
   * That is the case for soft wraps-aware processing where the whole soft wraps virtual space is matched to the same offset.
   * <p/>
   * Also, in column mode this method allows to create selection spanning virtual space after the line end.
   *
   * @param startPosition   start visual position of the text range to select ({@code null} argument means that
   *                        no specific visual position should be used)
   * @param endPosition     end visual position of the text range to select ({@code null} argument means that
   *                        no specific visual position should be used)
   * @param startOffset     start selection offset
   * @param endOffset       end selection offset
   * @param updateSystemSelection whether system selection should be updated (might not have any effect if current editor doesn't support such a feature)
   */
  void setSelection(@Nullable VisualPosition startPosition, int startOffset, @Nullable VisualPosition endPosition, int endOffset, boolean updateSystemSelection);
  /**
   * Removes the selection in the editor.
   */
  void removeSelection();
  /**
   * Selects the entire line of text at the caret position.
   */
  void selectLineAtCaret();
  /**
   * Selects the entire word at the caret position, optionally using camel-case rules to
   * determine word boundaries.
   *
   * @param honorCamelWordsSettings if true and "Use CamelHumps words" is enabled,
   *                                upper-case letters within the word are considered as
   *                                boundaries for the range of text to select.
   */
  void selectWordAtCaret(boolean honorCamelWordsSettings);
  /**
   * Clones the current caret and positions the new one right above or below the current one. If current caret has selection, corresponding
   * selection will be set for the new caret.
   *
   * @param above if {@code true}, new caret will be created at the previous line, if {@code false} - on the next line
   * @return newly created caret instance, or {@code null} if the caret cannot be created because it already exists at the new location
   * or maximum supported number of carets already exists in editor ({@link CaretModel#getMaxCaretCount()}).
   */
  @Nullable
  Caret clone(boolean above);
  /**
   * Returns {@code true} if caret is located in RTL text fragment. In that case visual column number is inversely related
   * to offset and logical column number in the vicinity of caret.
   */
  boolean isAtRtlLocation();
  /**
   * Returns {@code true} if caret is located at a boundary between different runs of bidirectional text.
   * This means that text fragments at different sides of the boundary are non-adjacent in logical order.
   * Caret can be located at any side of the boundary;
   * exact location can be determined from directionality flags of caret's logical and visual position
   * ({@link LogicalPosition#leansForward} and {@link VisualPosition#leansRight}).
   */
  boolean isAtBidiRunBoundary();
  /**
   * Returns visual attributes currently set for the caret.
   *
   * @see #setVisualAttributes(CaretVisualAttributes)
   */
  @NotNull
  CaretVisualAttributes getVisualAttributes();
  /**
   * Sets caret's current visual attributes. This can have no effect if editor doesn't support changing caret's visual appearance.
   *
   * @see #getVisualAttributes()
   */
  void setVisualAttributes(@NotNull CaretVisualAttributes attributes);
}
| {
"pile_set_name": "Github"
} |
<?php

/**
 * This file is part of the Carbon package.
 *
 * (c) Brian Nesbitt <[email protected]>
 *
 * For the full copyright and license information, please view the LICENSE
 * file that was distributed with this source code.
 */

/*
 * Authors:
 * - Roy
 * - Stephan
 * - François B
 * - Tim Fish
 * - Kevin Huang
 * - Jacob Middag
 * - JD Isaacks
 * - Propaganistas
 */

// Load the base Dutch (nl) locale definition, then recursively merge in the
// overrides below — only the short date format 'L' differs from the base.
$baseLocale = require __DIR__.'/nl.php';

$overrides = [
    'formats' => [
        'L' => 'DD/MM/YYYY',
    ],
];

return array_replace_recursive($baseLocale, $overrides);
| {
"pile_set_name": "Github"
} |
/****************************************************************************
* arch/risc-v/src/fe310/fe310_memorymap.h
*
* Copyright (C) 2019 Masayuki Ishikawa. All rights reserved.
* Author: Masayuki Ishikawa <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
#ifndef _ARCH_RISCV_SRC_FE310_FE310_MEMORYMAP_H
#define _ARCH_RISCV_SRC_FE310_FE310_MEMORYMAP_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include "hardware/fe310_memorymap.h"
#include "hardware/fe310_uart.h"
#include "hardware/fe310_clint.h"
#include "hardware/fe310_gpio.h"
#include "hardware/fe310_plic.h"
#include "hardware/fe310_prci.h"

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

/* The IDLE thread stack starts at the end of .bss (the linker-provided
 * _ebss symbol).  In C the symbol's address is converted to an integer;
 * the expansion is fully parenthesized so the macro is safe to use inside
 * any larger expression.  In assembly the bare symbol is used directly.
 */

#ifndef __ASSEMBLY__
#define FE310_IDLESTACK_BASE  ((uint32_t)&_ebss)
#else
#define FE310_IDLESTACK_BASE  _ebss
#endif

/* Configured IDLE stack size, rounded down to a 4-byte boundary */

#define FE310_IDLESTACK_SIZE  (CONFIG_IDLETHREAD_STACKSIZE & ~3)

/* Initial stack pointer for the IDLE thread (stack grows downward) —
 * NOTE(review): "grows downward" is the usual RISC-V convention; confirm
 * against up_initial_state() in this port.
 */

#define FE310_IDLESTACK_TOP   (FE310_IDLESTACK_BASE + FE310_IDLESTACK_SIZE)

#endif /* _ARCH_RISCV_SRC_FE310_FE310_MEMORYMAP_H */
| {
"pile_set_name": "Github"
} |
# Функциональное программирование
* Изучить книгу [Professor Frisby's Mostly Adequate Guide to Functional Programming](https://github.com/MostlyAdequate/mostly-adequate-guide) от Franklin Frisby (8-12 главы включительно).
* [Перевод](https://github.com/MostlyAdequate/mostly-adequate-guide-ru)
* Изучить книгу [Functional-Light JavaScript](https://github.com/getify/Functional-Light-JS) от Kyle Simpson (9-11 главы, плюс дополнения).
* Как реализовать функционал объектов через замыкания и наоборот? В чем преимущества каждого способа?
* Как можно создать приватные значения в функции? Как можно сделать их иммутабельными?
* Что такое категории? Что такое морфизмы?
* Что такое изоморфизм?
* Что такое функтор?
* Как можно избежать лишних проходов по массиву?
* Что такое ленивые вычисления? В чем преимущества и недостатки ленивых вычислений?
* Что такое трансдукция? Какие проблемы можно решить с помощью данного инструмента?
* Что такое pointed функтор?
* Что такое монады? Для чего они используются? Что делают приведенные ниже монады?
* `Maybe`
* `Either`
* `IO`
* Что такое ассоциативность?
* Что такое аппликативный функтор? Какова область его применения?
* Что делают приведенные ниже утилиты?
* `identity`
* `partial`/`partialRight`
* `prop`
* `curry`/`uncurry`
* `cond`
* `flatMap`/`chain`
* Immutability and state
* Может ли состояние программы быть иммутабельным?
* В чём проблема хранения состояния программы?
* Почему мутабельное состояние затрудняет соблюдение гарантии инвариантов на протяжении жизненного цикла программы?
* Почему мутабельность накладывает дополнительные требования к порядку выполнения функций? Как этого можно избежать?
* Опишите следующие модели состояния. Как их можно ранжировать по степени опасности?
* Невидимое клиенту изменяемое состояние
* Инкапсулированное изменяемое состояние
* Невидимое программисту изменяемое состояние
* Двухфазный цикл жизни
* Разделяемое между несколькими процессами изменяемое состояние
* Отсутствие изменяемого состояния
* Неинкапсулированное изменяемое состояние
* Управляемое изменяемое состояние
* Монотонное изменяемое состояние
### Ресурсы
* [Embracing Immutable Architecture](https://medium.com/react-weekly/embracing-immutable-architecture-dc04e3f08543)
* [Изменяемое состояние: опасности и борьба с ними](http://fprog.ru/2009/issue1/eugene-kirpichov-fighting-mutable-state/)
* [Immutable Data Structures and JavaScript](https://jlongster.com/Using-Immutable-Data-Structures-in-JavaScript#Immutable.js)
* [Mutability Leads to Suffering](https://hackernoon.com/mutability-leads-to-suffering-23671a0def6a)
* [The discussion thread about mutability](http://lambda-the-ultimate.org/node/724#comment-6580)
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="ISO-8859-1"?>
<!--
This is the document type descriptor for the
org.eclipse.jetty.xml.XmlConfiguration class. It allows a java object to be
configured by with a sequence of Set, Put and Call elements. These tags are
mapped to methods on the object to be configured as follows:
<Set name="Test">value</Set> == obj.setTest("value");
<Put name="Test">value</Put> == obj.put("Test","value");
<Call name="test"><Arg>value</Arg></Call> == obj.test("value");
Values themselves may be configured objects that are created with the
<New> tag or returned from a <Call> tag.
Values are matched to arguments on a best effort approach, but types
may be specified if a match is not achieved.
-->
<!ENTITY % CONFIG "Set|Get|Put|Call|New|Ref|Array|Map|Property">
<!ENTITY % VALUE "#PCDATA|Get|Call|New|Ref|Array|Map|SystemProperty|Env|Property">
<!ENTITY % TYPEATTR "type CDATA #IMPLIED " > <!-- String|Character|Short|Byte|Integer|Long|Boolean|Float|Double|char|short|byte|int|long|boolean|float|double|URL|InetAddress|InetAddrPort| #classname -->
<!ENTITY % IMPLIEDCLASSATTR "class CDATA #IMPLIED" >
<!ENTITY % CLASSATTR "class CDATA #REQUIRED" >
<!ENTITY % NAMEATTR "name CDATA #REQUIRED" >
<!ENTITY % IMPLIEDNAMEATTR "name CDATA #IMPLIED" >
<!ENTITY % DEFAULTATTR "default CDATA #IMPLIED" >
<!ENTITY % IDATTR "id ID #IMPLIED" >
<!ENTITY % REFATTR "refid CDATA #IMPLIED" >
<!ENTITY % REQUIREDIDATTR "id ID #REQUIRED" >
<!--
Configure Element.
This is the root element that specifies the class of object that
can be configured:
<Configure class="com.acme.MyClass"> ... </Configure>
-->
<!ELEMENT Configure (Arg*,(%CONFIG;)*) >
<!ATTLIST Configure %IMPLIEDCLASSATTR; %IDATTR; >
<!--
Set Element.
This element maps to a call to a setter method or field on the current object.
The name and optional type attributes are used to select the setter
method. If the name given is xxx, then a setXxx method is used, or
the xxx field is used of setXxx cannot be found.
A Set element can contain value text and/or the value objects returned
by other elements such as Call, New, SystemProperty, etc.
If no value type is specified, then white
space is trimmed out of the value. If it contains multiple value
elements they are added as strings before being converted to any
specified type.
A Set with a class attribute is treated as a static set method invocation.
-->
<!ELEMENT Set (%VALUE;)* >
<!ATTLIST Set %NAMEATTR; %TYPEATTR; %IMPLIEDCLASSATTR; >
<!--
Get Element.
This element maps to a call to a getter method or field on the current object.
The name attribute is used to select the get method.
If the name given is xxx, then a getXxx method is used, or
the xxx field is used if getXxx cannot be found.
A Get element can contain other elements such as Set, Put, Call, etc.
which act on the object returned by the get call.
A Get with a class attribute is treated as a static get method or field.
-->
<!ELEMENT Get (%CONFIG;)* >
<!ATTLIST Get %NAMEATTR; %IMPLIEDCLASSATTR; %IDATTR; >
<!--
Put Element.
This element maps to a call to a put method on the current object,
which must implement the Map interface. The name attribute is used
as the put key and the optional type attribute can force the type
of the value.
A Put element can contain value text and/or value elements such as Call,
New, SystemProperty, etc. If no value type is specified, then white
space is trimmed out of the value. If it contains multiple value
elements they are added as strings before being converted to any
specified type.
-->
<!ELEMENT Put (%VALUE;)* >
<!ATTLIST Put %NAMEATTR; %TYPEATTR; >
<!--
Call Element.
This element maps to an arbitrary call to a method on the current object,
The name attribute and Arg elements are used to select the method.
A Call element can contain a sequence of Arg elements followed by
a sequence of other elements such as Set, Put, Call, etc. which act on any object
returned by the original call:
<Call id="o2" name="test">
<Arg>value1</Arg>
<Set name="Test">Value2</Set>
</Call>
This is equivalent to:
Object o2 = o1.test("value1");
o2.setTest("value2");
A Call with a class attribute is treated as a static call.
-->
<!ELEMENT Call (Arg*,(%CONFIG;)*) >
<!ATTLIST Call %NAMEATTR; %IMPLIEDCLASSATTR; %IDATTR; >
<!--
Arg Element.
This element defines a positional or optional named argument for the
Call and New elements. The optional type attribute can force the type
of the value.
An Arg element can contain value text and/or value elements such as Call,
New, SystemProperty, etc. If no value type is specified, then white
space is trimmed out of the value. If it contains multiple value
elements they are added as strings before being converted to any
specified type.
-->
<!ELEMENT Arg (%VALUE;)* >
<!ATTLIST Arg %TYPEATTR; %IMPLIEDNAMEATTR; >
<!--
New Element.
This element allows the creation of a new object as part of a
value for elements such as Set, Put, Arg, etc. The class attribute
determines the type of the new object and the contained Arg elements
are used to select the constructor for the new object.
A New element can contain a sequence of Arg elements followed by
a sequence of elements such as Set, Put, Call, etc. elements
which act on the new object:
<New id="o" class="com.acme.MyClass">
<Arg>value1</Arg>
<Set name="test">Value2</Set>
</New>
This is equivalent to:
Object o = new com.acme.MyClass("value1");
o.setTest("Value2");
-->
<!ELEMENT New (Arg*,(%CONFIG;)*) >
<!ATTLIST New %CLASSATTR; %IDATTR;>
<!--
Ref Element.
This element allows a previously created object to be referenced by id. The
attribute refid is used to specify the id of another object (the attribute id can
also be used, but its use is deprecated).
A Ref element can contain a sequence of elements such as Set, Put, Call, etc.
which act on the referenced object.
<Ref refid="myobject">
<Set name="Test">Value2</Set>
</Ref>
-->
<!ELEMENT Ref (%CONFIG;)* >
<!ATTLIST Ref %IDATTR; %REFATTR;>
<!--
Array Element.
This element allows the creation of a new array as part of a
value of elements such as Set, Put, Arg, etc. The type attribute determines
the type of the new array and the contained Item elements
are used for each element of the array:
<Array type="java.lang.String">
<Item>value0</Item>
<Item><New class="java.lang.String"><Arg>value1</Arg></New></Item>
</Array>
This is equivalent to:
String[] a = new String[] { "value0", new String("value1") };
-->
<!ELEMENT Array (Item*) >
<!ATTLIST Array %TYPEATTR; %IDATTR; >
<!--
Map Element.
This element allows the creation of a new map as part of a
value of elements such as Set, Put, Arg, etc. The type attribute determines
the type of the new array and the contained Item elements
are used for each element of the array:
<Map>
<Entry>
<Item>keyName</Item>
<Item><New class="java.lang.String"><Arg>value1</Arg></New></Item>
</Entry>
</Map>
This is equivalent to:
Map m = new HashMap();
m.put("keyName", new String("value1"));
-->
<!ELEMENT Map (Entry*) >
<!ATTLIST Map %IDATTR; >
<!ELEMENT Entry (Item,Item) >
<!--
Item Element.
This element defines an entry for the Array or Map Entry elements.
The optional type attribute can force the type of the value.
An Item element can contain value text and/or the value object of
elements such as Call, New, SystemProperty, etc. If no value type
is specified, then white space is trimmed out of the value.
If it contains multiple value elements they are added as strings
before being converted to any specified type.
-->
<!ELEMENT Item (%VALUE;)* >
<!ATTLIST Item %TYPEATTR; %IDATTR; >
<!--
System Property Element.
This element allows JVM System properties to be retrieved as
part of the value of elements such as Set, Put, Arg, etc.
The name attribute specifies the property name and the optional
default argument provides a default value.
<SystemProperty name="Test" default="value" />
This is equivalent to:
System.getProperty("Test","value");
-->
<!ELEMENT SystemProperty EMPTY >
<!ATTLIST SystemProperty %NAMEATTR; %DEFAULTATTR; %IDATTR; >
<!--
Environment variable Element.
This element allows OS Environment variables to be retrieved as
part of the value of elements such as Set, Put, Arg, etc.
The name attribute specifies the env variable name and the optional
default argument provides a default value.
<Env name="Test" default="value" />
This is equivalent to:
String v=System.getEnv("Test");
if (v==null) v="value";
-->
<!ELEMENT Env EMPTY >
<!ATTLIST Env %NAMEATTR; %DEFAULTATTR; %IDATTR; >
<!--
Property Element.
This element allows arbitrary properties to be retrieved by name.
The name attribute specifies the property name and the optional
default argument provides a default value.
-->
<!ELEMENT Property (%CONFIG;)* >
<!ATTLIST Property %NAMEATTR; %DEFAULTATTR; %IDATTR; >
| {
"pile_set_name": "Github"
} |
<?php
/**
* Zend Framework
*
* LICENSE
*
* This source file is subject to the new BSD license that is bundled
* with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://framework.zend.com/license/new-bsd
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to [email protected] so we can send you a copy immediately.
*
* @category Zend
* @package Zend_Service
* @subpackage DeveloperGarden
* @copyright Copyright (c) 2005-2010 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
* @version $Id: GeoCoordinatesType.php 20166 2010-01-09 19:00:17Z bkarwin $
*/
/**
* @see Zend_Service_DeveloperGarden_Response_BaseType
*/
#require_once 'Zend/Service/DeveloperGarden/Response/BaseType.php';
/**
* @category Zend
* @package Zend_Service
* @subpackage DeveloperGarden
* @copyright Copyright (c) 2005-2010 Zend Technologies USA Inc. (http://www.zend.com)
* @author Marco Kaiser
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
class Zend_Service_DeveloperGarden_Response_IpLocation_GeoCoordinatesType
    extends Zend_Service_DeveloperGarden_Response_BaseType
{
    /**
     * Latitude value carried by the service response, or null until
     * the response has been populated.
     *
     * @var float
     */
    public $geoLatitude = null;

    /**
     * Longitude value carried by the service response, or null until
     * the response has been populated.
     *
     * @var float
     */
    public $geoLongitude = null;

    /**
     * Accessor for the latitude component.
     *
     * @return float
     */
    public function getLatitude()
    {
        $latitude = $this->geoLatitude;

        return $latitude;
    }

    /**
     * Accessor for the longitude component.
     *
     * @return float
     */
    public function getLongitude()
    {
        $longitude = $this->geoLongitude;

        return $longitude;
    }
}
| {
"pile_set_name": "Github"
} |
/*
JPC: An x86 PC Hardware Emulator for a pure Java Virtual Machine
Copyright (C) 2012-2013 Ian Preston
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as published by
the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Details (including contact information) can be found at:
jpc.sourceforge.net
or the developer website
sourceforge.net/projects/jpc/
End of licence header
*/
package org.jpc.emulator.execution.opcodes.pm;
import org.jpc.emulator.execution.*;
import org.jpc.emulator.execution.decoder.*;
import org.jpc.emulator.processor.*;
import org.jpc.emulator.processor.fpu64.*;
import static org.jpc.emulator.processor.Processor.*;
/**
 * x87 FLDENV with a 32-bit operand size in protected mode: reloads the FPU
 * environment from a memory operand.  Only the control, status and tag
 * words are restored here; the instruction/operand pointer fields are left
 * untouched (see the commented-out lines below), which is why the
 * "incomplete opcode" warning is printed at run time.
 */
public class fldenv_o32_M_mem extends Executable
{
/* Memory operand resolved from the instruction's ModRM byte. */
final Pointer op1;
public fldenv_o32_M_mem(int blockStart, int eip, int prefices, PeekableInputStream input)
{
super(blockStart, eip);
// Decode the ModRM byte following the opcode and build the pointer
// describing the effective address of the environment block.
int modrm = input.readU8();
op1 = Modrm.getPointer(prefices, modrm, input);
}
public Branch execute(Processor cpu)
{
System.out.println("Warning: Using incomplete opcode: FLDENV_28");
// check for floating point exceptions
// Resolve the operand's address, then reload the first three 32-bit
// fields of the environment image: control word, status word, tag word.
int addr = op1.get(cpu);
cpu.fpu.setControl(cpu.linearMemory.getDoubleWord(addr));
cpu.fpu.setStatus(cpu.linearMemory.getDoubleWord(addr+4));
cpu.fpu.setTagWord(cpu.linearMemory.getDoubleWord(addr+8));
//cpu.linearMemory.setWord(addr + 6, (short) 0 /* cpu.fpu.getIP() offset*/);
//cpu.linearMemory.setWord(addr + 8, (short) 0 /* (selector & 0xFFFF)*/);
//cpu.linearMemory.setWord(addr + 10, (short) 0 /* operand pntr offset*/);
//cpu.linearMemory.setWord(addr + 12, (short) 0 /* operand pntr selector & 0xFFFF*/);
return Branch.None;
}
// FLDENV never transfers control, so execution falls through.
public boolean isBranch()
{
return false;
}
public String toString()
{
return this.getClass().getName();
}
} | {
"pile_set_name": "Github"
} |
# $OpenBSD: Makefile,v 1.39 2020/08/05 04:23:30 deraadt Exp $
# Builds the OpenBSD installer artifacts for this MACHINE: a bootable
# disk image (${FS}) and an ISO (${CDROM}), both populated with the
# release sets from ${RELDIR}/${RELXDIR}.
FS= install${OSrev}.img
FSSIZE= 1359872
FSTYPE= install360
CDROM= install${OSrev}.iso
MOUNT_POINT= /mnt
# Locations of the base and X release sets (overridable).
RELXDIR?= /home/relx-${MACHINE}
RELDIR?= /home/rel-${MACHINE}
BSDRD= ${RELDIR}/bsd.rd
# Files copied into the image/ISO under /${OSREV}/${MACHINE}.
BASE= ${RELDIR}/base${OSrev}.tgz ${RELDIR}/comp${OSrev}.tgz \
${RELDIR}/game${OSrev}.tgz ${RELDIR}/man${OSrev}.tgz \
${RELDIR}/bsd ${RELDIR}/bsd.rd ${RELDIR}/bsd.mp \
${RELDIR}/INSTALL.${MACHINE}
XBASE= ${RELXDIR}/xbase${OSrev}.tgz ${RELXDIR}/xfont${OSrev}.tgz \
${RELXDIR}/xshare${OSrev}.tgz ${RELXDIR}/xserv${OSrev}.tgz
# EFI loaders placed in the image's msdos (ESP) partition.
EFIBOOT?= ${DESTDIR}/usr/mdec/BOOTX64.EFI ${DESTDIR}/usr/mdec/BOOTIA32.EFI
MSDOSSIZE= 960
# Total image size in 512-byte sectors: ffs part plus msdos part.
TOTALSIZE!= expr ${FSSIZE} + ${MSDOSSIZE}
TEMPLATE= ${.CURDIR}/template
all: ${FS} ${CDROM}
# Assemble the raw install image: zero-fill, attach as vnd, partition
# (MBR + disklabel), create the EFI msdos partition and the ffs root,
# install the bootloader, then copy the kernel and release sets in.
${FS}: ${BASE} ${XBASE} bsd.gz
dd if=/dev/zero of=${FS} bs=512 count=${TOTALSIZE}
vnconfig -v ${FS} > vnd
fdisk -yi -l ${FSSIZE} -b ${MSDOSSIZE} -f ${DESTDIR}/usr/mdec/mbr `cat vnd`
.ifdef TEMPLATE
disklabel -wAT ${TEMPLATE} `cat vnd`
newfs -t msdos /dev/r`cat vnd`i
mount /dev/`cat vnd`i ${MOUNT_POINT}
mkdir -p ${MOUNT_POINT}/efi/boot
cp ${EFIBOOT} ${MOUNT_POINT}/efi/boot
umount ${MOUNT_POINT}
.else
disklabel -w `cat vnd` ${FSTYPE}
.endif
newfs -O 1 -m 0 -o space -i 524288 -c ${FSSIZE} /dev/r`cat vnd`a
mount /dev/`cat vnd`a ${MOUNT_POINT}
cp ${DESTDIR}/usr/mdec/boot ${MOUNT_POINT}/boot
strip ${MOUNT_POINT}/boot
strip -R .comment -R .SUNW_ctf ${MOUNT_POINT}/boot
installboot -v -r ${MOUNT_POINT} `cat vnd` \
${DESTDIR}/usr/mdec/biosboot ${MOUNT_POINT}/boot
mkdir -p ${MOUNT_POINT}/${OSREV}/${MACHINE}
mkdir -p ${MOUNT_POINT}/etc
echo "set image /${OSREV}/${MACHINE}/bsd.rd" > ${MOUNT_POINT}/etc/boot.conf
install -c -m 555 -o root -g wheel bsd.gz ${MOUNT_POINT}/bsd
ln ${MOUNT_POINT}/bsd ${MOUNT_POINT}/bsd.rd
cp -p ${BASE} ${MOUNT_POINT}/${OSREV}/${MACHINE}
cp -p ${XBASE} ${MOUNT_POINT}/${OSREV}/${MACHINE}
cat ${RELDIR}/SHA256 ${RELXDIR}/SHA256 > \
${MOUNT_POINT}/${OSREV}/${MACHINE}/SHA256
# XXX no SHA256.sig
df -i ${MOUNT_POINT}
umount ${MOUNT_POINT}
vnconfig -u `cat vnd`
rm -f vnd
# Build the install ISO: stage the sets plus cdbr/cdboot into cd-dir
# and produce a hybrid El Torito image with mkhybrid.
${CDROM}: ${BASE} ${XBASE}
rm -rf ${.OBJDIR}/cd-dir
mkdir -p ${.OBJDIR}/cd-dir/${OSREV}/${MACHINE}
mkdir -p ${.OBJDIR}/cd-dir/etc
echo "set image /${OSREV}/${MACHINE}/bsd.rd" > ${.OBJDIR}/cd-dir/etc/boot.conf
cp -p ${BASE} ${.OBJDIR}/cd-dir/${OSREV}/${MACHINE}
cp -p ${XBASE} ${.OBJDIR}/cd-dir/${OSREV}/${MACHINE}
cat ${RELDIR}/SHA256 ${RELXDIR}/SHA256 > \
${.OBJDIR}/cd-dir/${OSREV}/${MACHINE}/SHA256
# XXX no SHA256.sig
cp -p ${RELDIR}/cdbr ${.OBJDIR}/cd-dir/${OSREV}/${MACHINE}
cp -p ${RELDIR}/cdboot ${.OBJDIR}/cd-dir/${OSREV}/${MACHINE}/cdboot
mkhybrid -a -R -T -L -l -d -D -N -o ${.OBJDIR}/${CDROM} \
-A "OpenBSD ${OSREV} ${MACHINE} Install CD" \
-P "Copyright (c) `date +%Y` Theo de Raadt, The OpenBSD project" \
-p "Theo de Raadt <[email protected]>" \
-V "OpenBSD/${MACHINE} ${OSREV} Install CD" \
-b ${OSREV}/${MACHINE}/cdbr -c ${OSREV}/${MACHINE}/boot.catalog \
${.OBJDIR}/cd-dir
# Publish both artifacts into the release directory.
install:
cp ${CDROM} ${FS} ${RELDIR}/
clean cleandir:
rm -f ${CDROM} ${FS}
rm -rf cd-dir
# Stripped, gzip-compressed ramdisk kernel embedded into the image.
bsd.gz: bsd.rd
cp bsd.rd bsd.strip
strip bsd.strip
strip -R .comment -R .SUNW_ctf bsd.strip
gzip -9cn bsd.strip > bsd.gz
bsd.rd: ${BSDRD}
cp ${BSDRD} bsd.rd
.include <bsd.obj.mk>
| {
"pile_set_name": "Github"
} |
// NOTE(review): the template literal below contains "\00", which is a
// SyntaxError inside template literals (legacy octal-style escapes are
// disallowed).  This looks like a negative-syntax test fixture, so the
// statement is intentionally left byte-identical.
'use strict'; `\00`;
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.