prompt
large_stringlengths 70
991k
| completion
large_stringlengths 0
1.02k
|
---|---|
<|file_name|>test_worker.py<|end_file_name|><|fim▁begin|>import unittest
import logging
logging.getLogger().setLevel(logging.DEBUG)
from ...auction.worker import Worker<|fim▁hole|>class TestCase(unittest.TestCase):
def setUp(self):
self.db = Database.pymysql(**sql)
self.ob = Worker(self.db, fail=True)
def test_init(self):
pass<|fim▁end|>
|
from ...database import Database
from ...rc import sql
|
<|file_name|>dtstats.rs<|end_file_name|><|fim▁begin|>use std::io::Result;
use std::io::BufReader;
use std::io::BufRead;
use std::fs::File;
use std::time::Duration;
extern crate dtlib;
fn main() {
let log_result = parse_log(dtlib::constants::LOG_FILE);
if !log_result.is_ok() {
println!("Could not load log.");
}
else {
let log = log_result.unwrap();
let stats_opt = calc_stats(log);
if !stats_opt.is_some() {
println!("Could not calculate stats.");
}
else {
let stats = stats_opt.unwrap();
print!(
" == DT Stats == \n\
\n\
Sectors written: {}\n\
GB written: {}\n\
\n\
Uptime (h): {}\n\
Uptime (d): {}\n\
Uptime (y): {}\n\
\n\
GB/h: {}\n\
GB/d: {}\n\
GB/y: {}\n\
\n\
Days until 100GB written: {}\n\
Days until 1TB written: {}\n\
Days until 100TB written: {} = {} years\n",
stats.sectors_written,
stats.gbytes_written,
stats.uptime_hours,
stats.uptime_days,
stats.uptime_years,
stats.gbytes_per_hour,
stats.gbytes_per_day,
stats.gbytes_per_year,
stats.days_until_100_gb,
stats.days_until_1_tb,
stats.days_until_100_tb, stats.years_until_100_tb
);
}
}
}
struct DtStats {
sectors_written : u64,
gbytes_written : u64,
uptime_hours : f64,
uptime_days : f64,
uptime_years : f32,
gbytes_per_hour : f64,
gbytes_per_day : f64,
gbytes_per_year : f64,
days_until_100_gb : u64,
days_until_1_tb : u64,
days_until_100_tb : u64,
years_until_100_tb : u64
}
struct LogEntry {
sectors_written : usize,
uptime : Duration
}
fn calc_stats(entries : LogIterator) -> Option<DtStats> {
let (sectors, uptime_opt) = entries
.map(|entry| (entry.sectors_written as u64, entry.uptime))
.fold(
(0 as u64, Some(Duration::new(0, 0))),
|(sectors, uptime), (new_sectors, new_uptime)|
(
sectors + new_sectors,
uptime.and_then(
|uptime| uptime.checked_add(new_uptime)
)
)
);
if uptime_opt.is_some() {
let uptime = uptime_opt.unwrap();
let gbytes_written : u64 = (sectors * dtlib::constants::BLOCK_SIZE as u64) / (1024 * 1024 * 1024);
let uptime_hours = uptime.as_secs() as f64 / (60 * 60) as f64;
let uptime_days = uptime_hours / 24 as f64;
let uptime_years = uptime_days / 365 as f64;
let gbytes_per_hour = gbytes_written as f64 / uptime_hours;
let gbytes_per_day = gbytes_written as f64 / uptime_days;
let gbytes_per_year = gbytes_written as f64 / uptime_years;
return Some(
DtStats {
sectors_written: sectors,
gbytes_written: gbytes_written,
uptime_hours: uptime_hours,
uptime_days: uptime_days,
uptime_years: uptime_years as f32,
gbytes_per_hour: gbytes_per_hour,
gbytes_per_day: gbytes_per_day,
gbytes_per_year: gbytes_per_year,
days_until_100_gb: (100.0 as f64 / gbytes_per_day) as u64,
days_until_1_tb: (1024.0 as f64 / gbytes_per_day) as u64,
days_until_100_tb: (102400.0 as f64 / gbytes_per_day) as u64,
years_until_100_tb: (102400.0 as f64 / gbytes_per_day) as u64 / 365
}
);
}
else {
return None;
}
}
fn parse_log_line(line : &String) -> Option<LogEntry> {
let mut split = line.split(':');
let sectors_opt = split.next()
.and_then(|s| s.parse::<usize>().ok());
let utime_opt = split.next()
.and_then(|s| s.parse::<u64>().ok())
.map(|t| Duration::from_secs(t));
return match (sectors_opt, utime_opt) {
(Some(sectors), Some(uptime)) =>
Some(
LogEntry{
sectors_written: sectors,
uptime: uptime
}
),
_ => None
}
}<|fim▁hole|>type LogIterator = Box<Iterator<Item=LogEntry>>;
fn parse_log(path : &str) -> Result<LogIterator> {
let file = File::open(path);
return file.map(
|f| {
let buf_file = BufReader::new(f);
let it = buf_file
.lines()
.filter(|line| line.is_ok())
.map(|line| line.unwrap())
.map(|line| parse_log_line(&line))
.filter(|opt| opt.is_some())
.map(|opt| opt.unwrap());
return Box::new(it) as LogIterator;
}
);
}<|fim▁end|>
| |
<|file_name|>ROI_utils.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# ------------------------------------------------------------------------------
# Copyright (C) 2006-2016 University of Dundee. All rights reserved.
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# ------------------------------------------------------------------------------
###
#
# ROIUtils allows the mapping of omero.model.ROIDataTypesI to python types
# and to create ROIDataTypesI from ROIUtil types.
# These methods also implement the acceptVisitor method linking to
# the ROIDrawingCanvas.
#
#
# @author Jean-Marie Burel
# <a href="mailto:[email protected]">[email protected]</a>
# @author Donald MacDonald
# <a href="mailto:[email protected]">[email protected]
# </a>
# @version 3.0
# <small>
# (<b>Internal version:</b> $Revision: $Date: $)
# </small>
# @since 3.0-Beta4
#
from omero.model.enums import UnitsLength
from omero.model import LengthI
from omero.model import EllipseI
from omero.model import LineI
from omero.model import RectangleI
from omero.model import PointI
from omero.model import PolylineI
from omero.model import PolygonI
from omero.model import MaskI
from omero.rtypes import rdouble, rint, rstring
#
# HELPERS
#
def pointsStringToXYlist(string):
"""
Method for converting the string returned from
omero.model.ShapeI.getPoints() into list of (x,y) points.
E.g: "points[309,427, 366,503, 190,491] points1[309,427, 366,503,
190,491] points2[309,427, 366,503, 190,491]"<|fim▁hole|> or the new format: "309,427 366,503 190,491"
"""
pointLists = string.strip().split("points")
if len(pointLists) < 2:
if len(pointLists) == 1 and pointLists[0]:
xys = pointLists[0].split()
xyList = [tuple(map(int, xy.split(','))) for xy in xys]
return xyList
msg = "Unrecognised ROI shape 'points' string: %s" % string
raise ValueError(msg)
firstList = pointLists[1]
xyList = []
for xy in firstList.strip(" []").split(", "):
x, y = xy.split(",")
xyList.append((int(x.strip()), int(y.strip())))
return xyList
def xyListToBbox(xyList):
"""
Returns a bounding box (x,y,w,h) that will contain the shape
represented by the XY points list
"""
xList, yList = [], []
for xy in xyList:
x, y = xy
xList.append(x)
yList.append(y)
return (min(xList), min(yList), max(xList)-min(xList),
max(yList)-min(yList))
#
# Data implementation
#
##
# abstract, defines the method that call it as abstract.
#
#
def abstract():
import inspect
caller = inspect.getouterframes(inspect.currentframe())[1][3]
raise NotImplementedError(caller + ' must be implemented in subclass')
##
# ShapeSettingsData contains all the display information about
# the ROI that aggregates it.
#
class ShapeSettingsData:
##
# Initialises the default values of the ShapeSettings.
# Stroke has default colour of darkGrey
# StrokeWidth defaults to 1
#
def __init__(self):
self.WHITE = 16777215
self.BLACK = 0
self.GREY = 11184810
self.strokeColour = rint(self.GREY)
self.strokeWidth = LengthI()
self.strokeWidth.setValue(1)
self.strokeWidth.setUnit(UnitsLength.POINT)
self.strokeDashArray = rstring('')
self.fillColour = rint(self.GREY)
self.fillRule = rstring('')
##
# Applies the settings in the ShapeSettingsData to the ROITypeI
# @param shape the omero.model.ROITypeI that these settings will
# be applied to
#
def setROIShapeSettings(self, shape):
shape.setStrokeColor(self.strokeColour)
shape.setStrokeWidth(self.strokeWidth)
shape.setStrokeDashArray(self.strokeDashArray)
shape.setFillColor(self.fillColour)
shape.setFillRule(self.fillRule)
##
# Set the Stroke settings of the ShapeSettings.
# @param colour The colour of the stroke.
# @param width The stroke width.
#
def setStrokeSettings(self, colour, width=1):
self.strokeColour = rint(colour)
self.strokeWidth = LengthI()
self.strokeWidth.setValue(width)
self.strokeWidth.setUnit(UnitsLength.POINT)
###
# Set the Fill Settings for the ShapeSettings.
# @param colour The fill colour of the shape.
def setFillSettings(self, colour):
self.fillColour = rstring(colour)
##
# Get the stroke settings as the tuple (strokeColour, strokeWidth).
# @return See above.
#
def getStrokeSettings(self):
return (self.strokeColour.getValue(), self.strokeWidth.getValue())
##
# Get the fill setting as a tuple of (fillColour)
# @return See above.
#
def getFillSettings(self):
return (self.fillColour.getValue())
##
# Get the tuple ((stokeColor, strokeWidth), (fillColour)).
# @return see above.
#
def getSettings(self):
return (self.getStrokeSettings(), self.getFillSettings())
##
# Set the current shapeSettings from the ROI roi.
# @param roi see above.
#
def getShapeSettingsFromROI(self, roi):
self.strokeColour = roi.getStrokeColor()
self.strokeWidth = roi.getStrokeWidth()
self.strokeDashArray = roi.getStrokeDashArray()
self.fillColour = roi.getFillColor()
self.fillRule = roi.getFillRule()
##
# This class stores the ROI Coordinate (Z,T).
#
class ROICoordinate:
##
# Initialise the ROICoordinate.
# @param z The z-section.
# @param t The timepoint.
def __init__(self, z=0, t=0):
self.theZ = rint(z)
self.theT = rint(t)
##
# Set the (z, t) for the roi using the (z, t) of the ROICoordinate.
# @param roi The ROI to set the (z, t) on.
#
def setROICoord(self, roi):
roi.setTheZ(self.theZ)
roi.setTheT(self.theT)
##
# Get the (z, t) from the ROI.
# @param See above.
#
def setCoordFromROI(self, roi):
self.theZ = roi.getTheZ()
self.theT = roi.getTheT()
##
# Interface to inherit for accepting ROIDrawing as a visitor.
# @param visitor The ROIDrawingCompoent.
#
class ROIDrawingI:
def acceptVisitor(self, visitor):
abstract()
##
# The base class for all ROIShapeData objects.
#
class ShapeData:
##
# Constructor sets up the coord, shapeSettings and ROI objects.
#
def __init__(self):
self.coord = ROICoordinate()
self.shapeSettings = ShapeSettingsData()
self.ROI = None
##
# Set the coord of the class to coord.
# @param See above.
#
def setCoord(self, coord):
self.coord = coord
##
# Set the ROICoordinate of the roi.
# @param roi See above.
#
def setROICoord(self, roi):
self.coord.setROICoord(roi)
##
# Set the Geometry of the roi from the geometry in ShapeData.
# @param roi See above.
#
def setROIGeometry(self, roi):
abstract()
##
# Set the Settings of the ShapeDate form the settings object.
# @param settings See above.
#
def setShapeSettings(self, settings):
self.shapeSettings = settings
##
# Set the Settings of the roi from the setting in ShapeData.
# @param roi See above.
#
def setROIShapeSettings(self, roi):
self.shapeSettings.setROIShapeSettings(roi)
##
# Accept visitor.
# @param visitor See above.
#
def acceptVisitor(self, visitor):
abstract()
##
# Create the base type of ROI for this shape.
#
def createBaseType(self):
abstract()
##
# Get the roi from the ShapeData. If the roi already exists return it.
# Otherwise create it from the ShapeData and return it.
# @return See above.
#
def getROI(self):
if(self.roi is not None):
return self.roi
self.roi = self.createBaseType()
self.setROICoord(self.roi)
self.setROIGeometry(self.roi)
self.setROIShapeSettings(self.roi)
return self.roi
##
# Set the shape settings object from the roi.
# @param roi see above.
#
def getShapeSettingsFromROI(self, roi):
self.shapeSettings.getShapeSettingsFromROI(roi)
##
# Set the ROICoordinate from the roi.
# @param roi See above.
#
def getCoordFromROI(self, roi):
self.coord.setCoordFromROI(roi)
##
# Set the Geometr from the roi.
# @param roi See above.
#
def getGeometryFromROI(self, roi):
abstract()
##
# Get all settings from the roi, Geomerty, Shapesettins, ROICoordinate.
# @param roi See above.
#
def fromROI(self, roi):
self.roi = roi
self.getShapeSettingsFromROI(roi)
self.getCoordFromROI(roi)
self.getGeometryFromROI(roi)
##
# The EllispeData class contains all the manipulation and create of EllipseI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing ellipses.
#
class EllipseData(ShapeData, ROIDrawingI):
##
# Constructor for EllipseData object.
# @param roicoord The ROICoordinate of the object (default: 0,0)
# @param x The centre x coordinate of the ellipse.
# @param y The centre y coordinate of the ellipse.
# @param radiusX The major axis of the ellipse.
# @param radiusY The minor axis of the ellipse.
def __init__(self, roicoord=ROICoordinate(), x=0, y=0, radiusX=0,
radiusY=0):
ShapeData.__init__(self)
self.x = rdouble(x)
self.y = rdouble(y)
self.radiusX = rdouble(radiusX)
self.radiusY = rdouble(radiusY)
self.setCoord(roicoord)
##
# overridden, @See ShapeData#setROIGeometry
#
def setROIGeometry(self, ellipse):
ellipse.setTheZ(self.coord.theZ)
ellipse.setTheT(self.coord.theT)
ellipse.setX(self.x)
ellipse.setY(self.y)
ellipse.setRadiusX(self.radiusX)
ellipse.setRadiusY(self.radiusY)
##
# overridden, @See ShapeData#getGeometryFromROI
#
def getGeometryFromROI(self, roi):
self.x = roi.getX()
self.y = roi.getY()
self.radiusX = roi.getRadiusX()
self.radiusY = roi.getRadiusY()
##
# overridden, @See ShapeData#createBaseType
#
def createBaseType(self):
return EllipseI()
##
# overridden, @See ShapeData#acceptVisitor
#
def acceptVisitor(self, visitor):
visitor.drawEllipse(
self.x.getValue(), self.y.getValue(), self.radiusX.getValue(),
self.radiusY.getValue(), self.shapeSettings.getSettings())
##
# The RectangleData class contains all the manipulation and creation of
# RectangleI types.
# It also accepts the ROIDrawingUtils visitor for drawing rectangles.
#
class RectangleData(ShapeData, ROIDrawingI):
##
# Constructor for RectangleData object.
# @param roicoord The ROICoordinate of the object (default: 0,0)
# @param x The top left x - coordinate of the shape.
# @param y The top left y - coordinate of the shape.
# @param width The width of the shape.
# @param height The height of the shape.
def __init__(self, roicoord=ROICoordinate(), x=0, y=0, width=0, height=0):
ShapeData.__init__(self)
self.x = rdouble(x)
self.y = rdouble(y)
self.width = rdouble(width)
self.height = rdouble(height)
self.setCoord(roicoord)
##
# overridden, @See ShapeData#setGeometry
#
def setGeometry(self, rectangle):
rectangle.setTheZ(self.coord.theZ)
rectangle.setTheT(self.coord.theT)
rectangle.setX(self.x)
rectangle.setY(self.y)
rectangle.setWidth(self.width)
rectangle.setHeight(self.height)
##
# overridden, @See ShapeData#getGeometryFromROI
#
def getGeometryFromROI(self, roi):
self.x = roi.getX()
self.y = roi.getY()
self.width = roi.getWidth()
self.height = roi.getHeight()
##
# overridden, @See ShapeData#createBaseType
#
def createBaseType(self):
return RectangleI()
##
# overridden, @See ShapeData#acceptVisitor
#
def acceptVisitor(self, visitor):
visitor.drawRectangle(
self.x, self.y, self.width, self.height,
self.shapeSettings.getSettings())
##
# The LineData class contains all the manipulation and create of LineI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing lines.
#
class LineData(ShapeData, ROIDrawingI):
##
# Constructor for LineData object.
# @param roicoord The ROICoordinate of the object (default: 0,0)
# @param x1 The first x coordinate of the shape.
# @param y1 The first y coordinate of the shape.
# @param x2 The second x coordinate of the shape.
# @param y2 The second y coordinate of the shape.
def __init__(self, roicoord=ROICoordinate(), x1=0, y1=0, x2=0, y2=0):
ShapeData.__init__(self)
self.x1 = rdouble(x1)
self.y1 = rdouble(y1)
self.x2 = rdouble(x2)
self.y2 = rdouble(y2)
self.setCoord(roicoord)
##
# overridden, @See ShapeData#setGeometry
#
def setGeometry(self, line):
line.setTheZ(self.coord.theZ)
line.setTheT(self.coord.theT)
line.setX1(self.x1)
line.setY1(self.y1)
line.setX2(self.x2)
line.setY2(self.y2)
##
# overridden, @See ShapeData#getGeometryFromROI
#
def getGeometryFromROI(self, roi):
self.x1 = roi.getX1()
self.y1 = roi.getY1()
self.x2 = roi.getX2()
self.y2 = roi.getY2()
##
# overridden, @See ShapeData#createBaseType
#
def createBaseType(self):
return LineI()
##
# overridden, @See ShapeData#acceptVisitor
#
def acceptVisitor(self, visitor):
visitor.drawLine(
self.x1.getValue(), self.y1.getValue(), self.x2.getValue(),
self.y2.getValue(), self.shapeSettings.getSettings())
##
# The MaskData class contains all the manipulation and create of MaskI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing masks.
#
class MaskData(ShapeData, ROIDrawingI):
##
# Constructor for MaskData object.
# @param roicoord The ROICoordinate of the object (default: 0,0)
# @param bytes The mask data.
# @param x The top left x - coordinate of the shape.
# @param y The top left y - coordinate of the shape.
# @param width The width of the shape.
# @param height The height of the shape.
def __init__(self, roicoord=ROICoordinate(), bytes=None,
x=0, y=0, width=0, height=0):
ShapeData.__init__(self)
self.x = rdouble(x)
self.y = rdouble(y)
self.width = rdouble(width)
self.height = rdouble(height)
self.bytesdata = bytes
self.setCoord(roicoord)
##
# overridden, @See ShapeData#setGeometry
#
def setGeometry(self, mask):
mask.setTheZ(self.coord.theZ)
mask.setTheT(self.coord.theT)
mask.setX(self.x)
mask.setY(self.y)
mask.setWidth(self.width)
mask.setHeight(self.height)
mask.setBytes(self.bytedata)
##
# overridden, @See ShapeData#getGeometryFromROI
#
def getGeometryFromROI(self, roi):
self.x = roi.getX()
self.y = roi.getY()
self.width = roi.getWidth()
self.height = roi.getHeight()
self.bytesdata = roi.getBytes()
##
# overridden, @See ShapeData#createBaseType
#
def createBaseType(self):
return MaskI()
##
# overridden, @See ShapeData#acceptVisitor
#
def acceptVisitor(self, visitor):
visitor.drawMask(
self.x.getValue(), self.y.getValue(),
self.width.getValue(), self.height.getValue(),
self.bytesdata, self.shapeSettings.getSettings())
##
# The PointData class contains all the manipulation and create of PointI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing points.
#
class PointData(ShapeData, ROIDrawingI):
##
# Constructor for PointData object.
# @param roicoord The ROICoordinate of the object (default: 0,0)
# @param x The x coordinate of the shape.
# @param y The y coordinate of the shape.
def __init__(self, roicoord=ROICoordinate(), x=0, y=0):
ShapeData.__init__(self)
self.x = rdouble(x)
self.y = rdouble(y)
self.setCoord(roicoord)
##
# overridden, @See ShapeData#setGeometry
#
def setGeometry(self, point):
point.setTheZ(self.coord.theZ)
point.setTheT(self.coord.theT)
point.setX(self.x)
point.setY(self.y)
##
# overridden, @See ShapeData#getGeometryFromROI
#
def getGeometryFromROI(self, roi):
self.x = roi.getX()
self.y = roi.getY()
##
# overridden, @See ShapeData#createBaseType
#
def createBaseType(self):
return PointI()
##
# overridden, @See ShapeData#acceptVisitor
#
def acceptVisitor(self, visitor):
visitor.drawEllipse(
self.x.getValue(), self.y.getValue(), 3, 3,
self.shapeSettings.getSettings())
##
# The PolygonData class contains all the manipulation and create of PolygonI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing polygons.
#
class PolygonData(ShapeData, ROIDrawingI):
##
# Constructor for PolygonData object.
# @param roicoord The ROICoordinate of the object (default: 0,0)
# @param pointList The list of points that make up the polygon,
# as pairs [x1, y1, x2, y2 ..].
def __init__(self, roicoord=ROICoordinate(), pointsList=(0, 0)):
ShapeData.__init__(self)
self.points = rstring(self.listToString(pointsList))
self.setCoord(roicoord)
##
# overridden, @See ShapeData#setGeometry
#
def setGeometry(self, polygon):
polygon.setTheZ(self.coord.theZ)
polygon.setTheT(self.coord.theT)
polygon.setPoints(self.points)
##
# overridden, @See ShapeData#getGeometryFromROI
#
def getGeometryFromROI(self, roi):
self.points = roi.getPoints()
##
# Convert a pointsList[x1,y1,x2,y2..] to a string.
# @param pointsList The list of points to convert.
# @return The pointsList converted to a string.
def listToString(self, pointsList):
string = ''
cnt = 0
for element in pointsList:
if(cnt != 0):
string = string + ','
cnt += 1
string = string + str(element)
return string
##
# Convert a string of points to a tuple list [(x1,y1),(x2,y2)..].
# @param pointString The string to convert.
# @return The tuple list converted from a string.
def stringToTupleList(self, pointString):
elements = []
list = pointString.split(',')
numTokens = len(list)
for tokenPair in range(0, numTokens / 2):
elements.append(
(int(list[tokenPair * 2]), int(list[tokenPair * 2 + 1])))
return elements
##
# overridden, @See ShapeData#createBaseType
#
def createBaseType(self):
return PolygonI()
##
# overridden, @See ShapeData#acceptVisitor
#
def acceptVisitor(self, visitor):
visitor.drawPolygon(self.stringToTupleList(
self.points.getValue()), self.shapeSettings.getSettings())
##
# The PolylineData class contains all the manipulation and create of PolylineI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing polylines.
#
class PolylineData(ShapeData, ROIDrawingI):
##
# Constructor for PolylineData object.
# @param roicoord The ROICoordinate of the object (default: 0,0)
# @param pointList The list of points that make up the polygon,
# as pairs [x1, y1, x2, y2 ..].
def __init__(self, roicoord=ROICoordinate(), pointsList=(0, 0)):
ShapeData.__init__(self)
self.points = rstring(self.listToString(pointsList))
self.setCoord(roicoord)
##
# overridden, @See ShapeData#setGeometry
#
def setGeometry(self, point):
point.setTheZ(self.coord.theZ)
point.setTheT(self.coord.theT)
point.setPoints(self.points)
##
# overridden, @See ShapeData#getGeometryFromROI
#
def getGeometryFromROI(self, roi):
self.points = roi.getPoints()
##
# Convert a pointsList[x1,y1,x2,y2..] to a string.
# @param pointsList The list of points to convert.
# @return The pointsList converted to a string.
def listToString(self, pointsList):
string = ''
cnt = 0
for element in pointsList:
if(cnt > 0):
string = string + ','
string = string + str(element)
cnt += 1
return string
##
# Convert a string of points to a tuple list [(x1,y1),(x2,y2)..].
# @param pointString The string to convert.
# @return The tuple list converted from a string.
def stringToTupleList(self, pointString):
elements = []
list = pointString.split(',')
numTokens = len(list)
for tokenPair in range(0, numTokens / 2):
elements.append(
(int(list[tokenPair * 2]), int(list[tokenPair * 2 + 1])))
return elements
##
# overridden, @See ShapeData#createBaseType
#
def createBaseType(self):
return PolylineI()
##
# overridden, @See ShapeData#acceptVisitor
#
def acceptVisitor(self, visitor):
visitor.drawPolyline(self.stringToTupleList(
self.points.getValue()), self.shapeSettings.getSettings())<|fim▁end|>
| |
<|file_name|>zdt2.py<|end_file_name|><|fim▁begin|>__author__ = 'jdaniel'
from GaiaSolve.model import Model
class ZDT2(Model):
def __init__(self):
super(ZDT2, self).__init__()
def evaluate(self):
g = 1.0 + 9.0*sum(self.x[1:])/(len(self.x) - 1)
f1 = self.x[0]
f2 = g*(1.0 - (f1/g)**2)
self.obj = [f1, f2]
self.eqcon = []
self.neqcon = []
def number_of_design_variables(self):
return 30
def lower_bound(self):
return [0.0]*30
def upper_bound(self):
return [1.0]*30
def number_of_objectives(self):
return 2
def has_equality_constraints(self):
return False
def number_of_equality_constraints(self):
return 0
def has_inequality_constraints(self):
return False
def number_of_inequality_constraints(self):
return 0
def decision_variable_names(self):
x_names = []
for i in range(30):
x_names.append('x' + str(i))<|fim▁hole|>
def objective_variable_names(self):
return ['f1', 'f2']
def equality_constraint_variable_names(self):
return []
def inequality_constraint_variable_names(self):
return []<|fim▁end|>
|
return x_names
|
<|file_name|>test.py<|end_file_name|><|fim▁begin|>import sys
from m2ext import SSL
from M2Crypto import X509
<|fim▁hole|>ctx = SSL.Context()
ctx.load_verify_locations(capath=sys.argv[2])
if ctx.validate_certificate(cert):
print "valid"
else:
print "invalid"<|fim▁end|>
|
print "Validating certificate %s using CApath %s" % (sys.argv[1], sys.argv[2])
cert = X509.load_cert(sys.argv[1])
|
<|file_name|>FileBackend.java<|end_file_name|><|fim▁begin|>package eu.siacs.conversations.persistance;
import android.annotation.TargetApi;
import android.content.ContentResolver;
import android.content.Context;
import android.content.Intent;
import android.database.Cursor;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Matrix;
import android.graphics.Paint;
import android.graphics.RectF;
import android.media.MediaMetadataRetriever;
import android.net.Uri;
import android.os.Build;
import android.os.Environment;
import android.os.ParcelFileDescriptor;
import android.provider.MediaStore;
import android.provider.OpenableColumns;
import android.support.v4.content.FileProvider;
import android.system.Os;
import android.system.StructStat;
import android.util.Base64;
import android.util.Base64OutputStream;
import android.util.Log;
import android.util.LruCache;
import android.webkit.MimeTypeMap;
import java.io.ByteArrayOutputStream;
import java.io.Closeable;
import java.io.File;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import java.net.URL;
import java.security.DigestOutputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.Locale;
import eu.siacs.conversations.Config;
import eu.siacs.conversations.R;
import eu.siacs.conversations.entities.DownloadableFile;
import eu.siacs.conversations.entities.Message;
import eu.siacs.conversations.services.XmppConnectionService;
import eu.siacs.conversations.utils.CryptoHelper;
import eu.siacs.conversations.utils.ExifHelper;
import eu.siacs.conversations.utils.FileUtils;
import eu.siacs.conversations.utils.FileWriterException;
import eu.siacs.conversations.utils.MimeUtils;
import eu.siacs.conversations.xmpp.pep.Avatar;
public class FileBackend {
private static final Object THUMBNAIL_LOCK = new Object();
private static final SimpleDateFormat IMAGE_DATE_FORMAT = new SimpleDateFormat("yyyyMMdd_HHmmss", Locale.US);
public static final String FILE_PROVIDER = ".files";
private XmppConnectionService mXmppConnectionService;
private static final List<String> BLACKLISTED_PATH_ELEMENTS = Arrays.asList("org.mozilla.firefox");
public FileBackend(XmppConnectionService service) {
this.mXmppConnectionService = service;
}
private void createNoMedia() {
final File nomedia = new File(getConversationsDirectory("Files") + ".nomedia");
if (!nomedia.exists()) {
try {
nomedia.createNewFile();
} catch (Exception e) {
Log.d(Config.LOGTAG, "could not create nomedia file");
}
}
}
public void updateMediaScanner(File file) {
String path = file.getAbsolutePath();
if (!path.startsWith(getConversationsDirectory("Files"))) {
Intent intent = new Intent(Intent.ACTION_MEDIA_SCANNER_SCAN_FILE);
intent.setData(Uri.fromFile(file));
mXmppConnectionService.sendBroadcast(intent);
} else {
createNoMedia();
}
}
public boolean deleteFile(Message message) {
File file = getFile(message);
if (file.delete()) {
updateMediaScanner(file);
return true;
} else {
return false;
}
}
public DownloadableFile getFile(Message message) {
return getFile(message, true);
}
public DownloadableFile getFileForPath(String path, String mime) {
final DownloadableFile file;
if (path.startsWith("/")) {
file = new DownloadableFile(path);
} else {
if (mime != null && mime.startsWith("image/")) {
file = new DownloadableFile(getConversationsDirectory("Images") + path);
} else if (mime != null && mime.startsWith("video/")) {
file = new DownloadableFile(getConversationsDirectory("Videos") + path);
} else {
file = new DownloadableFile(getConversationsDirectory("Files") + path);
}
}
return file;
}
public DownloadableFile getFile(Message message, boolean decrypted) {
final boolean encrypted = !decrypted
&& (message.getEncryption() == Message.ENCRYPTION_PGP
|| message.getEncryption() == Message.ENCRYPTION_DECRYPTED);
String path = message.getRelativeFilePath();
if (path == null) {
path = message.getUuid();
}
final DownloadableFile file = getFileForPath(path, message.getMimeType());
if (encrypted) {
return new DownloadableFile(getConversationsDirectory("Files") + file.getName() + ".pgp");
} else {
return file;
}
}
public static long getFileSize(Context context, Uri uri) {
try {
final Cursor cursor = context.getContentResolver().query(uri, null, null, null, null);
if (cursor != null && cursor.moveToFirst()) {
long size = cursor.getLong(cursor.getColumnIndex(OpenableColumns.SIZE));
cursor.close();
return size;
} else {
return -1;
}
} catch (Exception e) {
return -1;
}
}
public static boolean allFilesUnderSize(Context context, List<Uri> uris, long max) {
if (max <= 0) {
Log.d(Config.LOGTAG, "server did not report max file size for http upload");
return true; //exception to be compatible with HTTP Upload < v0.2
}
for (Uri uri : uris) {
String mime = context.getContentResolver().getType(uri);
if (mime != null && mime.startsWith("video/")) {
try {
Dimensions dimensions = FileBackend.getVideoDimensions(context, uri);
if (dimensions.getMin() > 720) {
Log.d(Config.LOGTAG, "do not consider video file with min width larger than 720 for size check");
continue;
}
} catch (NotAVideoFile notAVideoFile) {
//ignore and fall through
}
}
if (FileBackend.getFileSize(context, uri) > max) {
Log.d(Config.LOGTAG, "not all files are under " + max + " bytes. suggesting falling back to jingle");
return false;
}
}
return true;
}
public String getConversationsDirectory(final String type) {
if (Config.ONLY_INTERNAL_STORAGE) {
return mXmppConnectionService.getFilesDir().getAbsolutePath() + "/" + type + "/";
} else {
return Environment.getExternalStorageDirectory() + "/Conversations/Media/Conversations " + type + "/";
}
}
public static String getConversationsLogsDirectory() {
return Environment.getExternalStorageDirectory().getAbsolutePath() + "/Conversations/";
}
public Bitmap resize(Bitmap originalBitmap, int size) {
int w = originalBitmap.getWidth();
int h = originalBitmap.getHeight();
if (Math.max(w, h) > size) {
int scalledW;
int scalledH;
if (w <= h) {
scalledW = (int) (w / ((double) h / size));
scalledH = size;
} else {
scalledW = size;
scalledH = (int) (h / ((double) w / size));
}
Bitmap result = Bitmap.createScaledBitmap(originalBitmap, scalledW, scalledH, true);
if (originalBitmap != null && !originalBitmap.isRecycled()) {
originalBitmap.recycle();
}
return result;
} else {
return originalBitmap;
}
}
public static Bitmap rotate(Bitmap bitmap, int degree) {
if (degree == 0) {
return bitmap;
}
int w = bitmap.getWidth();
int h = bitmap.getHeight();
Matrix mtx = new Matrix();
mtx.postRotate(degree);
Bitmap result = Bitmap.createBitmap(bitmap, 0, 0, w, h, mtx, true);
if (bitmap != null && !bitmap.isRecycled()) {
bitmap.recycle();
}
return result;
}
/**
 * Decide whether an image can be attached without re-encoding: it must
 * resolve to a non-blacklisted local path, be non-empty and below the
 * auto-accept size limit, and already fit Config.IMAGE_SIZE in the
 * configured image format.
 */
public boolean useImageAsIs(Uri uri) {
    String path = getOriginalPath(uri);
    if (path == null || isPathBlacklisted(path)) {
        return false;
    }
    File file = new File(path);
    long size = file.length();
    if (size == 0 || size >= mXmppConnectionService.getResources().getInteger(R.integer.auto_accept_filesize)) {
        return false;
    }
    BitmapFactory.Options options = new BitmapFactory.Options();
    // decode only the image bounds; no pixel data is loaded
    options.inJustDecodeBounds = true;
    try {
        BitmapFactory.decodeStream(mXmppConnectionService.getContentResolver().openInputStream(uri), null, options);
        if (options.outMimeType == null || options.outHeight <= 0 || options.outWidth <= 0) {
            return false;
        }
        return (options.outWidth <= Config.IMAGE_SIZE && options.outHeight <= Config.IMAGE_SIZE && options.outMimeType.contains(Config.IMAGE_FORMAT.name().toLowerCase()));
    } catch (FileNotFoundException e) {
        return false;
    }
}
/** True when {@code path} contains any of the blacklisted path elements. */
public static boolean isPathBlacklisted(String path) {
    boolean blacklisted = false;
    for (final String element : BLACKLISTED_PATH_ELEMENTS) {
        if (path.contains(element)) {
            blacklisted = true;
            break;
        }
    }
    return blacklisted;
}
/** Resolve {@code uri} to a filesystem path, or null when it has none. */
public String getOriginalPath(Uri uri) {
    return FileUtils.getPath(mXmppConnectionService, uri);
}
/**
 * Copy the content behind {@code uri} into {@code file} in private storage.
 *
 * @throws FileCopyException carrying a user-displayable string resource id
 *         when the source cannot be opened or an I/O error occurs.
 */
public void copyFileToPrivateStorage(File file, Uri uri) throws FileCopyException {
    Log.d(Config.LOGTAG, "copy file (" + uri.toString() + ") to private storage " + file.getAbsolutePath());
    file.getParentFile().mkdirs();
    OutputStream os = null;
    InputStream is = null;
    try {
        file.createNewFile();
        os = new FileOutputStream(file);
        is = mXmppConnectionService.getContentResolver().openInputStream(uri);
        if (is == null) {
            // openInputStream may return null when the provider cannot resolve
            // the uri; fail with a catchable exception instead of a NullPointerException
            throw new FileCopyException(R.string.error_file_not_found);
        }
        byte[] buffer = new byte[1024];
        int length;
        while ((length = is.read(buffer)) > 0) {
            try {
                os.write(buffer, 0, length);
            } catch (IOException e) {
                // distinguish write failures (disk full etc.) from read failures
                throw new FileWriterException();
            }
        }
        try {
            os.flush();
        } catch (IOException e) {
            throw new FileWriterException();
        }
    } catch (FileNotFoundException e) {
        throw new FileCopyException(R.string.error_file_not_found);
    } catch (FileWriterException e) {
        throw new FileCopyException(R.string.error_unable_to_create_temporary_file);
    } catch (IOException e) {
        e.printStackTrace();
        throw new FileCopyException(R.string.error_io_exception);
    } finally {
        close(os);
        close(is);
    }
}
/**
 * Copy {@code uri} into this message's private file, deriving the file
 * extension from {@code type} (falling back to guessing from the uri).
 */
public void copyFileToPrivateStorage(Message message, Uri uri, String type) throws FileCopyException {
    String mime = type != null ? type : MimeUtils.guessMimeTypeFromUri(mXmppConnectionService, uri);
    Log.d(Config.LOGTAG, "copy " + uri.toString() + " to private storage (mime=" + mime + ")");
    String extension = MimeUtils.guessExtensionFromMimeType(mime);
    if (extension == null) {
        // fall back to the extension of the file name the provider reports
        extension = getExtensionFromUri(uri);
    }
    message.setRelativeFilePath(message.getUuid() + "." + extension);
    copyFileToPrivateStorage(mXmppConnectionService.getFileBackend().getFile(message), uri);
}
/**
 * Look up the backing file name for {@code uri} via the media store and
 * return its extension, or null when no usable name is available.
 */
private String getExtensionFromUri(Uri uri) {
    String[] projection = {MediaStore.MediaColumns.DATA};
    String filename = null;
    Cursor cursor = mXmppConnectionService.getContentResolver().query(uri, projection, null, null, null);
    if (cursor != null) {
        try {
            if (cursor.moveToFirst()) {
                filename = cursor.getString(0);
            }
        } catch (Exception e) {
            // misbehaving provider: treat the filename as unknown
            filename = null;
        } finally {
            cursor.close();
        }
    }
    // pos > 0 (not >= 0) so dot-files like ".hidden" do not yield an extension
    int pos = filename == null ? -1 : filename.lastIndexOf('.');
    return pos > 0 ? filename.substring(pos + 1) : null;
}
/**
 * Decode, downscale, rotate and re-compress the image at {@code image} into
 * {@code file}. On OutOfMemoryError the decode is retried with a doubled
 * sample size (at most 3 retries); the compression quality is lowered in
 * steps of 5 until the output fits the auto-accept size or quality hits 50.
 */
private void copyImageToPrivateStorage(File file, Uri image, int sampleSize) throws FileCopyException {
    file.getParentFile().mkdirs();
    InputStream is = null;
    OutputStream os = null;
    try {
        if (!file.exists() && !file.createNewFile()) {
            throw new FileCopyException(R.string.error_unable_to_create_temporary_file);
        }
        is = mXmppConnectionService.getContentResolver().openInputStream(image);
        if (is == null) {
            throw new FileCopyException(R.string.error_not_an_image_file);
        }
        Bitmap originalBitmap;
        BitmapFactory.Options options = new BitmapFactory.Options();
        // sampleSize is an exponent: the effective down-sampling factor is 2^sampleSize
        int inSampleSize = (int) Math.pow(2, sampleSize);
        Log.d(Config.LOGTAG, "reading bitmap with sample size " + inSampleSize);
        options.inSampleSize = inSampleSize;
        originalBitmap = BitmapFactory.decodeStream(is, null, options);
        is.close();
        if (originalBitmap == null) {
            throw new FileCopyException(R.string.error_not_an_image_file);
        }
        Bitmap scaledBitmap = resize(originalBitmap, Config.IMAGE_SIZE);
        int rotation = getRotation(image);
        scaledBitmap = rotate(scaledBitmap, rotation);
        boolean targetSizeReached = false;
        int quality = Config.IMAGE_QUALITY;
        final int imageMaxSize = mXmppConnectionService.getResources().getInteger(R.integer.auto_accept_filesize);
        while (!targetSizeReached) {
            os = new FileOutputStream(file);
            boolean success = scaledBitmap.compress(Config.IMAGE_FORMAT, quality, os);
            if (!success) {
                throw new FileCopyException(R.string.error_compressing_image);
            }
            os.flush();
            // stop once the file is small enough or the quality floor (50) is reached
            targetSizeReached = file.length() <= imageMaxSize || quality <= 50;
            quality -= 5;
        }
        scaledBitmap.recycle();
    } catch (FileNotFoundException e) {
        throw new FileCopyException(R.string.error_file_not_found);
    } catch (IOException e) {
        e.printStackTrace();
        throw new FileCopyException(R.string.error_io_exception);
    } catch (SecurityException e) {
        throw new FileCopyException(R.string.error_security_exception_during_image_copy);
    } catch (OutOfMemoryError e) {
        // decoding ran out of memory: retry with a coarser sample size before giving up
        ++sampleSize;
        if (sampleSize <= 3) {
            copyImageToPrivateStorage(file, image, sampleSize);
        } else {
            throw new FileCopyException(R.string.error_out_of_memory);
        }
    } finally {
        close(os);
        close(is);
    }
}
/** Compress the image at {@code image} into {@code file}, starting at full resolution. */
public void copyImageToPrivateStorage(File file, Uri image) throws FileCopyException {
    Log.d(Config.LOGTAG, "copy image (" + image.toString() + ") to private storage " + file.getAbsolutePath());
    copyImageToPrivateStorage(file, image, 0);
}
/**
 * Compress the image at {@code image} into the message's private file, first
 * assigning a relative file path whose extension matches the configured
 * image format, then refreshing the message's file metadata.
 */
public void copyImageToPrivateStorage(Message message, Uri image) throws FileCopyException {
    String extension = null;
    switch (Config.IMAGE_FORMAT) {
        case JPEG:
            extension = "jpg";
            break;
        case PNG:
            extension = "png";
            break;
        case WEBP:
            extension = "webp";
            break;
    }
    if (extension != null) {
        message.setRelativeFilePath(message.getUuid() + "." + extension);
    }
    copyImageToPrivateStorage(getFile(message), image);
    updateFileParams(message);
}
// Read the EXIF orientation of a local file by converting it to a file:// uri first.
private int getRotation(File file) {
    return getRotation(Uri.parse("file://" + file.getAbsolutePath()));
}
/** EXIF rotation in degrees for the image at {@code image}; 0 when unreadable. */
private int getRotation(Uri image) {
    InputStream is = null;
    try {
        is = mXmppConnectionService.getContentResolver().openInputStream(image);
        return ExifHelper.getOrientation(is);
    } catch (FileNotFoundException e) {
        return 0;
    } finally {
        close(is);
    }
}
/**
 * Return a thumbnail for {@code message} from the bitmap cache, generating
 * and caching one unless {@code cacheOnly} is set. Videos get a first-frame
 * preview with a play overlay; gifs get a play-gif overlay.
 *
 * @throws FileNotFoundException when the underlying image cannot be decoded
 */
public Bitmap getThumbnail(Message message, int size, boolean cacheOnly) throws FileNotFoundException {
    final String uuid = message.getUuid();
    final LruCache<String, Bitmap> cache = mXmppConnectionService.getBitmapCache();
    Bitmap thumbnail = cache.get(uuid);
    if ((thumbnail == null) && (!cacheOnly)) {
        synchronized (THUMBNAIL_LOCK) {
            // re-check under the lock: another thread may have generated it meanwhile
            thumbnail = cache.get(uuid);
            if (thumbnail != null) {
                return thumbnail;
            }
            DownloadableFile file = getFile(message);
            final String mime = file.getMimeType();
            if (mime.startsWith("video/")) {
                thumbnail = getVideoPreview(file, size);
            } else {
                Bitmap fullsize = getFullsizeImagePreview(file, size);
                if (fullsize == null) {
                    throw new FileNotFoundException();
                }
                thumbnail = resize(fullsize, size);
                thumbnail = rotate(thumbnail, getRotation(file));
                if (mime.equals("image/gif")) {
                    // overlay requires a mutable copy; the intermediate is recycled
                    Bitmap withGifOverlay = thumbnail.copy(Bitmap.Config.ARGB_8888, true);
                    drawOverlay(withGifOverlay, R.drawable.play_gif, 1.0f);
                    thumbnail.recycle();
                    thumbnail = withGifOverlay;
                }
            }
            this.mXmppConnectionService.getBitmapCache().put(uuid, thumbnail);
        }
    }
    return thumbnail;
}
/** Decode the image at a sample size fitted to {@code size}; on OOM retry once, coarser. */
private Bitmap getFullsizeImagePreview(File file, int size) {
    BitmapFactory.Options options = new BitmapFactory.Options();
    options.inSampleSize = calcSampleSize(file, size);
    try {
        return BitmapFactory.decodeFile(file.getAbsolutePath(), options);
    } catch (OutOfMemoryError e) {
        // halve the resolution and try a single time more
        options.inSampleSize *= 2;
        return BitmapFactory.decodeFile(file.getAbsolutePath(), options);
    }
}
/**
 * Draw the drawable {@code resource} centered on top of {@code bitmap},
 * scaled to {@code factor} of the bitmap's smaller dimension.
 */
private void drawOverlay(Bitmap bitmap, int resource, float factor) {
    Bitmap overlay = BitmapFactory.decodeResource(mXmppConnectionService.getResources(), resource);
    Canvas canvas = new Canvas(bitmap);
    float targetSize = Math.min(canvas.getWidth(), canvas.getHeight()) * factor;
    Log.d(Config.LOGTAG, "target size overlay: " + targetSize + " overlay bitmap size was " + overlay.getHeight());
    float left = (canvas.getWidth() - targetSize) / 2.0f;
    float top = (canvas.getHeight() - targetSize) / 2.0f;
    RectF dst = new RectF(left, top, left + targetSize - 1, top + targetSize - 1);
    canvas.drawBitmap(overlay, null, dst, createAntiAliasingPaint());
}
/** Paint configured for smooth (anti-aliased, filtered, dithered) bitmap drawing. */
private static Paint createAntiAliasingPaint() {
    final Paint paint = new Paint(Paint.ANTI_ALIAS_FLAG);
    paint.setFilterBitmap(true);
    paint.setDither(true);
    return paint;
}
/**
 * Build a thumbnail from the video's first frame with a play-button overlay;
 * falls back to an opaque black square when frame extraction fails.
 */
private Bitmap getVideoPreview(File file, int size) {
    MediaMetadataRetriever metadataRetriever = new MediaMetadataRetriever();
    Bitmap frame;
    try {
        metadataRetriever.setDataSource(file.getAbsolutePath());
        frame = metadataRetriever.getFrameAtTime(0);
        metadataRetriever.release();
        frame = resize(frame, size);
    } catch (RuntimeException e) {
        // undecodable video: use a black placeholder of the requested size
        frame = Bitmap.createBitmap(size, size, Bitmap.Config.ARGB_8888);
        frame.eraseColor(0xff000000);
    }
    drawOverlay(frame, R.drawable.play_video, 0.75f);
    return frame;
}
// Shared camera directory (DCIM/Camera) used for photos taken from within the app.
private static String getTakePhotoPath() {
    return Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_DCIM) + "/Camera/";
}
/** Create a timestamped target uri for a new camera photo, creating parent dirs. */
public Uri getTakePhotoUri() {
    File file;
    if (Config.ONLY_INTERNAL_STORAGE) {
        file = new File(mXmppConnectionService.getCacheDir().getAbsolutePath(), "Camera/IMG_" + this.IMAGE_DATE_FORMAT.format(new Date()) + ".jpg");
    } else {
        file = new File(getTakePhotoPath() + "IMG_" + this.IMAGE_DATE_FORMAT.format(new Date()) + ".jpg");
    }
    file.getParentFile().mkdirs();
    return getUriForFile(mXmppConnectionService, file);
}
/**
 * Return a uri for {@code file}: a content:// uri from our FileProvider on
 * Nougat+ (or internal-only builds), a file:// uri otherwise.
 */
public static Uri getUriForFile(Context context, File file) {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N || Config.ONLY_INTERNAL_STORAGE) {
        try {
            String packageId = context.getPackageName();
            return FileProvider.getUriForFile(context, packageId + FILE_PROVIDER, file);
        } catch (IllegalArgumentException e) {
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
                // no file:// fallback on N+: rethrow so the caller sees the misconfiguration
                throw new SecurityException(e);
            } else {
                return Uri.fromFile(file);
            }
        }
    } else {
        return Uri.fromFile(file);
    }
}
/**
 * Map a content:// camera uri back to its file:// location under the camera
 * directory; uris that are already file-based are returned unchanged.
 */
public static Uri getIndexableTakePhotoUri(Uri original) {
    if (Config.ONLY_INTERNAL_STORAGE || "file".equals(original.getScheme())) {
        return original;
    } else {
        // last path segment is the image file name generated by getTakePhotoUri()
        List<String> segments = original.getPathSegments();
        return Uri.parse("file://" + getTakePhotoPath() + segments.get(segments.size() - 1));
    }
}
/**
 * Build a PEP avatar from {@code image}, center-cropped to {@code size}.
 * Images containing transparency are re-encoded as a 96px PNG to preserve
 * the alpha channel; everything else uses {@code format} at quality 100.
 */
public Avatar getPepAvatar(Uri image, int size, Bitmap.CompressFormat format) {
    Bitmap bm = cropCenterSquare(image, size);
    if (bm == null) {
        return null;
    }
    if (hasAlpha(bm)) {
        Log.d(Config.LOGTAG,"alpha in avatar detected; uploading as PNG");
        bm.recycle();
        bm = cropCenterSquare(image, 96);
        return getPepAvatar(bm, Bitmap.CompressFormat.PNG, 100);
    }
    return getPepAvatar(bm, format, 100);
}
/**
 * Whether any pixel of {@code bitmap} is not fully opaque.
 *
 * Fix: the inner loop previously iterated y up to getWidth() instead of
 * getHeight(), which skipped rows of tall bitmaps and read out of bounds
 * for wide ones.
 */
private static boolean hasAlpha(final Bitmap bitmap) {
    for (int x = 0; x < bitmap.getWidth(); ++x) {
        for (int y = 0; y < bitmap.getHeight(); ++y) {
            if (Color.alpha(bitmap.getPixel(x, y)) < 255) {
                return true;
            }
        }
    }
    return false;
}
/**
 * Compress {@code bitmap} into a base64-encoded Avatar, recursively lowering
 * quality in steps of 2 (floor 50, never for PNG) until the payload is below
 * Config.AVATAR_CHAR_LIMIT. Returns null when compression fails.
 */
private Avatar getPepAvatar(Bitmap bitmap, Bitmap.CompressFormat format, int quality) {
    try {
        ByteArrayOutputStream mByteArrayOutputStream = new ByteArrayOutputStream();
        // base64 encoding and SHA-1 digest are computed in a single streaming pass
        Base64OutputStream mBase64OutputStream = new Base64OutputStream(mByteArrayOutputStream, Base64.DEFAULT);
        MessageDigest digest = MessageDigest.getInstance("SHA-1");
        DigestOutputStream mDigestOutputStream = new DigestOutputStream(mBase64OutputStream, digest);
        if (!bitmap.compress(format, quality, mDigestOutputStream)) {
            return null;
        }
        mDigestOutputStream.flush();
        mDigestOutputStream.close();
        long chars = mByteArrayOutputStream.size();
        if (format != Bitmap.CompressFormat.PNG && quality >= 50 && chars >= Config.AVATAR_CHAR_LIMIT) {
            int q = quality - 2;
            Log.d(Config.LOGTAG, "avatar char length was " + chars + " reducing quality to " + q);
            return getPepAvatar(bitmap, format, q);
        }
        Log.d(Config.LOGTAG, "settled on char length " + chars + " with quality=" + quality);
        final Avatar avatar = new Avatar();
        avatar.sha1sum = CryptoHelper.bytesToHex(digest.digest());
        avatar.image = new String(mByteArrayOutputStream.toByteArray());
        if (format.equals(Bitmap.CompressFormat.WEBP)) {
            avatar.type = "image/webp";
        } else if (format.equals(Bitmap.CompressFormat.JPEG)) {
            avatar.type = "image/jpeg";
        } else if (format.equals(Bitmap.CompressFormat.PNG)) {
            avatar.type = "image/png";
        }
        avatar.width = bitmap.getWidth();
        avatar.height = bitmap.getHeight();
        return avatar;
    } catch (Exception e) {
        return null;
    }
}
/**
 * Load a previously cached avatar by {@code hash}: re-encodes the file as
 * base64, recomputes its SHA-1 and reads the image bounds. Returns null
 * when the file is missing or unreadable.
 */
public Avatar getStoredPepAvatar(String hash) {
    if (hash == null) {
        return null;
    }
    Avatar avatar = new Avatar();
    File file = new File(getAvatarPath(hash));
    FileInputStream is = null;
    try {
        avatar.size = file.length();
        // bounds-only decode to learn width/height/mime without loading pixels
        BitmapFactory.Options options = new BitmapFactory.Options();
        options.inJustDecodeBounds = true;
        BitmapFactory.decodeFile(file.getAbsolutePath(), options);
        is = new FileInputStream(file);
        ByteArrayOutputStream mByteArrayOutputStream = new ByteArrayOutputStream();
        Base64OutputStream mBase64OutputStream = new Base64OutputStream(mByteArrayOutputStream, Base64.DEFAULT);
        MessageDigest digest = MessageDigest.getInstance("SHA-1");
        DigestOutputStream os = new DigestOutputStream(mBase64OutputStream, digest);
        byte[] buffer = new byte[4096];
        int length;
        while ((length = is.read(buffer)) > 0) {
            os.write(buffer, 0, length);
        }
        os.flush();
        os.close();
        avatar.sha1sum = CryptoHelper.bytesToHex(digest.digest());
        avatar.image = new String(mByteArrayOutputStream.toByteArray());
        avatar.height = options.outHeight;
        avatar.width = options.outWidth;
        avatar.type = options.outMimeType;
        return avatar;
    } catch (IOException e) {
        return null;
    } catch (NoSuchAlgorithmException e) {
        return null;
    } finally {
        close(is);
    }
}
/** Whether the avatar's backing file already exists in the local avatar cache. */
public boolean isAvatarCached(Avatar avatar) {
    return new File(getAvatarPath(avatar.getFilename())).exists();
}
/**
 * Persist {@code avatar} to the avatar cache. Writes a .tmp file first and
 * only renames it into place when the computed SHA-1 matches the announced
 * one, so a corrupt payload never replaces a good cache entry.
 *
 * @return true on success or when the avatar was already cached
 */
public boolean save(Avatar avatar) {
    File file;
    if (isAvatarCached(avatar)) {
        file = new File(getAvatarPath(avatar.getFilename()));
        avatar.size = file.length();
    } else {
        String filename = getAvatarPath(avatar.getFilename());
        file = new File(filename + ".tmp");
        file.getParentFile().mkdirs();
        OutputStream os = null;
        try {
            file.createNewFile();
            os = new FileOutputStream(file);
            MessageDigest digest = MessageDigest.getInstance("SHA-1");
            digest.reset();
            DigestOutputStream mDigestOutputStream = new DigestOutputStream(os, digest);
            final byte[] bytes = avatar.getImageAsBytes();
            mDigestOutputStream.write(bytes);
            mDigestOutputStream.flush();
            mDigestOutputStream.close();
            String sha1sum = CryptoHelper.bytesToHex(digest.digest());
            if (sha1sum.equals(avatar.sha1sum)) {
                // digest verified: atomically move the tmp file into place
                file.renameTo(new File(filename));
            } else {
                Log.d(Config.LOGTAG, "sha1sum mismatch for " + avatar.owner);
                file.delete();
                return false;
            }
            avatar.size = bytes.length;
        } catch (IllegalArgumentException | IOException | NoSuchAlgorithmException e) {
            return false;
        } finally {
            close(os);
        }
    }
    return true;
}
/** Absolute path of the cached avatar file named {@code avatar}. */
public String getAvatarPath(String avatar) {
    return mXmppConnectionService.getFilesDir().getAbsolutePath() + "/avatars/" + avatar;
}
/** file: uri pointing at the cached avatar named {@code avatar}. */
public Uri getAvatarUri(String avatar) {
    return Uri.parse("file:" + getAvatarPath(avatar));
}
/**
 * Decode the image at {@code image}, apply its EXIF rotation and return a
 * size×size center crop; null on any decode or permission failure.
 */
public Bitmap cropCenterSquare(Uri image, int size) {
    if (image == null) {
        return null;
    }
    InputStream is = null;
    try {
        BitmapFactory.Options options = new BitmapFactory.Options();
        options.inSampleSize = calcSampleSize(image, size);
        is = mXmppConnectionService.getContentResolver().openInputStream(image);
        if (is == null) {
            return null;
        }
        Bitmap input = BitmapFactory.decodeStream(is, null, options);
        if (input == null) {
            return null;
        } else {
            input = rotate(input, getRotation(image));
            return cropCenterSquare(input, size);
        }
    } catch (SecurityException e) {
        return null; // happens for example on Android 6.0 if contacts permissions get revoked
    } catch (FileNotFoundException e) {
        return null;
    } finally {
        close(is);
    }
}
/**
 * Decode the image at {@code image} and center-crop it into a
 * newWidth×newHeight bitmap; null on decode or permission failure.
 *
 * Fix: the recycle condition was inverted ({@code if (source.isRecycled())
 * source.recycle()}), so the intermediate bitmap was never recycled; now
 * matches cropCenterSquare(Bitmap, int).
 */
public Bitmap cropCenter(Uri image, int newHeight, int newWidth) {
    if (image == null) {
        return null;
    }
    InputStream is = null;
    try {
        BitmapFactory.Options options = new BitmapFactory.Options();
        options.inSampleSize = calcSampleSize(image, Math.max(newHeight, newWidth));
        is = mXmppConnectionService.getContentResolver().openInputStream(image);
        if (is == null) {
            return null;
        }
        Bitmap source = BitmapFactory.decodeStream(is, null, options);
        if (source == null) {
            return null;
        }
        int sourceWidth = source.getWidth();
        int sourceHeight = source.getHeight();
        // scale so the image covers the target rect, overflow split evenly on both sides
        float xScale = (float) newWidth / sourceWidth;
        float yScale = (float) newHeight / sourceHeight;
        float scale = Math.max(xScale, yScale);
        float scaledWidth = scale * sourceWidth;
        float scaledHeight = scale * sourceHeight;
        float left = (newWidth - scaledWidth) / 2;
        float top = (newHeight - scaledHeight) / 2;
        RectF targetRect = new RectF(left, top, left + scaledWidth, top + scaledHeight);
        Bitmap dest = Bitmap.createBitmap(newWidth, newHeight, Bitmap.Config.ARGB_8888);
        Canvas canvas = new Canvas(dest);
        canvas.drawBitmap(source, null, targetRect, createAntiAliasingPaint());
        if (!source.isRecycled()) {
            source.recycle();
        }
        return dest;
    } catch (SecurityException e) {
        return null; //android 6.0 with revoked permissions for example
    } catch (FileNotFoundException e) {
        return null;
    } finally {
        close(is);
    }
}
/** Center-crop {@code input} into a size×size bitmap; the input is recycled. */
public Bitmap cropCenterSquare(Bitmap input, int size) {
    int w = input.getWidth();
    int h = input.getHeight();
    // scale so the smaller dimension fills the square; overflow is centered
    float scale = Math.max((float) size / h, (float) size / w);
    float outWidth = scale * w;
    float outHeight = scale * h;
    float left = (size - outWidth) / 2;
    float top = (size - outHeight) / 2;
    RectF target = new RectF(left, top, left + outWidth, top + outHeight);
    Bitmap output = Bitmap.createBitmap(size, size, Bitmap.Config.ARGB_8888);
    Canvas canvas = new Canvas(output);
    canvas.drawBitmap(input, null, target, createAntiAliasingPaint());
    if (!input.isRecycled()) {
        input.recycle();
    }
    return output;
}
// Probe only the image bounds to derive a decode sample size for the target edge length.
private int calcSampleSize(Uri image, int size) throws FileNotFoundException, SecurityException {
    BitmapFactory.Options options = new BitmapFactory.Options();
    options.inJustDecodeBounds = true;
    BitmapFactory.decodeStream(mXmppConnectionService.getContentResolver().openInputStream(image), null, options);
    return calcSampleSize(options, size);
}
// File-based variant of the bounds-only sample-size probe above.
private static int calcSampleSize(File image, int size) {
    BitmapFactory.Options options = new BitmapFactory.Options();
    options.inJustDecodeBounds = true;
    BitmapFactory.decodeFile(image.getAbsolutePath(), options);
    return calcSampleSize(options, size);
}
/**
 * Compute a power-of-two BitmapFactory sample size: the largest factor for
 * which both decoded dimensions still exceed {@code size} when halved once
 * more, keeping decode memory low without undershooting the target.
 */
public static int calcSampleSize(BitmapFactory.Options options, int size) {
    final int height = options.outHeight;
    final int width = options.outWidth;
    int sampleSize = 1;
    if (height > size || width > size) {
        while (height / (2 * sampleSize) > size && width / (2 * sampleSize) > size) {
            sampleSize *= 2;
        }
    }
    return sampleSize;
}
/** Rewrite the message body's file metadata without a download url. */
public void updateFileParams(Message message) {
    updateFileParams(message, null);
}
/**
 * Encode file metadata into the message body using the pipe-separated legacy
 * format: url|size for plain files, url|size|width|height for images/videos,
 * and url|size|0|0|runtime for audio.
 */
public void updateFileParams(Message message, URL url) {
    DownloadableFile file = getFile(message);
    final String mime = file.getMimeType();
    boolean image = message.getType() == Message.TYPE_IMAGE || (mime != null && mime.startsWith("image/"));
    boolean video = mime != null && mime.startsWith("video/");
    boolean audio = mime != null && mime.startsWith("audio/");
    final StringBuilder body = new StringBuilder();
    if (url != null) {
        body.append(url.toString());
    }
    body.append('|').append(file.getSize());
    if (image || video) {
        try {
            Dimensions dimensions = image ? getImageDimensions(file) : getVideoDimensions(file);
            body.append('|').append(dimensions.width).append('|').append(dimensions.height);
        } catch (NotAVideoFile notAVideoFile) {
            Log.d(Config.LOGTAG, "file with mime type " + file.getMimeType() + " was not a video file");
            //fall threw
        }
    } else if (audio) {
        body.append("|0|0|").append(getMediaRuntime(file));
    }
    message.setBody(body.toString());
}
/** Duration of the media behind {@code uri} in milliseconds, 0 when unreadable. */
public int getMediaRuntime(Uri uri) {
    try {
        MediaMetadataRetriever mediaMetadataRetriever = new MediaMetadataRetriever();
        mediaMetadataRetriever.setDataSource(mXmppConnectionService, uri);
        return Integer.parseInt(mediaMetadataRetriever.extractMetadata(MediaMetadataRetriever.METADATA_KEY_DURATION));
    } catch (RuntimeException e) {
        // covers setDataSource failures, null metadata and NumberFormatException
        return 0;
    }
}
/** Duration of the media file in milliseconds, 0 when unreadable. */
private int getMediaRuntime(File file) {
    try {
        MediaMetadataRetriever mediaMetadataRetriever = new MediaMetadataRetriever();
        mediaMetadataRetriever.setDataSource(file.toString());
        return Integer.parseInt(mediaMetadataRetriever.extractMetadata(MediaMetadataRetriever.METADATA_KEY_DURATION));
    } catch (RuntimeException e) {
        return 0;
    }
}
/** Width/height of an image file, swapped when its EXIF rotation is 90 or 270. */
private Dimensions getImageDimensions(File file) {
    BitmapFactory.Options options = new BitmapFactory.Options();
    options.inJustDecodeBounds = true;
    BitmapFactory.decodeFile(file.getAbsolutePath(), options);
    int rotation = getRotation(file);
    boolean rotated = rotation == 90 || rotation == 270;
    int imageHeight = rotated ? options.outWidth : options.outHeight;
    int imageWidth = rotated ? options.outHeight : options.outWidth;
    return new Dimensions(imageHeight, imageWidth);
}
/** Video dimensions of a local file; throws NotAVideoFile when it cannot be opened. */
private Dimensions getVideoDimensions(File file) throws NotAVideoFile {
    MediaMetadataRetriever metadataRetriever = new MediaMetadataRetriever();
    try {
        metadataRetriever.setDataSource(file.getAbsolutePath());
    } catch (RuntimeException e) {
        throw new NotAVideoFile(e);
    }
    return getVideoDimensions(metadataRetriever);
}
/** Video dimensions for a content uri; throws NotAVideoFile when it cannot be opened. */
private static Dimensions getVideoDimensions(Context context, Uri uri) throws NotAVideoFile {
    MediaMetadataRetriever mediaMetadataRetriever = new MediaMetadataRetriever();
    try {
        mediaMetadataRetriever.setDataSource(context, uri);
    } catch (RuntimeException e) {
        throw new NotAVideoFile(e);
    }
    return getVideoDimensions(mediaMetadataRetriever);
}
/**
 * Extract width/height from video metadata, swapping them when the track is
 * rotated 90/270 so the result reflects display orientation. Unparsable
 * values are reported as -1. The retriever is released before returning.
 */
private static Dimensions getVideoDimensions(MediaMetadataRetriever metadataRetriever) throws NotAVideoFile {
    String hasVideo = metadataRetriever.extractMetadata(MediaMetadataRetriever.METADATA_KEY_HAS_VIDEO);
    if (hasVideo == null) {
        throw new NotAVideoFile();
    }
    int rotation = extractRotationFromMediaRetriever(metadataRetriever);
    boolean rotated = rotation == 90 || rotation == 270;
    int height;
    try {
        String h = metadataRetriever.extractMetadata(MediaMetadataRetriever.METADATA_KEY_VIDEO_HEIGHT);
        height = Integer.parseInt(h);
    } catch (Exception e) {
        height = -1;
    }
    int width;
    try {
        String w = metadataRetriever.extractMetadata(MediaMetadataRetriever.METADATA_KEY_VIDEO_WIDTH);
        width = Integer.parseInt(w);
    } catch (Exception e) {
        width = -1;
    }
    metadataRetriever.release();
    Log.d(Config.LOGTAG, "extracted video dims " + width + "x" + height);
    // Dimensions ctor takes (height, width); rotated videos swap the two
    return rotated ? new Dimensions(width, height) : new Dimensions(height, width);
}
/** Video rotation in degrees; METADATA_KEY_VIDEO_ROTATION requires API 17+, older devices report 0. */
private static int extractRotationFromMediaRetriever(MediaMetadataRetriever metadataRetriever) {
    int rotation;
    if (Build.VERSION.SDK_INT >= 17) {
        String r = metadataRetriever.extractMetadata(MediaMetadataRetriever.METADATA_KEY_VIDEO_ROTATION);
        try {
            rotation = Integer.parseInt(r);
        } catch (Exception e) {
            rotation = 0;
        }
    } else {
        rotation = 0;
    }
    return rotation;
}
/** Immutable width/height pair. NOTE: the constructor takes (height, width). */
private static class Dimensions {
    public final int width;
    public final int height;

    public Dimensions(int height, int width) {
        this.width = width;
        this.height = height;
    }

    /** The smaller of the two dimensions. */
    public int getMin() {
        return Math.min(width, height);
    }
}
/** Thrown when metadata extraction shows a file has no usable video track. */
private static class NotAVideoFile extends Exception {
    public NotAVideoFile(Throwable t) {
        super(t);
    }

    public NotAVideoFile() {
        super();
    }
}
/**
 * Checked failure of a copy-to-private-storage operation, carrying the
 * string resource id of a user-displayable error message.
 */
public class FileCopyException extends Exception {
    private static final long serialVersionUID = -1010013599132881427L;
    // immutable after construction (was a mutable field)
    private final int resId;

    public FileCopyException(int resId) {
        this.resId = resId;
    }

    public int getResId() {
        return resId;
    }
}
/**
 * Load the locally cached avatar named {@code avatar}, center-cropped to a
 * size×size square; null when the name is null or decoding fails.
 */
public Bitmap getAvatar(String avatar, int size) {
    if (avatar == null) {
        return null;
    }
    // cropCenter already yields null on failure, matching our contract
    return cropCenter(getAvatarUri(avatar), size, size);
}
/** Whether the file referenced by {@code message} exists on disk. */
public boolean isFileAvailable(Message message) {
    return getFile(message).exists();
}
public static void close(Closeable stream) {
if (stream != null) {
try {<|fim▁hole|> }
}
}
/** Close {@code socket} quietly, ignoring null and IO errors. */
public static void close(Socket socket) {
    if (socket == null) {
        return;
    }
    try {
        socket.close();
    } catch (IOException e) {
        // best effort: nothing sensible to do on close failure
    }
}
/**
 * Whether a file:// uri points at a file owned by this app. Pre-Lollipop we
 * can only check that it lives inside our data directory; on Lollipop+ the
 * file's uid is compared against our own.
 */
public static boolean weOwnFile(Context context, Uri uri) {
    if (uri == null || !ContentResolver.SCHEME_FILE.equals(uri.getScheme())) {
        return false;
    } else if (Build.VERSION.SDK_INT < Build.VERSION_CODES.LOLLIPOP) {
        return fileIsInFilesDir(context, uri);
    } else {
        return weOwnFileLollipop(uri);
    }
}
/**
 * This is more than hacky but probably way better than doing nothing
 * Further 'optimizations' might contain to get the parents of CacheDir and NoBackupDir
 * and check against those as well
 *
 * Both paths are canonicalised, so symlink tricks cannot fake a location
 * inside our private data directory.
 */
private static boolean fileIsInFilesDir(Context context, Uri uri) {
    try {
        final String haystack = context.getFilesDir().getParentFile().getCanonicalPath();
        final String needle = new File(uri.getPath()).getCanonicalPath();
        return needle.startsWith(haystack);
    } catch (IOException e) {
        return false;
    }
}
/**
 * fstat() the file and compare its owning uid with our process uid. Missing
 * files are not ours; any other error errs on the side of "ours" so
 * legitimate shares are not blocked.
 */
@TargetApi(Build.VERSION_CODES.LOLLIPOP)
private static boolean weOwnFileLollipop(Uri uri) {
    try {
        File file = new File(uri.getPath());
        FileDescriptor fd = ParcelFileDescriptor.open(file, ParcelFileDescriptor.MODE_READ_ONLY).getFileDescriptor();
        StructStat st = Os.fstat(fd);
        return st.st_uid == android.os.Process.myUid();
    } catch (FileNotFoundException e) {
        return false;
    } catch (Exception e) {
        return true;
    }
}
}<|fim▁end|>
|
stream.close();
} catch (IOException e) {
|
<|file_name|>thisplace.py<|end_file_name|><|fim▁begin|>"""
thisplace: Human-readable addresses for every 3x3m square on the earth's surface.
The simplest way to use this module are the `four_words` and `decode`
functions. For more see `WordHasher`.
"""
import random
import geohash
def get_words(fname):
    """Read one word per line from ``fname`` and return a deterministic
    pseudo-random selection of at most 2**15 unique words.

    The fixed seed keeps the shuffled order stable across runs, which is
    required so previously encoded word-hashes remain decodable.

    Fix: the file handle was opened without a context manager and leaked
    if iteration raised.
    """
    with open(fname) as lines:
        words = [word.strip() for word in lines]
    random.seed(634634)
    random.shuffle(words)
    words = words[:2**15]
    # duplicate words would make decoding ambiguous
    assert len(words) == len(set(words))
    return words
# Word lists are loaded at import time and require the words/ data files
# to be present next to this module.
# These read like alien races from a sci-fi book
GOOGLE_WORDLIST = get_words("words/google-ngram-list")
# shorter list with only 4096 words
GOOGLE_4096WORDS = get_words("words/google-ngram-list-4096")
# current best list for the three word hash
WORDNET_LEMMAS = get_words("words/wordnet-list")

# Human friendly word list, taken directly from humanhash project
# these are the best words but there are not enough of
# them so we only use them for the six word hash
HUMAN_WORDLIST = (
    'ack', 'alabama', 'alanine', 'alaska', 'alpha', 'angel', 'apart', 'april',
    'arizona', 'arkansas', 'artist', 'asparagus', 'aspen', 'august', 'autumn',
    'avocado', 'bacon', 'bakerloo', 'batman', 'beer', 'berlin', 'beryllium',
    'black', 'blossom', 'blue', 'bluebird', 'bravo', 'bulldog', 'burger',
    'butter', 'california', 'carbon', 'cardinal', 'carolina', 'carpet', 'cat',
    'ceiling', 'charlie', 'chicken', 'coffee', 'cola', 'cold', 'colorado',
    'comet', 'connecticut', 'crazy', 'cup', 'dakota', 'december', 'delaware',
    'delta', 'diet', 'don', 'double', 'early', 'earth', 'east', 'echo',
    'edward', 'eight', 'eighteen', 'eleven', 'emma', 'enemy', 'equal',
    'failed', 'fanta', 'fifteen', 'fillet', 'finch', 'fish', 'five', 'fix',
    'floor', 'florida', 'football', 'four', 'fourteen', 'foxtrot', 'freddie',
    'friend', 'fruit', 'gee', 'georgia', 'glucose', 'golf', 'green', 'grey',
    'hamper', 'happy', 'harry', 'hawaii', 'helium', 'high', 'hot', 'hotel',
    'hydrogen', 'idaho', 'illinois', 'india', 'indigo', 'ink', 'iowa',
    'island', 'item', 'jersey', 'jig', 'johnny', 'juliet', 'july', 'jupiter',
    'kansas', 'kentucky', 'kilo', 'king', 'kitten', 'lactose', 'lake', 'lamp',
    'lemon', 'leopard', 'lima', 'lion', 'lithium', 'london', 'louisiana',
    'low', 'magazine', 'magnesium', 'maine', 'mango', 'march', 'mars',
    'maryland', 'massachusetts', 'may', 'mexico', 'michigan', 'mike',
    'minnesota', 'mirror', 'mississippi', 'missouri', 'mobile', 'mockingbird',
    'monkey', 'montana', 'moon', 'mountain', 'muppet', 'music', 'nebraska',
    'neptune', 'network', 'nevada', 'nine', 'nineteen', 'nitrogen', 'north',
    'november', 'nuts', 'october', 'ohio', 'oklahoma', 'one', 'orange',
    'oranges', 'oregon', 'oscar', 'oven', 'oxygen', 'papa', 'paris', 'pasta',
    'pennsylvania', 'pip', 'pizza', 'pluto', 'potato', 'princess', 'purple',
    'quebec', 'queen', 'quiet', 'red', 'river', 'robert', 'robin', 'romeo',
    'rugby', 'sad', 'salami', 'saturn', 'september', 'seven', 'seventeen',
    'shade', 'sierra', 'single', 'sink', 'six', 'sixteen', 'skylark', 'snake',
    'social', 'sodium', 'solar', 'south', 'spaghetti', 'speaker', 'spring',
    'stairway', 'steak', 'stream', 'summer', 'sweet', 'table', 'tango', 'ten',
    'tennessee', 'tennis', 'texas', 'thirteen', 'three', 'timing', 'triple',
    'twelve', 'twenty', 'two', 'uncle', 'undress', 'uniform', 'uranus', 'utah',
    'vegan', 'venus', 'vermont', 'victor', 'video', 'violet', 'virginia',
    'washington', 'west', 'whiskey', 'white', 'william', 'winner', 'winter',
    'wisconsin', 'wolfram', 'wyoming', 'xray', 'yankee', 'yellow', 'zebra',
    'zulu')
class WordHasher(object):
    def __init__(self):
        """Convert latitude and longitudes into human readable strings."""
        self._symbols = "0123456789bcdefghjkmnpqrstuvwxyz"
        self._decode_symbols = dict((ch, i) for (i, ch) in enumerate(self._symbols))
        self._encode_symbols = dict((i, ch) for (i, ch) in enumerate(self._symbols))
        self.six_wordlist = HUMAN_WORDLIST
        self.four_wordlist = GOOGLE_4096WORDS
        self.three_wordlist = GOOGLE_WORDLIST

    def three_words(self, lat_long):
        """Convert coordinate to a combination of three words

        The coordinate is defined by latitude and longitude
        in degrees.
        """
        lat, lon = lat_long
        gh = geohash.encode(lat, lon, 9)
        words = "-".join(self.three_wordlist[p] for p in self.to_rugbits(self.geo_to_int(gh)))
        return words

    def four_words(self, lat_long):
        """Convert coordinate to a combination of four words

        The coordinate is defined by latitude and longitude
        in degrees.
        """
        lat, lon = lat_long
        gh = geohash.encode(lat, lon, 9)
        words = "-".join(self.four_wordlist[p] for p in self.to_quads(self.pad(gh)))
        return words

    def six_words(self, lat_long):
        """Convert coordinate to a combination of six words

        The coordinate is defined by latitude and longitude
        in degrees.

        With six words the word list contains only words
        which are short, easy to pronounce and easy distinguish.
        """
        lat, lon = lat_long
        gh = geohash.encode(lat, lon, 9)
        words = "-".join(self.six_wordlist[p] for p in self.to_bytes(self.pad(gh)))
        return words

    def decode(self, words):
        """Decode `words` to latitude and longitude"""
        words = words.split("-")
        if len(words) == 3:
            i = self.rugbits_to_int([self.three_wordlist.index(w) for w in words])
        elif len(words) == 4:
            i = self.quads_to_int([self.four_wordlist.index(w) for w in words])
            i = self.unpad(i)
        elif len(words) == 6:
            i = self.bytes_to_int([self.six_wordlist.index(w) for w in words])
            i = self.unpad(i)
        else:
            raise RuntimeError("Do not know how to decode a set of %i words."%(len(words)))
        geo_hash = self.int_to_geo(i)
        return geohash.decode(geo_hash)

    def geo_to_int(self, geo_hash):
        """Decode `geo_hash` to an integer"""
        base = len(self._symbols)
        number = 0
        for symbol in geo_hash:
            number = number*base + self._decode_symbols[symbol]
        return number

    def int_to_geo(self, integer):
        """Encode `integer` to a geo hash"""
        base = len(self._symbols)
        symbols = []
        while integer > 0:
            remainder = integer % base
            integer //= base
            symbols.append(self._encode_symbols[remainder])
        return ''.join(reversed(symbols))

    def pad(self, geo_hash):
        """Pad nine character `geo_hash` to 48bit integer"""
        assert len(geo_hash) == 9
        return self.geo_to_int(geo_hash) * 8

    def unpad(self, integer):
        """Remove 3bit of padding to get 45bit geo hash"""
        # reconstructed: this body had been corrupted in this copy of the file
        return integer >> 3

    def to_bytes(self, integer):
        """Convert a 48bit `integer` to a list of 6bytes"""
        bytes = [integer & 0b11111111]
        for n in range(1, 6):
            div = 2**(n*8)
            bytes.append((integer//div) & 0b11111111)
        bytes.reverse()
        return bytes

    def bytes_to_int(self, bytes):
        """Convert a list of 6`bytes` to an integer"""
        assert len(bytes) == 6
        N = 0
        bytes.reverse()
        for n, b in enumerate(bytes):
            N += b * (2**(8*(n)))
        return N

    def to_quads(self, integer):
        """Convert a 48bit `integer` to a list of 4 quads"""
        quads = [integer & 0b111111111111]
        for n in range(1, 4):
            div = 2**(n*12)
            quads.append((integer//div) & 0b111111111111)
        quads.reverse()
        return quads

    def quads_to_int(self, quads):
        """Convert a list of four 12bit values to an integer"""
        assert len(quads) == 4
        N = 0
        quads.reverse()
        for n, b in enumerate(quads):
            N += b * (2**(12*(n)))
        return N

    def to_rugbits(self, integer):
        """Convert a 45bit `integer` to a list of 3rugbits

        A rugbit is like a byte but with 15bits instead of eight.
        """
        fifteen_bits = 0b111111111111111
        rugbits = [(integer//(2**30)) & fifteen_bits,
                   (integer//(2**15)) & fifteen_bits,
                   integer & fifteen_bits]
        return rugbits

    def rugbits_to_int(self, rugbits):
        """Convert a list of `rugbits` to an integer"""
        assert len(rugbits) == 3
        return (rugbits[0] *(2**30)) + (rugbits[1] *(2**15)) + (rugbits[2])
DEFAULT_HASHER = WordHasher()
three_words = DEFAULT_HASHER.three_words
four_words = DEFAULT_HASHER.four_words
six_words = DEFAULT_HASHER.six_words
decode = DEFAULT_HASHER.decode<|fim▁end|>
|
"""Remove 3bit of padding to get 45bit geo hash"""
return integer>>3
def to_bytes(self, integer):
|
<|file_name|>tf_record_dataset_serialization_test.py<|end_file_name|><|fim▁begin|># Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");<|fim▁hole|># you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the TFRecordDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.contrib.data.python.kernel_tests import reader_dataset_ops_test_base
from tensorflow.contrib.data.python.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.platform import test
class TFRecordDatasetSerializationTest(
reader_dataset_ops_test_base.TFRecordDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self,
num_epochs,
batch_size=1,
compression_type=None,
buffer_size=None):
filenames = self._createFiles()
if compression_type == "ZLIB":
zlib_files = []
for i, fn in enumerate(filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
filenames = zlib_files
elif compression_type == "GZIP":
gzip_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
filenames = gzip_files
return core_readers.TFRecordDataset(
filenames, compression_type,
buffer_size=buffer_size).repeat(num_epochs).batch(batch_size)
def testTFRecordWithoutBufferCore(self):
num_epochs = 5
batch_size = num_epochs
num_outputs = num_epochs * self._num_files * self._num_records // batch_size
# pylint: disable=g-long-lambda
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, batch_size,
buffer_size=0),
lambda: self._build_iterator_graph(num_epochs * 2, batch_size),
num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, buffer_size=0), None,
num_outputs * batch_size)
# pylint: enable=g-long-lambda
def testTFRecordWithBufferCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
lambda: self._build_iterator_graph(num_epochs * 2),
num_outputs)
def testTFRecordWithCompressionCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="ZLIB"),
lambda: self._build_iterator_graph(num_epochs * 2), num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="GZIP"),
lambda: self._build_iterator_graph(num_epochs * 2), num_outputs)
if __name__ == "__main__":
test.main()<|fim▁end|>
| |
<|file_name|>data_masking.py<|end_file_name|><|fim▁begin|># -*- coding:utf-8 -*-
from .inception import InceptionDao
from .models import DataMaskingRules, DataMaskingColumns
from simplejson import JSONDecodeError
import simplejson as json
import re
inceptionDao = InceptionDao()
class Masking(object):
    # Mask sensitive values in a query result set
    def data_masking(self, cluster_name, db_name, sql, sql_result):
        """Mask the rows of `sql_result` according to the configured rules.

        The statement is parsed through inception; selected columns whose
        table/name match an active DataMaskingColumns entry have their
        values rewritten via the matching DataMaskingRules regex.

        :param cluster_name: cluster the query ran against
        :param db_name: schema the query ran against
        :param sql: the original SELECT statement
        :param sql_result: dict with 'column_list' and 'rows'; rows are
                           replaced with masked copies on success
        :return: dict with 'status' (0 ok, 1/2 error), 'msg' and 'data'
        """
        result = {'status': 0, 'msg': 'ok', 'data': []}
        # Obtain the parse tree from inception and analyse it
        try:
            print_info = self.query_tree(sql, cluster_name, db_name)
        except Exception as msg:
            result['status'] = 1
            result['msg'] = str(msg)
            return result
        if print_info is None:
            result['status'] = 1
            result['msg'] = 'inception返回的结果集为空!可能是SQL语句有语法错误,无法完成脱敏校验,如果需要继续查询请关闭校验'
        elif print_info['errlevel'] != 0:
            result['status'] = 2
            result['msg'] = 'inception返回异常,无法完成脱敏校验,如果需要继续查询请关闭校验:\n' + print_info['errmsg']
        else:
            query_tree = print_info['query_tree']
            # Collect the columns that hit a masking rule
            try:
                table_hit_columns, hit_columns = self.analy_query_tree(query_tree, cluster_name)
            except Exception as msg:
                result['status'] = 2
                result['msg'] = '解析inception语法树获取表信息出错,无法完成脱敏校验,如果需要继续查询请关闭校验:{}\nquery_tree:{}'.format(str(msg),
                                                                                                      print_info)
                return result
            # For "select *" queries, walk column_list to find the index of
            # each rule-hitting column and append it to hit_columns
            if table_hit_columns and sql_result.get('rows'):
                column_list = sql_result['column_list']
                table_hit_column = {}
                for column_info in table_hit_columns:
                    table_hit_column_info = {}
                    rule_type = column_info['rule_type']
                    table_hit_column_info[column_info['column_name']] = rule_type
                    table_hit_column.update(table_hit_column_info)
                for index, item in enumerate(column_list):
                    if item in table_hit_column.keys():
                        column = {}
                        column['column_name'] = item
                        column['index'] = index
                        column['rule_type'] = table_hit_column.get(item)
                        hit_columns.append(column)
            # Mask the values of the rule-hitting columns.
            # Load all masking rules once up front to avoid per-row queries.
            DataMaskingRulesOb = DataMaskingRules.objects.all()
            if hit_columns and sql_result.get('rows'):
                rows = list(sql_result['rows'])
                for column in hit_columns:
                    index = column['index']
                    for idx, item in enumerate(rows):
                        rows[idx] = list(item)
                        rows[idx][index] = self.regex(DataMaskingRulesOb, column['rule_type'], rows[idx][index])
                sql_result['rows'] = rows
        return result
# 通过inception获取语法树
def query_tree(self, sqlContent, cluster_name, dbName):
try:
print_info = inceptionDao.query_print(sqlContent, cluster_name, dbName)
except Exception as e:
raise Exception('通过inception获取语法树异常,请检查inception配置,并确保inception可以访问实例:' + str(e))
if print_info:
id = print_info[0][0]
statement = print_info[0][1]
# 返回值为非0的情况下,说明是有错的,1表示警告,不影响执行,2表示严重错误,必须修改
errlevel = print_info[0][2]
query_tree = print_info[0][3]
errmsg = print_info[0][4]
# 提交给inception语法错误的情况
if errmsg == 'Global environment':
errlevel = 2
errmsg = 'Global environment: ' + query_tree
if errlevel == 0:
pass<|fim▁hole|> else:
return None
    # Parse the syntax tree for the tables a statement touches,
    # used to enforce table-level query permissions.
    def query_table_ref(self, sqlContent, cluster_name, dbName):
        """Return the `table_ref` list of `sqlContent` via inception.

        :return: dict with 'status' (0 ok, 1/2 error), 'msg' and, on
                 success, 'data' holding the parsed table_ref list
        """
        result = {'status': 0, 'msg': 'ok', 'data': []}
        try:
            print_info = self.query_tree(sqlContent, cluster_name, dbName)
        except Exception as msg:
            result['status'] = 1
            result['msg'] = str(msg)
            return result
        if print_info is None:
            result['status'] = 1
            result['msg'] = 'inception返回的结果集为空!可能是SQL语句有语法错误,无法校验表权限,如果需要继续查询请关闭校验'
        elif print_info['errlevel'] != 0:
            result['status'] = 2
            result['msg'] = 'inception返回异常,无法校验表权限,如果需要继续查询请关闭校验:\n' + print_info['errmsg']
        else:
            try:
                table_ref = json.loads(print_info['query_tree'])['table_ref']
            except Exception:
                # NOTE(review): this first fallback repeats the exact same
                # parse as above; the actual repair only happens in the
                # JSONDecodeError branch below -- presumably the retry was
                # meant to differ; confirm intent before changing.
                try:
                    table_ref = json.loads(print_info['query_tree'])['table_ref']
                except JSONDecodeError:
                    try:
                        table_ref = json.loads(repair_json_str(print_info['query_tree']))['table_ref']
                    except JSONDecodeError as msg:
                        result['status'] = 2
                        result['msg'] = '通过inception语法树解析表信息出错,无法校验表权限,如果需要继续查询请关闭校验:{}\nquery_tree:{}'.format(str(msg),
                                                                                                    print_info)
                        table_ref = ''
            result['data'] = table_ref
        return result
    # Parse query_tree, extract statement info, and return the columns
    # that hit a masking rule.
    def analy_query_tree(self, query_tree, cluster_name):
        """Analyse an inception `query_tree` and locate rule-hitting columns.

        :return: tuple (table_hit_columns, hit_columns); table_hit_columns
                 lists every masked column of the touched tables (only
                 needed for "select *"), hit_columns lists explicitly
                 selected columns that hit a rule (each with a result-set
                 'index'). Both are None when no touched table has masking
                 configured.
        """
        try:
            query_tree_dict = json.loads(query_tree)
        except JSONDecodeError:
            query_tree_dict = json.loads(repair_json_str(query_tree))
        select_list = query_tree_dict.get('select_list')
        table_ref = query_tree_dict.get('table_ref')
        # Load the full masking-column config once to avoid per-loop queries
        DataMaskingColumnsOb = DataMaskingColumns.objects.all()
        # Check whether any touched table has masked columns configured
        is_exist = False
        for table in table_ref:
            if DataMaskingColumnsOb.filter(cluster_name=cluster_name,
                                           table_schema=table['db'],
                                           table_name=table['table'],
                                           active=1).exists():
                is_exist = True
        # Skip rule parsing entirely when no masked column is involved
        if is_exist:
            # Walk select_list
            columns = []
            hit_columns = []  # columns that hit a rule
            table_hit_columns = []  # masked columns of touched tables; only needed for select *
            # Reject syntax we do not know how to mask
            for select_item in select_list:
                if select_item['type'] not in ('FIELD_ITEM', 'aggregate'):
                    raise Exception('不支持该查询语句脱敏!')
                if select_item['type'] == 'aggregate':
                    if select_item['aggregate'].get('type') not in ('FIELD_ITEM', 'INT_ITEM'):
                        raise Exception('不支持该查询语句脱敏!')
            # Collect the selected field names; only FIELD_ITEM and aggregate
            # entries are handled, e.g. [*], [*,column_a], [column_a,*],
            # [column_a,a.*,column_b], [a.*,column_a,b.*]
            select_index = [
                select_item['field'] if select_item['type'] == 'FIELD_ITEM' else select_item['aggregate'].get('field')
                for
                select_item in select_list if select_item['type'] in ('FIELD_ITEM', 'aggregate')]
            # Normalise select_list entries into the plain
            # {'type': 'FIELD_ITEM', 'db': ..., 'table': ..., 'field': ...} shape
            select_list = [select_item if select_item['type'] == 'FIELD_ITEM' else select_item['aggregate'] for
                           select_item in select_list if select_item['type'] in ('FIELD_ITEM', 'aggregate')]
            if select_index:
                # If any field is '*', walk every touched table and collect
                # all of its masked columns
                if '*' in select_index:
                    # masked columns of the touched tables
                    for table in table_ref:
                        hit_columns_info = self.hit_table(DataMaskingColumnsOb, cluster_name, table['db'],
                                                          table['table'])
                        table_hit_columns.extend(hit_columns_info)
                    # The different query shapes:
                    # [*]
                    if re.match(r"^(\*,?)+$", ','.join(select_index)):
                        hit_columns = []
                    # [*,column_a]
                    elif re.match(r"^(\*,)+(\w,?)+$", ','.join(select_index)):
                        # For fields other than '*', check against the rules;
                        # indices count from the end (negative slice)
                        for index, item in enumerate(select_list):
                            item['index'] = index - len(select_list)
                            if item.get('field') != '*':
                                columns.append(item)
                    # [column_a, *]
                    elif re.match(r"^(\w,?)+(\*,?)+$", ','.join(select_index)):
                        # For fields other than '*', indices count from the front
                        for index, item in enumerate(select_list):
                            item['index'] = index
                            if item.get('field') != '*':
                                columns.append(item)
                    # [column_a,a.*,column_b]
                    elif re.match(r"^(\w,?)+(\*,?)+(\w,?)+$", ','.join(select_index)):
                        # Fields before the '*' take front indices, fields
                        # after it take indices counted from the end
                        for index, item in enumerate(select_list):
                            item['index'] = index
                            if item.get('field') == '*':
                                first_idx = index
                                break
                        select_list.reverse()
                        for index, item in enumerate(select_list):
                            item['index'] = index
                            if item.get('field') == '*':
                                last_idx = len(select_list) - index - 1
                                break
                        select_list.reverse()
                        for index, item in enumerate(select_list):
                            if item.get('field') != '*' and index < first_idx:
                                item['index'] = index
                            if item.get('field') != '*' and index > last_idx:
                                item['index'] = index - len(select_list)
                                columns.append(item)
                    # [a.*, column_a, b.*]
                    else:
                        raise Exception('不支持select信息为[a.*, column_a, b.*]格式的查询脱敏!')
                # No '*' in the query: the query_tree column index is exactly
                # the result-set column index
                else:
                    for index, item in enumerate(select_list):
                        item['index'] = index
                        if item.get('field') != '*':
                            columns.append(item)
            # Build the final info dict for each rule-hitting column
            for column in columns:
                hit_info = self.hit_column(DataMaskingColumnsOb, cluster_name, column.get('db'), column.get('table'),
                                           column.get('field'))
                if hit_info['is_hit']:
                    hit_info['index'] = column['index']
                    hit_columns.append(hit_info)
        else:
            table_hit_columns = None
            hit_columns = None
        return table_hit_columns, hit_columns
# 判断字段是否命中脱敏规则,如果命中则返回脱敏的规则id和规则类型
def hit_column(self, DataMaskingColumnsOb, cluster_name, table_schema, table_name, column_name):
column_info = DataMaskingColumnsOb.filter(cluster_name=cluster_name, table_schema=table_schema,
table_name=table_name, column_name=column_name, active=1)
hit_column_info = {}
hit_column_info['cluster_name'] = cluster_name
hit_column_info['table_schema'] = table_schema
hit_column_info['table_name'] = table_name
hit_column_info['column_name'] = column_name
hit_column_info['rule_type'] = 0
hit_column_info['is_hit'] = False
# 命中规则
if column_info:
hit_column_info['rule_type'] = column_info[0].rule_type
hit_column_info['is_hit'] = True
return hit_column_info
# 获取表中所有命中脱敏规则的字段信息
def hit_table(self, DataMaskingColumnsOb, cluster_name, table_schema, table_name):
columns_info = DataMaskingColumnsOb.filter(cluster_name=cluster_name, table_schema=table_schema,
table_name=table_name, active=1)
# 命中规则
hit_columns_info = []
for column in columns_info:
hit_column_info = {}
hit_column_info['cluster_name'] = cluster_name
hit_column_info['table_schema'] = table_schema
hit_column_info['table_name'] = table_name
hit_column_info['is_hit'] = True
hit_column_info['column_name'] = column.column_name
hit_column_info['rule_type'] = column.rule_type
hit_columns_info.append(hit_column_info)
return hit_columns_info
# 利用正则表达式脱敏数据
def regex(self, DataMaskingRulesOb, rule_type, value):
rules_info = DataMaskingRulesOb.get(rule_type=rule_type)
if rules_info:
rule_regex = rules_info.rule_regex
hide_group = rules_info.hide_group
# 正则匹配必须分组,隐藏的组会使用****代替
try:
p = re.compile(rule_regex)
m = p.search(str(value))
masking_str = ''
for i in range(m.lastindex):
if i == hide_group - 1:
group = '****'
else:
group = m.group(i + 1)
masking_str = masking_str + group
return masking_str
except Exception:
return value
else:
return value
def repair_json_str(json_str):
    """Normalise the quirky pseudo-JSON emitted in inception query trees.

    Handles JSONDecodeError: "Expecting property name enclosed in double
    quotes" caused by fragments such as {"a":1,}, ["a":1,], {'a':1}
    and [, { }].
    """
    fixed = re.sub(r"{\s*'(.+)':", r'{"\1":', json_str)
    fixed = re.sub(r",\s*?]", "]", fixed)
    fixed = re.sub(r",\s*?}", "}", fixed)
    fixed = re.sub(r"\[,\s*?{", "[{", fixed)
    return fixed.replace("'", "\"")
|
# print(json.dumps(json.loads(query_tree), indent=4, sort_keys=False, ensure_ascii=False))
return {'id': id, 'statement': statement, 'errlevel': errlevel, 'query_tree': query_tree,
'errmsg': errmsg}
|
<|file_name|>hgrid.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import os
from flask import request
from website.addons.dataverse.client import get_study, get_files, \
get_dataverse, connect_from_settings
from website.project.decorators import must_be_contributor_or_public
from website.project.decorators import must_have_addon
from website.util import rubeus
def dataverse_hgrid_root(node_addon, auth, state=None, **kwargs):
    """Build the HGrid root-folder entry for a node's linked Dataverse study.

    :param node_addon: the node's Dataverse addon settings
    :param auth: current request auth; non-editors are forced to 'released'
    :param state: requested version, 'released' or 'draft'
    :return: single-element list with the rubeus root dict, or [] when no
             study is linked / resolvable / visible to this user
    """
    node = node_addon.owner
    user_settings = node_addon.user_settings

    # Non-editors may only ever see the released version.
    default_state = 'released'
    state = 'released' if not node.can_edit(auth) else state or default_state

    connection = connect_from_settings(user_settings)

    # Quit if no study linked
    if node_addon.study_hdl is None or connection is None:
        return []

    dataverse = get_dataverse(connection, node_addon.dataverse_alias)
    study = get_study(dataverse, node_addon.study_hdl)

    # Quit if hdl does not produce a study
    if study is None:
        return []

    released_files = get_files(study, released=True)
    authorized = node.can_edit(auth)

    # Produce draft version or quit if no released version is available
    if not released_files:
        if authorized:
            state = 'draft'
        else:
            return []

    # Truncate long study names for display.
    study_name = node_addon.study
    if len(study_name) > 23:
        study_name = u'{0}...'.format(study_name[:20])

    permissions = {
        'edit': node.can_edit(auth) and not node.is_registration,
        'view': node.can_view(auth)
    }

    urls = {
        'upload': node.api_url_for('dataverse_upload_file'),
        'fetch': node.api_url_for('dataverse_hgrid_data_contents', state=state),
        'state': node.api_url_for('dataverse_root_folder_public'),
        'release': node.api_url_for('dataverse_release_study'),
    }

    # Drafts get a "Release Study" button in the grid toolbar.
    buttons = [rubeus.build_addon_button(
        '<i class="fa fa-globe"></i> Release Study',
        'releaseStudy')] if state == 'draft' else None

    return [rubeus.build_addon_root(
        node_addon,
        study_name,
        urls=urls,
        permissions=permissions,
        buttons=buttons,
        study=study_name,
        doi=study.doi,
        dataverse=dataverse.title,
        citation=study.citation,
        hasReleasedFiles=bool(released_files),
        state=state,
    )]
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_root_folder_public(node_addon, auth, **kwargs):
    """Return the Dataverse root folder for the state given in the query args."""
    requested_state = request.args['state']
    return dataverse_hgrid_root(node_addon, auth=auth, state=requested_state)
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_hgrid_data_contents(node_addon, auth, **kwargs):
    """List the files of the node's linked study as HGrid items.

    The requested state ('released'/'draft') is read from the query
    string; non-editors are forced to the released file set. Returns
    {'data': [...]} on success or [] when no study can be resolved.
    """
    node = node_addon.owner
    user_settings = node_addon.user_settings

    state = request.args.get('state')
    default_state = 'released'
    state = 'released' if not node.can_edit(auth) else state or default_state
    released = state == 'released'

    # Released listings are never editable; drafts are, for node editors.
    can_edit = node.can_edit(auth) and not node.is_registration and not released
    can_view = node.can_view(auth)

    connection = connect_from_settings(user_settings)

    if node_addon.study_hdl is None or connection is None:
        return []

    dataverse = get_dataverse(connection, node_addon.dataverse_alias)
    study = get_study(dataverse, node_addon.study_hdl)

    # Quit if hdl does not produce a study
    if study is None:
        return []

    info = []

    for f in get_files(study, released):
        item = {
            'addon': 'dataverse',
            'provider': 'dataverse',
            rubeus.KIND: 'file',
            'name': f.name,
            'path': f.name,
            'file_id': f.id,
            'ext': os.path.splitext(f.name)[1],
            'urls': {
                'view': node.web_url_for('dataverse_view_file',
                                         path=f.id),
                'download': node.web_url_for('dataverse_download_file',
                                             path=f.id),
                'delete': node.api_url_for('dataverse_delete_file',
                                           path=f.id),
            },
            'permissions': {
                'view': can_view,
                'edit': can_edit,
            },
        }
        info.append(item)

    return {'data': info}
| |
<|file_name|>test_netapp_nfs.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific NFS driver module."""
from lxml import etree
import mock
import mox
from mox import IgnoreArg
from mox import IsA
import os
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import api
from cinder.volume.drivers.netapp import nfs as netapp_nfs
from cinder.volume.drivers.netapp import utils
from oslo.config import cfg
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def create_configuration():
    """Build a mocked NFS driver configuration with test defaults."""
    config = mox.MockObject(conf.Configuration)
    config.append_config_values(mox.IgnoreArg())
    # Fixed mount point used across the NFS driver tests.
    config.nfs_mount_point_base = '/mnt/test'
    config.nfs_mount_options = None
    return config
class FakeVolume(object):
    """Minimal stand-in for a Cinder volume supporting dict-style access."""

    def __init__(self, size=0):
        self.size = size
        # id mirrors the instance hash, as the real tests only need a
        # unique-ish value.
        self.id = hash(self)
        self.name = None

    def __getitem__(self, key):
        # Dict-style reads map straight onto instance attributes.
        return vars(self)[key]

    def __setitem__(self, key, val):
        vars(self)[key] = val
class FakeSnapshot(object):
    """Minimal stand-in for a Cinder snapshot with dict-style reads."""

    def __init__(self, volume_size=0):
        self.volume_name = None
        self.name = None
        self.volume_id = None
        self.volume_size = volume_size
        self.user_id = None
        self.status = None

    def __getitem__(self, key):
        # Dict-style reads map straight onto instance attributes.
        return vars(self)[key]
class FakeResponse(object):
    """Fake NetApp API response carrying a Status and, on failure, a Reason."""

    def __init__(self, status):
        """Initialize FakeResponse.

        :param status: Either 'failed' or 'passed'
        """
        self.Status = status
        # Reason only exists on failed responses, mirroring the real API
        # object; callers must check Status before reading it.
        if status == 'failed':
            self.Reason = 'Sample error'
class NetappDirectCmodeNfsDriverTestCase(test.TestCase):
"""Test direct NetApp C Mode driver."""
    def setUp(self):
        """Run base-class setup, then build the driver via _custom_setup."""
        super(NetappDirectCmodeNfsDriverTestCase, self).setUp()
        self._custom_setup()
def test_create_snapshot(self):
"""Test snapshot can be created and deleted."""
mox = self.mox
drv = self._driver
mox.StubOutWithMock(drv, '_clone_volume')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
mox.ReplayAll()
drv.create_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_create_volume_from_snapshot(self):
"""Tests volume creation from snapshot."""
drv = self._driver
mox = self.mox
volume = FakeVolume(1)
snapshot = FakeSnapshot(1)
location = '127.0.0.1:/nfs'
expected_result = {'provider_location': location}
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_get_volume_location')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
drv._get_volume_location(IgnoreArg()).AndReturn(location)
drv.local_path(IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all(IgnoreArg())
mox.ReplayAll()
loc = drv.create_volume_from_snapshot(volume, snapshot)
self.assertEqual(loc, expected_result)
mox.VerifyAll()
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
drv._execute('rm', None, run_as_root=True)
drv._post_prov_deprov_in_ssc(IgnoreArg())
mox.ReplayAll()
return mox
def test_delete_existing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(True)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_delete_missing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(False)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
    def _custom_setup(self):
        """Instantiate the C-mode NFS driver in proxy mode with a mock config."""
        kwargs = {}
        kwargs['netapp_mode'] = 'proxy'
        kwargs['configuration'] = create_configuration()
        self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
def test_check_for_setup_error(self):
mox = self.mox
drv = self._driver
required_flags = [
'netapp_transport_type',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port']
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, None)
# check exception raises when flags are not set
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, 'val')
setattr(drv, 'ssc_enabled', False)
mox.StubOutWithMock(netapp_nfs.NetAppDirectNfsDriver, '_check_flags')
netapp_nfs.NetAppDirectNfsDriver._check_flags()
mox.ReplayAll()
drv.check_for_setup_error()
mox.VerifyAll()
# restore initial FLAGS
for flag in required_flags:
delattr(drv.configuration, flag)
def test_do_setup(self):
mox = self.mox
drv = self._driver
mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
mox.StubOutWithMock(drv, '_get_client')
mox.StubOutWithMock(drv, '_do_custom_setup')
netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
drv._get_client()
drv._do_custom_setup(IgnoreArg())
mox.ReplayAll()
drv.do_setup(IsA(context.RequestContext))
mox.VerifyAll()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
mox.StubOutWithMock(drv, '_get_host_ip')
mox.StubOutWithMock(drv, '_get_export_path')
mox.StubOutWithMock(drv, '_get_if_info_by_ip')
mox.StubOutWithMock(drv, '_get_vol_by_junc_vserver')
mox.StubOutWithMock(drv, '_clone_file')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
drv._get_host_ip(IgnoreArg()).AndReturn('127.0.0.1')
drv._get_export_path(IgnoreArg()).AndReturn('/nfs')
drv._get_if_info_by_ip('127.0.0.1').AndReturn(
self._prepare_info_by_ip_response())
drv._get_vol_by_junc_vserver('openstack', '/nfs').AndReturn('nfsvol')
drv._clone_file('nfsvol', 'volume_name', 'clone_name',
'openstack')
drv._post_prov_deprov_in_ssc(IgnoreArg())
return mox
def _prepare_info_by_ip_response(self):
res = """<attributes-list>
<net-interface-info>
<address>127.0.0.1</address>
<administrative-status>up</administrative-status>
<current-node>fas3170rre-cmode-01</current-node>
<current-port>e1b-1165</current-port>
<data-protocols>
<data-protocol>nfs</data-protocol>
</data-protocols>
<dns-domain-name>none</dns-domain-name>
<failover-group/>
<failover-policy>disabled</failover-policy>
<firewall-policy>data</firewall-policy>
<home-node>fas3170rre-cmode-01</home-node>
<home-port>e1b-1165</home-port>
<interface-name>nfs_data1</interface-name>
<is-auto-revert>false</is-auto-revert>
<is-home>true</is-home>
<netmask>255.255.255.0</netmask>
<netmask-length>24</netmask-length>
<operational-status>up</operational-status>
<role>data</role>
<routing-group-name>c10.63.165.0/24</routing-group-name>
<use-failover-group>disabled</use-failover-group>
<vserver>openstack</vserver>
</net-interface-info></attributes-list>"""
response_el = etree.XML(res)
return api.NaElement(response_el).get_children()
def test_clone_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('pass')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
share = 'ip:/share'
drv._clone_volume(volume_name, clone_name, volume_id, share)
mox.VerifyAll()
def test_register_img_in_cache_noshare(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_register_img_in_cache_with_share(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_find_image_in_cache_no_shares(self):
drv = self._driver
drv._mounted_shares = []
result = drv._find_image_in_cache('image_id')
if not result:
pass
else:
self.fail('Return result is unexpected')
def test_find_image_in_cache_shares(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(os.path, 'exists')
drv._get_mount_point_for_share('testshare').AndReturn('/mnt')
os.path.exists('/mnt/img-cache-id').AndReturn(True)
mox.ReplayAll()
result = drv._find_image_in_cache('id')
(share, file_name) = result[0]
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if (share == 'testshare' and file_name == 'img-cache-id'):
pass
else:
LOG.warn(_("Share %(share)s and file name %(file_name)s")
% {'share': share, 'file_name': file_name})
self.fail('Return result is unexpected')
def test_find_old_cache_files_notexists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', 720)
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((None, ''))
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
if len(res) == 0:
pass
else:
self.fail('No files expected but got return values.')
def test_find_old_cache_files_exists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', '720')
files = '/mnt/img-id1\n/mnt/img-id2\n'
r_files = ['img-id1', 'img-id2']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_shortlist_del_eligible_files')
drv._get_mount_point_for_share('share').AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((files, None))
drv._shortlist_del_eligible_files(
IgnoreArg(), r_files).AndReturn(r_files)
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
if len(res) == len(r_files):
for f in res:
r_files.remove(f)
else:
self.fail('Returned files not same as expected.')
def test_delete_files_till_bytes_free_success(self):
drv = self._driver
mox = self.mox
files = [('img-cache-1', 230), ('img-cache-2', 380)]
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_delete_file')
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._delete_file('/mnt/img-cache-2').AndReturn(True)
drv._delete_file('/mnt/img-cache-1').AndReturn(True)
mox.ReplayAll()
drv._delete_files_till_bytes_free(files, 'share', bytes_to_free=1024)
mox.VerifyAll()
def test_clean_image_cache_exec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_find_old_cache_files')
mox.StubOutWithMock(drv, '_delete_files_till_bytes_free')
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 19, 81))
drv._find_old_cache_files('testshare').AndReturn(['f1', 'f2'])
drv._delete_files_till_bytes_free(
['f1', 'f2'], 'testshare', bytes_to_free=31)
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if not drv.cleaning:
pass
else:
self.fail('Clean image cache failed.')
def test_clean_image_cache_noexec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 30, 70))
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if not drv.cleaning:
pass
else:
self.fail('Clean image cache failed.')
def test_clone_image_fromcache(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
mox.StubOutWithMock(drv, '_post_clone_image')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn(
[('share', 'file_name')])
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name')
drv._post_clone_image(volume)
mox.ReplayAll()
drv.clone_image(volume, ('image_location', None), 'image_id', {})
mox.VerifyAll()
def get_img_info(self, format):
class img_info(object):
def __init__(self, fmt):
self.file_format = fmt
return img_info(format)
    def test_clone_image_cloneableshare_nospace(self):
        """Incompatible share: clone_image declines without cloning."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(False)
        mox.ReplayAll()
        (prop, cloned) = drv. clone_image(
            volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
        mox.VerifyAll()
        # Neither the cloned flag nor a provider location may be set.
        if not cloned and not prop['provider_location']:
            pass
        else:
            self.fail('Expected not cloned, got cloned.')

    def test_clone_image_cloneableshare_raw(self):
        """A raw image on a cloneable share is cloned directly (no convert)."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        mox.StubOutWithMock(drv, '_resize_image_file')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('raw'))
        drv._clone_volume(
            'img-id', 'vol', share='127.0.0.1:/share', volume_id=None)
        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all('/mnt/vol')
        drv._resize_image_file({'name': 'vol'}, IgnoreArg())
        mox.ReplayAll()
        drv. clone_image(
            volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
        mox.VerifyAll()

    def test_clone_image_cloneableshare_notraw(self):
        """A non-raw image is converted to raw and registered in the cache."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        mox.StubOutWithMock(drv, '_resize_image_file')
        mox.StubOutWithMock(image_utils, 'convert_image')
        mox.StubOutWithMock(drv, '_register_image_in_cache')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
            '127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        # First probe reports a non-raw format, forcing conversion.
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('notraw'))
        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
        image_utils.qemu_img_info('/mnt/vol').AndReturn(
            self.get_img_info('raw'))
        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all('/mnt/vol')
        drv._resize_image_file({'name': 'vol'}, IgnoreArg())
        mox.ReplayAll()
        drv. clone_image(
            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
        mox.VerifyAll()

    def test_clone_image_file_not_discovered(self):
        """If the cloned file never shows up, the temp file is cleaned up."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(image_utils, 'convert_image')
        mox.StubOutWithMock(drv, '_register_image_in_cache')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        mox.StubOutWithMock(drv, 'local_path')
        mox.StubOutWithMock(os.path, 'exists')
        mox.StubOutWithMock(drv, '_delete_file')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
            '127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('notraw'))
        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
        image_utils.qemu_img_info('/mnt/vol').AndReturn(
            self.get_img_info('raw'))
        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        # Discovery fails, so the driver must delete the orphaned file.
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(False)
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        os.path.exists('/mnt/vol').AndReturn(True)
        drv._delete_file('/mnt/vol')
        mox.ReplayAll()
        vol_dict, result = drv. clone_image(
            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
        mox.VerifyAll()
        self.assertFalse(result)
        self.assertFalse(vol_dict['bootable'])
        self.assertIsNone(vol_dict['provider_location'])

    def test_clone_image_resizefails(self):
        """A resize failure after cloning cleans up and reports not cloned."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        mox.StubOutWithMock(drv, '_resize_image_file')
        mox.StubOutWithMock(image_utils, 'convert_image')
        mox.StubOutWithMock(drv, '_register_image_in_cache')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        mox.StubOutWithMock(drv, 'local_path')
        mox.StubOutWithMock(os.path, 'exists')
        mox.StubOutWithMock(drv, '_delete_file')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
            '127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('notraw'))
        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
        image_utils.qemu_img_info('/mnt/vol').AndReturn(
            self.get_img_info('raw'))
        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all('/mnt/vol')
        # Resize blows up; the driver must fall back to cleanup.
        drv._resize_image_file(
            IgnoreArg(), IgnoreArg()).AndRaise(exception.InvalidResults())
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        os.path.exists('/mnt/vol').AndReturn(True)
        drv._delete_file('/mnt/vol')
        mox.ReplayAll()
        vol_dict, result = drv. clone_image(
            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
        mox.VerifyAll()
        self.assertFalse(result)
        self.assertFalse(vol_dict['bootable'])
        self.assertIsNone(vol_dict['provider_location'])
def test_is_cloneable_share_badformats(self):
drv = self._driver
strgs = ['10.61.666.22:/share/img',
'nfs://10.61.666.22:/share/img',
'nfs://10.61.666.22//share/img',
'nfs://com.netapp.com:/share/img',
'nfs://com.netapp.com//share/img',
'com.netapp.com://share/im\g',
'http://com.netapp.com://share/img',
'nfs://com.netapp.com:/share/img',
'nfs://com.netapp.com:8080//share/img'
'nfs://com.netapp.com//img',
'nfs://[ae::sr::ty::po]/img']
for strg in strgs:
res = drv._is_cloneable_share(strg)
if res:
msg = 'Invalid format matched for url %s.' % strg
self.fail(msg)
    def test_is_cloneable_share_goodformat1(self):
        """Plain host-ip nfs url is accepted as a cloneable share."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://10.61.222.333/share/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_is_cloneable_share_goodformat2(self):
        """Host-ip nfs url with an explicit port is accepted."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://10.61.222.333:8080/share/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_is_cloneable_share_goodformat3(self):
        """Hostname nfs url with a port is accepted."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://com.netapp:8080/share/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_is_cloneable_share_goodformat4(self):
        """Hostname nfs url without a port is accepted."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://netapp.com/share/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_is_cloneable_share_goodformat5(self):
        """Url whose export path is a single component is accepted."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://netapp.com/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_check_share_in_use_no_conn(self):
        """A missing connection yields no usable share."""
        drv = self._driver
        share = drv._check_share_in_use(None, '/dir')
        if share:
            self.fail('Unexpected share detected.')
def test_check_share_in_use_invalid_conn(self):
drv = self._driver
share = drv._check_share_in_use(':8989', '/dir')<|fim▁hole|> def test_check_share_in_use_incorrect_host(self):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(utils, 'resolve_hostname')
utils.resolve_hostname(IgnoreArg()).AndRaise(Exception())
mox.ReplayAll()
share = drv._check_share_in_use('incorrect:8989', '/dir')
mox.VerifyAll()
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_success(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['127.0.0.1:/dir/share']
mox.StubOutWithMock(utils, 'resolve_hostname')
mox.StubOutWithMock(drv, '_share_match_for_ip')
utils.resolve_hostname(IgnoreArg()).AndReturn('10.22.33.44')
drv._share_match_for_ip(
'10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share')
mox.ReplayAll()
share = drv._check_share_in_use('127.0.0.1:8989', '/dir/share')
mox.VerifyAll()
if not share:
self.fail('Expected share not detected')
def test_construct_image_url_loc(self):
drv = self._driver
img_loc = (None,
[{'metadata':
{'share_location': 'nfs://host/path',
'mount_point': '/opt/stack/data/glance',
'type': 'nfs'},
'url': 'file:///opt/stack/data/glance/image-id'}])
location = drv._construct_image_nfs_url(img_loc)
if location != "nfs://host/path/image-id":
self.fail("Unexpected direct url.")
def test_construct_image_url_direct(self):
drv = self._driver
img_loc = ("nfs://host/path/image-id", None)
location = drv._construct_image_nfs_url(img_loc)
if location != "nfs://host/path/image-id":
self.fail("Unexpected direct url.")
class NetappDirectCmodeNfsDriverOnlyTestCase(test.TestCase):
    """Test direct NetApp C Mode driver only and not inherit."""

    def setUp(self):
        super(NetappDirectCmodeNfsDriverOnlyTestCase, self).setUp()
        self._custom_setup()

    def _custom_setup(self):
        """Build a proxy-mode cmode driver with SSC and copy offload on."""
        kwargs = {}
        kwargs['netapp_mode'] = 'proxy'
        kwargs['configuration'] = create_configuration()
        self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
        self._driver.ssc_enabled = True
        self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'

    @mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
    def test_create_volume(self, mock_volume_extra_specs):
        """create_volume reports the selected share as provider_location."""
        drv = self._driver
        drv.ssc_enabled = False
        extra_specs = {}
        mock_volume_extra_specs.return_value = extra_specs
        fake_share = 'localhost:myshare'
        with mock.patch.object(drv, '_ensure_shares_mounted'):
            with mock.patch.object(drv, '_find_shares',
                                   return_value=['localhost:myshare']):
                with mock.patch.object(drv, '_do_create_volume'):
                    volume_info = self._driver.create_volume(FakeVolume(1))
                    self.assertEqual(volume_info.get('provider_location'),
                                     fake_share)

    @mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
    def test_create_volume_with_qos_policy(self, mock_volume_extra_specs):
        """A netapp:qos_policy_group extra spec is applied to the volume."""
        drv = self._driver
        drv.ssc_enabled = False
        extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'}
        fake_volume = FakeVolume(1)
        fake_share = 'localhost:myshare'
        fake_qos_policy = 'qos_policy_1'
        mock_volume_extra_specs.return_value = extra_specs
        with mock.patch.object(drv, '_ensure_shares_mounted'):
            with mock.patch.object(drv, '_find_shares',
                                   return_value=['localhost:myshare']):
                with mock.patch.object(drv, '_do_create_volume'):
                    with mock.patch.object(drv,
                                           '_set_qos_policy_group_on_volume'
                                           ) as mock_set_qos:
                        volume_info = self._driver.create_volume(fake_volume)
                        self.assertEqual(volume_info.get('provider_location'),
                                         'localhost:myshare')
                        mock_set_qos.assert_called_once_with(fake_volume,
                                                             fake_share,
                                                             fake_qos_policy)

    def test_copy_img_to_vol_copyoffload_success(self):
        """Successful copy offload marks the containing vol as stale."""
        drv = self._driver
        context = object()
        volume = {'id': 'vol_id', 'name': 'name'}
        image_service = object()
        image_id = 'image_id'
        drv._client = mock.Mock()
        drv._client.get_api_version = mock.Mock(return_value=(1, 20))
        drv._try_copyoffload = mock.Mock()
        drv._get_provider_location = mock.Mock(return_value='share')
        drv._get_vol_for_share = mock.Mock(return_value='vol')
        drv._update_stale_vols = mock.Mock()
        drv.copy_image_to_volume(context, volume, image_service, image_id)
        drv._try_copyoffload.assert_called_once_with(context, volume,
                                                     image_service,
                                                     image_id)
        drv._update_stale_vols.assert_called_once_with('vol')

    def test_copy_img_to_vol_copyoffload_failure(self):
        """Copy offload failure falls back to the base-class copy path."""
        drv = self._driver
        context = object()
        volume = {'id': 'vol_id', 'name': 'name'}
        image_service = object()
        image_id = 'image_id'
        drv._client = mock.Mock()
        drv._client.get_api_version = mock.Mock(return_value=(1, 20))
        drv._try_copyoffload = mock.Mock(side_effect=Exception())
        # NOTE(review): this replaces the class attribute without restoring
        # it afterwards, which can leak into other tests -- consider
        # mock.patch.object instead.
        netapp_nfs.NetAppNFSDriver.copy_image_to_volume = mock.Mock()
        drv._get_provider_location = mock.Mock(return_value='share')
        drv._get_vol_for_share = mock.Mock(return_value='vol')
        drv._update_stale_vols = mock.Mock()
        drv.copy_image_to_volume(context, volume, image_service, image_id)
        drv._try_copyoffload.assert_called_once_with(context, volume,
                                                     image_service,
                                                     image_id)
        netapp_nfs.NetAppNFSDriver.copy_image_to_volume.\
            assert_called_once_with(context, volume, image_service, image_id)
        drv._update_stale_vols.assert_called_once_with('vol')
    def test_copy_img_to_vol_copyoffload_nonexistent_binary_path(self):
        """A missing copy offload binary propagates the original OSError."""
        drv = self._driver
        context = object()
        volume = {'id': 'vol_id', 'name': 'name'}
        image_service = mock.Mock()
        image_service.get_location.return_value = (mock.Mock(), mock.Mock())
        image_service.show.return_value = {'size': 0}
        image_id = 'image_id'
        drv._client = mock.Mock()
        drv._client.get_api_version = mock.Mock(return_value=(1, 20))
        drv._find_image_in_cache = mock.Mock(return_value=[])
        drv._construct_image_nfs_url = mock.Mock(return_value="")
        drv._check_get_nfs_path_segs = mock.Mock(return_value=("test:test",
                                                               "dr"))
        drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.1268.1.1")
        drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
        drv._get_host_ip = mock.Mock()
        drv._get_provider_location = mock.Mock()
        drv._get_export_path = mock.Mock(return_value="dr")
        drv._check_share_can_hold_size = mock.Mock()
        # Raise error as if the copyoffload file can not be found
        drv._clone_file_dst_exists = mock.Mock(side_effect=OSError())
        # Verify the original error is propagated
        self.assertRaises(OSError, drv._try_copyoffload,
                          context, volume, image_service, image_id)

    def test_copyoffload_frm_cache_success(self):
        """A cache hit makes _try_copyoffload copy from the image cache."""
        drv = self._driver
        context = object()
        volume = {'id': 'vol_id', 'name': 'name'}
        image_service = object()
        image_id = 'image_id'
        drv._find_image_in_cache = mock.Mock(return_value=[('share', 'img')])
        drv._copy_from_cache = mock.Mock(return_value=True)
        drv._try_copyoffload(context, volume, image_service, image_id)
        drv._copy_from_cache.assert_called_once_with(volume,
                                                     image_id,
                                                     [('share', 'img')])

    def test_copyoffload_frm_img_service_success(self):
        """A cache miss makes _try_copyoffload copy from the image service."""
        drv = self._driver
        context = object()
        volume = {'id': 'vol_id', 'name': 'name'}
        image_service = object()
        image_id = 'image_id'
        drv._client = mock.Mock()
        drv._client.get_api_version = mock.Mock(return_value=(1, 20))
        drv._find_image_in_cache = mock.Mock(return_value=[])
        drv._copy_from_img_service = mock.Mock()
        drv._try_copyoffload(context, volume, image_service, image_id)
        drv._copy_from_img_service.assert_called_once_with(context,
                                                           volume,
                                                           image_service,
                                                           image_id)

    def test_cache_copyoffload_workflow_success(self):
        """Copy-from-cache invokes the offload binary with expected args."""
        drv = self._driver
        volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
        image_id = 'image_id'
        cache_result = [('ip1:/openstack', 'img-cache-imgid')]
        drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
        drv._get_host_ip = mock.Mock(return_value='ip2')
        drv._get_export_path = mock.Mock(return_value='/exp_path')
        drv._execute = mock.Mock()
        drv._register_image_in_cache = mock.Mock()
        drv._get_provider_location = mock.Mock(return_value='/share')
        drv._post_clone_image = mock.Mock()
        copied = drv._copy_from_cache(volume, image_id, cache_result)
        self.assertTrue(copied)
        drv._get_ip_verify_on_cluster.assert_any_call('ip1')
        drv._get_export_path.assert_called_with('vol_id')
        # The offload tool copies the cached image into the volume path.
        drv._execute.assert_called_once_with('cof_path', 'ip1', 'ip1',
                                             '/openstack/img-cache-imgid',
                                             '/exp_path/name',
                                             run_as_root=False,
                                             check_exit_code=0)
        drv._post_clone_image.assert_called_with(volume)
        drv._get_provider_location.assert_called_with('vol_id')

    @mock.patch.object(image_utils, 'qemu_img_info')
    def test_img_service_raw_copyoffload_workflow_success(self,
                                                          mock_qemu_img_info):
        """A raw glance image is offloaded directly with no conversion."""
        drv = self._driver
        volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
        image_id = 'image_id'
        context = object()
        image_service = mock.Mock()
        image_service.get_location.return_value = ('nfs://ip1/openstack/img',
                                                   None)
        image_service.show.return_value = {'size': 1,
                                           'disk_format': 'raw'}
        drv._check_get_nfs_path_segs = mock.Mock(return_value=
                                                 ('ip1', '/openstack'))
        drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
        drv._get_host_ip = mock.Mock(return_value='ip2')
        drv._get_export_path = mock.Mock(return_value='/exp_path')
        drv._get_provider_location = mock.Mock(return_value='share')
        drv._execute = mock.Mock()
        drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
        drv._discover_file_till_timeout = mock.Mock(return_value=True)
        img_inf = mock.Mock()
        img_inf.file_format = 'raw'
        mock_qemu_img_info.return_value = img_inf
        drv._check_share_can_hold_size = mock.Mock()
        drv._move_nfs_file = mock.Mock(return_value=True)
        drv._delete_file = mock.Mock()
        drv._clone_file_dst_exists = mock.Mock()
        drv._post_clone_image = mock.Mock()
        drv._copy_from_img_service(context, volume, image_service, image_id)
        drv._get_ip_verify_on_cluster.assert_any_call('ip1')
        drv._get_export_path.assert_called_with('vol_id')
        drv._check_share_can_hold_size.assert_called_with('share', 1)
        assert drv._execute.call_count == 1
        drv._post_clone_image.assert_called_with(volume)
@mock.patch.object(image_utils, 'convert_image')
@mock.patch.object(image_utils, 'qemu_img_info')
@mock.patch('os.path.exists')
def test_img_service_qcow2_copyoffload_workflow_success(self, mock_exists,
mock_qemu_img_info,
mock_cvrt_image):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'qcow2'}
drv._check_get_nfs_path_segs = mock.Mock(return_value=
('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert mock_cvrt_image.call_count == 1
assert drv._execute.call_count == 1
assert drv._delete_file.call_count == 2
drv._clone_file_dst_exists.call_count == 1
drv._post_clone_image.assert_called_with(volume)
class NetappDirect7modeNfsDriverTestCase(NetappDirectCmodeNfsDriverTestCase):
    """Test direct NetApp C Mode driver."""

    def _custom_setup(self):
        # Reuse the inherited cmode test methods against the 7-mode driver.
        self._driver = netapp_nfs.NetAppDirect7modeNfsDriver(
            configuration=create_configuration())

    def _prepare_delete_snapshot_mock(self, snapshot_exists):
        """Set up mox expectations for a snapshot delete.

        When ``snapshot_exists`` is True, the path lookup and the 'rm'
        execution are additionally expected.
        """
        drv = self._driver
        mox = self.mox
        mox.StubOutWithMock(drv, '_get_provider_location')
        mox.StubOutWithMock(drv, '_volume_not_present')
        if snapshot_exists:
            mox.StubOutWithMock(drv, '_execute')
            mox.StubOutWithMock(drv, '_get_volume_path')
        drv._get_provider_location(IgnoreArg())
        drv._volume_not_present(IgnoreArg(), IgnoreArg())\
            .AndReturn(not snapshot_exists)
        if snapshot_exists:
            drv._get_volume_path(IgnoreArg(), IgnoreArg())
            drv._execute('rm', None, run_as_root=True)
        mox.ReplayAll()
        return mox

    def test_check_for_setup_error_version(self):
        """Setup check fails for a missing or unsupported API version."""
        drv = self._driver
        drv._client = api.NaServer("127.0.0.1")
        # check exception raises when version not found
        self.assertRaises(exception.VolumeBackendAPIException,
                          drv.check_for_setup_error)
        drv._client.set_api_version(1, 8)
        # check exception raises when not supported version
        self.assertRaises(exception.VolumeBackendAPIException,
                          drv.check_for_setup_error)

    def test_check_for_setup_error(self):
        """Setup check enforces the required configuration flags."""
        mox = self.mox
        drv = self._driver
        drv._client = api.NaServer("127.0.0.1")
        drv._client.set_api_version(1, 9)
        required_flags = [
            'netapp_transport_type',
            'netapp_login',
            'netapp_password',
            'netapp_server_hostname',
            'netapp_server_port']
        # set required flags
        for flag in required_flags:
            setattr(drv.configuration, flag, None)
        # check exception raises when flags are not set
        self.assertRaises(exception.CinderException,
                          drv.check_for_setup_error)
        # set required flags
        for flag in required_flags:
            setattr(drv.configuration, flag, 'val')
        mox.ReplayAll()
        drv.check_for_setup_error()
        mox.VerifyAll()
        # restore initial FLAGS
        for flag in required_flags:
            delattr(drv.configuration, flag)

    def test_do_setup(self):
        """do_setup delegates to base setup, client creation, custom setup."""
        mox = self.mox
        drv = self._driver
        mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
        mox.StubOutWithMock(drv, '_get_client')
        mox.StubOutWithMock(drv, '_do_custom_setup')
        netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
        drv._get_client()
        drv._do_custom_setup(IgnoreArg())
        mox.ReplayAll()
        drv.do_setup(IsA(context.RequestContext))
        mox.VerifyAll()

    def _prepare_clone_mock(self, status):
        """Set up mox expectations for a clone run.

        ``status == 'fail'`` makes _wait_for_clone_finish raise NaApiError
        and additionally expects _clear_clone to be invoked.
        """
        drv = self._driver
        mox = self.mox
        volume = FakeVolume()
        setattr(volume, 'provider_location', '127.0.0.1:/nfs')
        mox.StubOutWithMock(drv, '_get_export_ip_path')
        mox.StubOutWithMock(drv, '_get_actual_path_for_export')
        mox.StubOutWithMock(drv, '_start_clone')
        mox.StubOutWithMock(drv, '_wait_for_clone_finish')
        if status == 'fail':
            mox.StubOutWithMock(drv, '_clear_clone')
        drv._get_export_ip_path(
            IgnoreArg(), IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
        drv._get_actual_path_for_export(IgnoreArg()).AndReturn('/vol/vol1/nfs')
        drv._start_clone(IgnoreArg(), IgnoreArg()).AndReturn(('1', '2'))
        if status == 'fail':
            drv._wait_for_clone_finish('1', '2').AndRaise(
                api.NaApiError('error', 'error'))
            drv._clear_clone('1')
        else:
            drv._wait_for_clone_finish('1', '2')
        return mox
def test_clone_volume_clear(self):
drv = self._driver
mox = self._prepare_clone_mock('fail')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
try:
drv._clone_volume(volume_name, clone_name, volume_id)
except Exception as e:
if isinstance(e, api.NaApiError):
pass
else:
raise
mox.VerifyAll()<|fim▁end|>
|
if share:
self.fail('Unexpected share detected.')
|
<|file_name|>resource_projection_spec.py<|end_file_name|><|fim▁begin|># Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class that creates resource projection specification."""
import sys
from googlecloudsdk.third_party.py27 import py27_copy as copy
# Doc fragment for transform functions that take a projection argument.
PROJECTION_ARG_DOC = ' projection: The parent ProjectionSpec.'

# The default column alignment name.
ALIGN_DEFAULT = 'left'

# Column alignment functions by name; each maps (string, width) to the
# padded string.
ALIGNMENTS = {'left': lambda s, w: s.ljust(w),
              'center': lambda s, w: s.center(w),
              'right': lambda s, w: s.rjust(w)}
class ProjectionSpec(object):
  """Creates a resource projection specification.

  A resource projection is an expression string that contains a list of resource
  keys with optional attributes. A projector is a method that takes a projection
  specification and a resource object as input and produces a new
  JSON-serializable object containing only the values corresponding to the keys
  in the projection specification.

  Optional projection key attributes may transform the values in the output
  JSON-serializable object. Cloud SDK projection attributes are used for output
  formatting.

  A default or empty projection expression still produces a projector that
  converts a resource to a JSON-serializable object.

  This class is used by the resource projection expression parser to create a
  resource projection specification from a projection expression string.

  Attributes:
    aliases: The short key name alias dictionary.
    _active: The transform active level. Incremented each time Defaults() is
      called. Used to determine active transforms.
    attributes: Projection attributes dict indexed by attribute name.
    _columns: A list of (key,_Attribute) tuples used to project a resource to
      a list of columns.
    _compiler: The projection compiler method for nested projections.
    _empty: An empty projection _Tree used by Projector().
    _name: The projection name from the expression string.
    _tree: The projection _Tree root, used by
      resource_projector.Evaluate() to efficiently project each resource.
    symbols: Default and caller-defined transform function dict indexed by
      function name.
  """

  # Node flags used on _Attribute.flag to classify tree nodes.
  DEFAULT = 0  # _Attribute default node flag.
  INNER = 1  # _Attribute inner node flag.
  PROJECT = 2  # _Attribute project node flag.

  class _Column(object):
    """Column key and transform attribute for self._columns.

    Attributes:
      key: The column key.
      attribute: The column key _Attribute.
    """

    def __init__(self, key, attribute):
      self.key = key
      self.attribute = attribute

  def __init__(self, defaults=None, symbols=None, compiler=None):
    """Initializes a projection.

    Args:
      defaults: resource_projection_spec.ProjectionSpec defaults.
      symbols: Transform function symbol table dict indexed by function name.
      compiler: The projection compiler method for nested projections.
    """
    self.aliases = {}
    self.attributes = {}
    self._columns = []
    self._compiler = compiler
    self._empty = None
    self._name = None
    # NOTE(review): _snake_headings/_snake_re are initialized but not used
    # in this class -- presumably consumed by a subclass or caller; confirm.
    self._snake_headings = {}
    self._snake_re = None
    if defaults:
      # Inherit (deep copies of) the default tree, symbols and aliases so
      # later mutations of this spec do not affect the defaults.
      self._active = defaults.active
      self._tree = copy.deepcopy(defaults.GetRoot())
      self.Defaults()
      if defaults.symbols:
        self.symbols = copy.deepcopy(defaults.symbols)
        if symbols:
          self.symbols.update(symbols)
      else:
        self.symbols = symbols if symbols else {}
      self.aliases.update(defaults.aliases)
    else:
      self._active = 0
      self._tree = None
      self.symbols = symbols

  @property
  def active(self):
    """Gets the transform active level."""
    return self._active

  @property
  def compiler(self):
    """Returns the projection compiler method for nested projections."""
    return self._compiler

  def _Defaults(self, projection):
    """Defaults() helper -- converts a projection to a default projection.

    Args:
      projection: A node in the original projection _Tree.
    """
    # Recursively mark every node in the subtree as a default node.
    projection.attribute.flag = self.DEFAULT
    for node in projection.tree.values():
      self._Defaults(node)

  def _Print(self, projection, out, level):
    """Print() helper -- prints projection node p and its children.

    Args:
      projection: A _Tree node in the original projection.
      out: The output stream.
      level: The nesting level counting from 1 at the root.
    """
    for key in projection.tree:
      out.write('{indent} {key} : {attribute}\n'.format(
          indent='  ' * level,
          key=key,
          attribute=projection.tree[key].attribute))
      self._Print(projection.tree[key], out, level + 1)

  def AddAttribute(self, name, value):
    """Adds name=value to the attributes.

    Args:
      name: The attribute name.
      value: The attribute value
    """
    self.attributes[name] = value

  def DelAttribute(self, name):
    """Deletes name from the attributes if it is in the attributes.

    Args:
      name: The attribute name.
    """
    if name in self.attributes:
      del self.attributes[name]

  def AddAlias(self, name, key):
    """Adds name as an alias for key to the projection.

    Args:
      name: The short (no dots) alias name for key.
      key: The parsed key to add.
    """
    self.aliases[name] = key

  def AddKey(self, key, attribute):
    """Adds key and attribute to the projection.

    Args:
      key: The parsed key to add.
      attribute: Parsed _Attribute to add.
    """
    self._columns.append(self._Column(key, attribute))

  def SetName(self, name):
    """Sets the projection name.

    The projection name is the rightmost of the names in the expression.

    Args:
      name: The projection name.
    """
    if self._name:
      # Reset the name-specific attributes.
      self.attributes = {}
    self._name = name

  def GetRoot(self):
    """Returns the projection root node.

    Returns:
      The resource_projector_parser._Tree root node.
    """
    return self._tree

  def SetRoot(self, root):
    """Sets the projection root node.

    Args:
      root: The resource_projector_parser._Tree root node.
    """
    self._tree = root

  def GetEmpty(self):
    """Returns the projector resource_projector_parser._Tree empty node.

    Returns:
      The projector resource_projector_parser._Tree empty node.
    """
    return self._empty

  def SetEmpty(self, node):
    """Sets the projector resource_projector_parser._Tree empty node.

    The empty node is used by to apply [] empty slice projections.

    Args:
      node: The projector resource_projector_parser._Tree empty node.
    """
    self._empty = node

  def Columns(self):
    """Returns the projection columns.

    Returns:
      The columns in the projection, None if the entire resource is projected.
    """
    return self._columns

  def ColumnCount(self):
    """Returns the number of columns in the projection.

    Returns:
      The number of columns in the projection, 0 if the entire resource is
      projected.
    """
    return len(self._columns)

  def Defaults(self):
    """Converts the projection to a default projection.

    A default projection provides defaults for attribute values and function
    symbols. An explicit non-default projection value always overrides the
    corresponding default value.
    """
    if self._tree:
      self._Defaults(self._tree)
    # Explicit columns are discarded; only tree defaults remain.
    self._columns = []
    self._active += 1

  def Aliases(self):
    """Returns the short key name alias dictionary.

    This dictionary maps short (no dots) names to parsed keys.

    Returns:
      The short key name alias dictionary.
    """
    return self.aliases

  def Attributes(self):
    """Returns the projection _Attribute dictionary.

    Returns:
      The projection _Attribute dictionary.
    """
    return self.attributes

  def Alignments(self):
    """Returns the projection column justfication list.

    Returns:
      The ordered list of alignment functions, where each function is one of
        ljust [default], center, or rjust.
    """
    return [ALIGNMENTS[col.attribute.align] for col in self._columns]

  def Labels(self):
    """Returns the ordered list of projection labels.

    Returns:
      The ordered list of projection label strings, None if all labels are
        empty.
    """
    labels = [col.attribute.label or '' for col in self._columns]
    return labels if any(labels) else None

  def Name(self):
    """Returns the projection name.

    The projection name is the rightmost of the names in the expression.

    Returns:
      The projection name, None if none was specified.
    """
    return self._name

  def Order(self):
    """Returns the projection sort key order suitable for use by sorted().

    Example:
      projection = resource_projector.Compile('...')
      order = projection.Order()
      if order:
        rows = sorted(rows, key=itemgetter(*order))

    Returns:
      The list of (sort-key-index, reverse), [] if projection is None
      or if all sort order indices in the projection are None (unordered).
    """
    ordering = []
    for i, col in enumerate(self._columns):
      if col.attribute.order or col.attribute.reverse:
        # sys.maxint (Python 2) sorts columns with no explicit order last.
        ordering.append(
            (col.attribute.order or sys.maxint, i, col.attribute.reverse))
    return [(i, reverse) for _, i, reverse in sorted(ordering)]

  def Print(self, out=sys.stdout):
    """Prints the projection with indented nesting.

    Args:
      out: The output stream, sys.stdout if None.
    """
    if self._tree:
      self._Print(self._tree, out, 1)

  def Tree(self):
    """Returns the projection tree root.

    Returns:
      The projection tree root.
    """
    return self._tree
| |
<|file_name|>Course.java<|end_file_name|><|fim▁begin|>package gradebookdata;
/**
* Represents one row of the course table in the GradeBook database
*
* @author Eric Carlton<|fim▁hole|> private String name;
private int weight;
private int ID;
/**
* Create a course with all fields filled
*
* @param name
* name of course
* @param weight
* credit hours ( or weight ) of course
* @param ID
* course_ID in course table
*/
public Course(String name, int weight, int ID) {
this.name = name;
this.weight = weight;
this.ID = ID;
}
/**
* Create a generic course
*/
public Course() {
this("no name", 0, -1);
}
public String getName() {
return name;
}
public Integer getWeight() {
return weight;
}
public Integer getID() {
return ID;
}
/**
* Returns a string formatted as:
* course_name
* course_weight hour(s)
*/
@Override
public String toString() {
String result = name + "\n" + weight;
if (weight == 1)
result = result + " hour";
else
result = result + " hours";
return result;
}
}<|fim▁end|>
|
*
*/
public class Course {
|
<|file_name|>test_rfc4043.py<|end_file_name|><|fim▁begin|>#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1.type import univ
from pyasn1_modules import pem
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc4043
class PermIdCertTestCase(unittest.TestCase):
cert_pem_text = """\
MIIDDTCCApOgAwIBAgIJAKWzVCgbsG5HMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
dXMgQ0EwHhcNMTkxMTEwMDA0MDIyWhcNMjAxMTA5MDA0MDIyWjBNMQswCQYDVQQG
EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
YW1wbGUxDTALBgNVBAMTBEdhaWwwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQBoktg
/68xL+uEQaWBoHyOjw8EMLeMEng3R2H7yiEzTGoaMJgPOKvSfzB2P0paHYPL+B5y
Gc0CK5EHRujMl9ljH+Wydpk57rKBLo1ZzpWUS6anLGIkWs1sOakcgGGr7hGjggFL
MIIBRzAdBgNVHQ4EFgQU1pCNZuMzfEaJ9GGhH7RKy6Mvz+cwbwYDVR0jBGgwZoAU
8jXbNATapVXyvWkDmbBi7OIVCMGhQ6RBMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQI
DAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0GCCQDokdYG
kU/O8jAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBhjBCBglghkgBhvhCAQ0E
NRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1
cnBvc2UuMFMGA1UdEQRMMEqgNgYIKwYBBQUHCAOgKjAoDBs4MjYyMDgtNDE3MDI4
LTU0ODE5NS0yMTUyMzMGCSsGAQQBgaxgMIEQZ2FpbEBleGFtcGxlLmNvbTAKBggq
hkjOPQQDAwNoADBlAjBT+36Y/LPaGSu+61P7kR97M8jAjtH5DtUwrWR02ChshvYJ
x0bpZq3PJaO0WlBgFicCMQCf+67wSvjxxtjI/OAg4t8NQIJW1LcehSXizlPDc772
/FC5OiUAxO+iFaSVMeDFsCo=
"""
def setUp(self):
self.asn1Spec = rfc5280.Certificate()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.cert_pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
perm_id_oid = rfc4043.id_on_permanentIdentifier
assigner_oid = univ.ObjectIdentifier('1.3.6.1.4.1.22112.48')<|fim▁hole|> if extn['extnID'] == rfc5280.id_ce_subjectAltName:
extnValue, rest = der_decoder(
extn['extnValue'], asn1Spec=rfc5280.SubjectAltName())
self.assertFalse(rest)
self.assertTrue(extnValue.prettyPrint())
self.assertEqual(extn['extnValue'], der_encoder(extnValue))
for gn in extnValue:
if gn['otherName'].hasValue():
self.assertEqual(perm_id_oid, gn['otherName']['type-id'])
onValue, rest = der_decoder(
gn['otherName']['value'],
asn1Spec=rfc4043.PermanentIdentifier())
self.assertFalse(rest)
self.assertTrue(onValue.prettyPrint())
self.assertEqual(gn['otherName']['value'], der_encoder(onValue))
self.assertEqual(assigner_oid, onValue['assigner'])
permanent_identifier_found = True
self.assertTrue(permanent_identifier_found)
def testOpenTypes(self):
substrate = pem.readBase64fromText(self.cert_pem_text)
asn1Object, rest = der_decoder(
substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
perm_id_oid = rfc4043.id_on_permanentIdentifier
assigner_oid = univ.ObjectIdentifier('1.3.6.1.4.1.22112.48')
permanent_identifier_found = False
for extn in asn1Object['tbsCertificate']['extensions']:
if extn['extnID'] == rfc5280.id_ce_subjectAltName:
extnValue, rest = der_decoder(
extn['extnValue'], asn1Spec=rfc5280.SubjectAltName(),
decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(extnValue.prettyPrint())
self.assertEqual(extn['extnValue'], der_encoder(extnValue))
for gn in extnValue:
if gn['otherName'].hasValue():
on = gn['otherName']
self.assertEqual(perm_id_oid, on['type-id'])
self.assertEqual(assigner_oid, on['value']['assigner'])
permanent_identifier_found = True
self.assertTrue(permanent_identifier_found)
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())<|fim▁end|>
|
permanent_identifier_found = False
for extn in asn1Object['tbsCertificate']['extensions']:
|
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>"""
Build extension modules, package and install Fatiando.
"""
import sys
import os
from setuptools import setup, Extension, find_packages
import numpy
# Get the version number and setup versioneer
import versioneer
versioneer.VCS = 'git'
versioneer.versionfile_source = 'fatiando/_version.py'
versioneer.versionfile_build = 'fatiando/_version.py'
versioneer.tag_prefix = 'v'
versioneer.parentdir_prefix = '.'
NAME = 'fatiando'
FULLNAME = 'Fatiando a Terra'
DESCRIPTION = "Modeling and inversion for geophysics"
AUTHOR = "Leonardo Uieda"
AUTHOR_EMAIL = '[email protected]'
MAINTAINER = AUTHOR
MAINTAINER_EMAIL = AUTHOR_EMAIL
VERSION = versioneer.get_version()
CMDCLASS = versioneer.get_cmdclass()
with open("README.rst") as f:
LONG_DESCRIPTION = ''.join(f.readlines())
PACKAGES = find_packages(exclude=['doc', 'ci', 'cookbook', 'gallery'])
LICENSE = "BSD 3-clause"
URL = "http://www.fatiando.org"
PLATFORMS = "Any"
SCRIPTS = []
PACKAGE_DATA = {'fatiando': [os.path.join('data', '*')]}
CLASSIFIERS = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: {}".format(LICENSE),
]
KEYWORDS = 'geophysics modeling inversion gravimetry seismic magnetometry'
<|fim▁hole|>USE_CYTHON = True if '--cython' in sys.argv else False
ext = '.pyx' if USE_CYTHON else '.c'
libs = []
if os.name == 'posix':
libs.append('m')
C_EXT = [[['fatiando', 'seismic', '_ttime2d'], {}],
[['fatiando', 'seismic', '_wavefd'], {}],
[['fatiando', 'gravmag', '_polyprism'], {}],
[['fatiando', 'gravmag', '_sphere'], {}],
[['fatiando', 'gravmag', '_prism'], {}],
]
extensions = []
for e, extra_args in C_EXT:
extensions.append(
Extension('.'.join(e), [os.path.join(*e) + ext],
libraries=libs,
include_dirs=[numpy.get_include()],
**extra_args))
if USE_CYTHON:
sys.argv.remove('--cython')
from Cython.Build import cythonize
extensions = cythonize(extensions)
if __name__ == '__main__':
setup(name=NAME,
fullname=FULLNAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
license=LICENSE,
url=URL,
platforms=PLATFORMS,
scripts=SCRIPTS,
packages=PACKAGES,
ext_modules=extensions,
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
cmdclass=CMDCLASS)<|fim▁end|>
|
# The running setup.py with --cython, then set things up to generate the Cython
# .c files. If not, then compile the pre-converted C files.
|
<|file_name|>scheduler_hints.py<|end_file_name|><|fim▁begin|>#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
cfg.CONF.import_opt('stack_scheduler_hints', 'heat.common.config')
class SchedulerHintsMixin(object):
'''
Utility class to encapsulate Scheduler Hint related logic shared
between resources.
'''
HEAT_ROOT_STACK_ID = 'heat_root_stack_id'
HEAT_STACK_ID = 'heat_stack_id'
HEAT_STACK_NAME = 'heat_stack_name'
HEAT_PATH_IN_STACK = 'heat_path_in_stack'
HEAT_RESOURCE_NAME = 'heat_resource_name'
HEAT_RESOURCE_UUID = 'heat_resource_uuid'
def _scheduler_hints(self, scheduler_hints):
'''Augment scheduler hints with supplemental content.'''
if cfg.CONF.stack_scheduler_hints:
if scheduler_hints is None:<|fim▁hole|> scheduler_hints[self.HEAT_ROOT_STACK_ID] = \
self.stack.root_stack_id()
scheduler_hints[self.HEAT_STACK_ID] = self.stack.id
scheduler_hints[self.HEAT_STACK_NAME] = self.stack.name
scheduler_hints[self.HEAT_PATH_IN_STACK] = \
self.stack.path_in_stack()
scheduler_hints[self.HEAT_RESOURCE_NAME] = self.name
scheduler_hints[self.HEAT_RESOURCE_UUID] = self.uuid
return scheduler_hints<|fim▁end|>
|
scheduler_hints = {}
|
<|file_name|>udf.py<|end_file_name|><|fim▁begin|>#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
User-defined function related classes and functions
"""
import functools
import sys
from typing import Callable, Any, TYPE_CHECKING, Optional, cast, Union
from py4j.java_gateway import JavaObject # type: ignore[import]
from pyspark import SparkContext
from pyspark.rdd import _prepare_for_python_RDD, PythonEvalType # type: ignore[attr-defined]
from pyspark.sql.column import Column, _to_java_column, _to_seq
from pyspark.sql.types import (
StringType,
DataType,
StructType,
_parse_datatype_string,
)
from pyspark.sql.pandas.types import to_arrow_type
if TYPE_CHECKING:
from pyspark.sql._typing import DataTypeOrString, ColumnOrName, UserDefinedFunctionLike
from pyspark.sql.session import SparkSession
__all__ = ["UDFRegistration"]
def _wrap_function(
sc: SparkContext, func: Callable[..., Any], returnType: "DataTypeOrString"
) -> JavaObject:
command = (func, returnType)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
return sc._jvm.PythonFunction( # type: ignore[attr-defined]
bytearray(pickled_command),
env,
includes,
sc.pythonExec, # type: ignore[attr-defined]
sc.pythonVer, # type: ignore[attr-defined]
broadcast_vars,
sc._javaAccumulator, # type: ignore[attr-defined]
)
def _create_udf(
f: Callable[..., Any],
returnType: "DataTypeOrString",
evalType: int,
name: Optional[str] = None,
deterministic: bool = True,
) -> "UserDefinedFunctionLike":
# Set the name of the UserDefinedFunction object to be the name of function f
udf_obj = UserDefinedFunction(
f, returnType=returnType, name=name, evalType=evalType, deterministic=deterministic
)
return udf_obj._wrapped()
class UserDefinedFunction(object):
"""
User defined function in Python
.. versionadded:: 1.3
Notes
-----
The constructor of this class is not supposed to be directly called.
Use :meth:`pyspark.sql.functions.udf` or :meth:`pyspark.sql.functions.pandas_udf`
to create this instance.
"""
def __init__(
self,
func: Callable[..., Any],
returnType: "DataTypeOrString" = StringType(),
name: Optional[str] = None,
evalType: int = PythonEvalType.SQL_BATCHED_UDF,
deterministic: bool = True,
):
if not callable(func):
raise TypeError(
"Invalid function: not a function or callable (__call__ is not defined): "
"{0}".format(type(func))
)
if not isinstance(returnType, (DataType, str)):
raise TypeError(
"Invalid return type: returnType should be DataType or str "
"but is {}".format(returnType)
)
if not isinstance(evalType, int):
raise TypeError(
"Invalid evaluation type: evalType should be an int but is {}".format(evalType)
)
self.func = func
self._returnType = returnType
# Stores UserDefinedPythonFunctions jobj, once initialized
self._returnType_placeholder: Optional[DataType] = None
self._judf_placeholder = None
self._name = name or (
func.__name__ if hasattr(func, "__name__") else func.__class__.__name__
)
self.evalType = evalType
self.deterministic = deterministic
@property
def returnType(self) -> DataType:
# This makes sure this is called after SparkContext is initialized.
# ``_parse_datatype_string`` accesses to JVM for parsing a DDL formatted string.
if self._returnType_placeholder is None:
if isinstance(self._returnType, DataType):
self._returnType_placeholder = self._returnType
else:
self._returnType_placeholder = _parse_datatype_string(self._returnType)
if (
self.evalType == PythonEvalType.SQL_SCALAR_PANDAS_UDF
or self.evalType == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF
):
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid return type with scalar Pandas UDFs: %s is "
"not supported" % str(self._returnType_placeholder)
)
elif self.evalType == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
if isinstance(self._returnType_placeholder, StructType):
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid return type with grouped map Pandas UDFs or "
"at groupby.applyInPandas: %s is not supported"
% str(self._returnType_placeholder)
)
else:
raise TypeError(
"Invalid return type for grouped map Pandas "
"UDFs or at groupby.applyInPandas: return type must be a "
"StructType."
)
elif (
self.evalType == PythonEvalType.SQL_MAP_PANDAS_ITER_UDF
or self.evalType == PythonEvalType.SQL_MAP_ARROW_ITER_UDF
):
if isinstance(self._returnType_placeholder, StructType):
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid return type in mapInPandas: "
"%s is not supported" % str(self._returnType_placeholder)
)
else:
raise TypeError(
"Invalid return type in mapInPandas/mapInArrow: "
"return type must be a StructType."
)
elif self.evalType == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF:
if isinstance(self._returnType_placeholder, StructType):
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid return type in cogroup.applyInPandas: "
"%s is not supported" % str(self._returnType_placeholder)
)
else:
raise TypeError(
"Invalid return type in cogroup.applyInPandas: "
"return type must be a StructType."
)
elif self.evalType == PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF:
try:
# StructType is not yet allowed as a return type, explicitly check here to fail fast
if isinstance(self._returnType_placeholder, StructType):
raise TypeError
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid return type with grouped aggregate Pandas UDFs: "
"%s is not supported" % str(self._returnType_placeholder)
)
return self._returnType_placeholder
@property
def _judf(self) -> JavaObject:
# It is possible that concurrent access, to newly created UDF,
# will initialize multiple UserDefinedPythonFunctions.
# This is unlikely, doesn't affect correctness,
# and should have a minimal performance impact.
if self._judf_placeholder is None:
self._judf_placeholder = self._create_judf()
return self._judf_placeholder
def _create_judf(self) -> JavaObject:
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
wrapped_func = _wrap_function(sc, self.func, self.returnType)
jdt = spark._jsparkSession.parseDataType(self.returnType.json())
judf = sc._jvm.org.apache.spark.sql.execution.python.UserDefinedPythonFunction( # type: ignore[attr-defined]
self._name, wrapped_func, jdt, self.evalType, self.deterministic
)
return judf
def __call__(self, *cols: "ColumnOrName") -> Column:
judf = self._judf
sc = SparkContext._active_spark_context # type: ignore[attr-defined]
return Column(judf.apply(_to_seq(sc, cols, _to_java_column)))
# This function is for improving the online help system in the interactive interpreter.
# For example, the built-in help / pydoc.help. It wraps the UDF with the docstring and
# argument annotation. (See: SPARK-19161)
def _wrapped(self) -> "UserDefinedFunctionLike":
"""
Wrap this udf with a function and attach docstring from func
"""
# It is possible for a callable instance without __name__ attribute or/and
# __module__ attribute to be wrapped here. For example, functools.partial. In this case,
# we should avoid wrapping the attributes from the wrapped function to the wrapper
# function. So, we take out these attribute names from the default names to set and
# then manually assign it after being wrapped.
assignments = tuple(
a for a in functools.WRAPPER_ASSIGNMENTS if a != "__name__" and a != "__module__"
)
@functools.wraps(self.func, assigned=assignments)
def wrapper(*args: "ColumnOrName") -> Column:
return self(*args)
wrapper.__name__ = self._name
wrapper.__module__ = (
self.func.__module__
if hasattr(self.func, "__module__")
else self.func.__class__.__module__
)
wrapper.func = self.func # type: ignore[attr-defined]
wrapper.returnType = self.returnType # type: ignore[attr-defined]
wrapper.evalType = self.evalType # type: ignore[attr-defined]
wrapper.deterministic = self.deterministic # type: ignore[attr-defined]
wrapper.asNondeterministic = functools.wraps( # type: ignore[attr-defined]
self.asNondeterministic
)(lambda: self.asNondeterministic()._wrapped())
wrapper._unwrapped = self # type: ignore[attr-defined]
return wrapper # type: ignore[return-value]
def asNondeterministic(self) -> "UserDefinedFunction":
"""
Updates UserDefinedFunction to nondeterministic.
.. versionadded:: 2.3
"""
# Here, we explicitly clean the cache to create a JVM UDF instance
# with 'deterministic' updated. See SPARK-23233.
self._judf_placeholder = None
self.deterministic = False
return self
class UDFRegistration(object):
"""
Wrapper for user-defined function registration. This instance can be accessed by
:attr:`spark.udf` or :attr:`sqlContext.udf`.
.. versionadded:: 1.3.1
"""
def __init__(self, sparkSession: "SparkSession"):
self.sparkSession = sparkSession
def register(
self,
name: str,
f: Union[Callable[..., Any], "UserDefinedFunctionLike"],
returnType: Optional["DataTypeOrString"] = None,
) -> "UserDefinedFunctionLike":
"""Register a Python function (including lambda function) or a user-defined function
as a SQL function.
.. versionadded:: 1.3.1
Parameters
----------
name : str,
name of the user-defined function in SQL statements.
f : function, :meth:`pyspark.sql.functions.udf` or :meth:`pyspark.sql.functions.pandas_udf`
a Python function, or a user-defined function. The user-defined function can
be either row-at-a-time or vectorized. See :meth:`pyspark.sql.functions.udf` and
:meth:`pyspark.sql.functions.pandas_udf`.
returnType : :class:`pyspark.sql.types.DataType` or str, optional
the return type of the registered user-defined function. The value can
be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
`returnType` can be optionally specified when `f` is a Python function but not
when `f` is a user-defined function. Please see the examples below.
Returns
-------
function
a user-defined function
Notes
-----
To register a nondeterministic Python function, users need to first build
a nondeterministic user-defined function for the Python function and then register it
as a SQL function.
Examples
--------
1. When `f` is a Python function:
`returnType` defaults to string type and can be optionally specified. The produced
object must match the specified type. In this case, this API works as if
`register(name, f, returnType=StringType())`.
>>> strlen = spark.udf.register("stringLengthString", lambda x: len(x))
>>> spark.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)='4')]
>>> spark.sql("SELECT 'foo' AS text").select(strlen("text")).collect()
[Row(stringLengthString(text)='3')]
>>> from pyspark.sql.types import IntegerType
>>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> spark.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
>>> from pyspark.sql.types import IntegerType
>>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> spark.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
2. When `f` is a user-defined function (from Spark 2.3.0):
Spark uses the return type of the given user-defined function as the return type of
the registered user-defined function. `returnType` should not be specified.
In this case, this API works as if `register(name, f)`.
>>> from pyspark.sql.types import IntegerType
>>> from pyspark.sql.functions import udf
>>> slen = udf(lambda s: len(s), IntegerType())
>>> _ = spark.udf.register("slen", slen)
>>> spark.sql("SELECT slen('test')").collect()
[Row(slen(test)=4)]
>>> import random
>>> from pyspark.sql.functions import udf
>>> from pyspark.sql.types import IntegerType
>>> random_udf = udf(lambda: random.randint(0, 100), IntegerType()).asNondeterministic()
>>> new_random_udf = spark.udf.register("random_udf", random_udf)
>>> spark.sql("SELECT random_udf()").collect() # doctest: +SKIP
[Row(random_udf()=82)]
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import pandas_udf
>>> @pandas_udf("integer") # doctest: +SKIP
... def add_one(s: pd.Series) -> pd.Series:
... return s + 1
...
>>> _ = spark.udf.register("add_one", add_one) # doctest: +SKIP
>>> spark.sql("SELECT add_one(id) FROM range(3)").collect() # doctest: +SKIP
[Row(add_one(id)=1), Row(add_one(id)=2), Row(add_one(id)=3)]
>>> @pandas_udf("integer") # doctest: +SKIP
... def sum_udf(v: pd.Series) -> int:
... return v.sum()
...
>>> _ = spark.udf.register("sum_udf", sum_udf) # doctest: +SKIP
>>> q = "SELECT sum_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2"
>>> spark.sql(q).collect() # doctest: +SKIP
[Row(sum_udf(v1)=1), Row(sum_udf(v1)=5)]
"""
# This is to check whether the input function is from a user-defined function or
# Python function.
if hasattr(f, "asNondeterministic"):
if returnType is not None:
raise TypeError(
"Invalid return type: data type can not be specified when f is"
"a user-defined function, but got %s." % returnType
)
f = cast("UserDefinedFunctionLike", f)
if f.evalType not in [
PythonEvalType.SQL_BATCHED_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
]:
raise ValueError(
"Invalid f: f must be SQL_BATCHED_UDF, SQL_SCALAR_PANDAS_UDF, "
"SQL_SCALAR_PANDAS_ITER_UDF or SQL_GROUPED_AGG_PANDAS_UDF."
)
register_udf = _create_udf(
f.func,
returnType=f.returnType,
name=name,
evalType=f.evalType,
deterministic=f.deterministic,
)._unwrapped # type: ignore[attr-defined]
return_udf = f
else:
if returnType is None:
returnType = StringType()
return_udf = _create_udf(
f, returnType=returnType, evalType=PythonEvalType.SQL_BATCHED_UDF, name=name
)
register_udf = return_udf._unwrapped # type: ignore[attr-defined]
self.sparkSession._jsparkSession.udf().registerPython(name, register_udf._judf)
return return_udf
def registerJavaFunction(
self,
name: str,
javaClassName: str,
returnType: Optional["DataTypeOrString"] = None,
) -> None:
"""Register a Java user-defined function as a SQL function.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not specified we would infer it via reflection.
.. versionadded:: 2.3.0
Parameters
----------
name : str
name of the user-defined function
javaClassName : str
fully qualified name of java class
returnType : :class:`pyspark.sql.types.DataType` or str, optional
the return type of the registered Java function. The value can be either
a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
Examples
--------
>>> from pyspark.sql.types import IntegerType
>>> spark.udf.registerJavaFunction(
... "javaStringLength", "test.org.apache.spark.sql.JavaStringLength", IntegerType())
... # doctest: +SKIP
>>> spark.sql("SELECT javaStringLength('test')").collect() # doctest: +SKIP
[Row(javaStringLength(test)=4)]
>>> spark.udf.registerJavaFunction(
... "javaStringLength2", "test.org.apache.spark.sql.JavaStringLength")
... # doctest: +SKIP
>>> spark.sql("SELECT javaStringLength2('test')").collect() # doctest: +SKIP
[Row(javaStringLength2(test)=4)]
>>> spark.udf.registerJavaFunction(
... "javaStringLength3", "test.org.apache.spark.sql.JavaStringLength", "integer")
... # doctest: +SKIP
>>> spark.sql("SELECT javaStringLength3('test')").collect() # doctest: +SKIP
[Row(javaStringLength3(test)=4)]
"""
jdt = None
if returnType is not None:
if not isinstance(returnType, DataType):<|fim▁hole|> self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
def registerJavaUDAF(self, name: str, javaClassName: str) -> None:
"""Register a Java user-defined aggregate function as a SQL function.
.. versionadded:: 2.3.0
name : str
name of the user-defined aggregate function
javaClassName : str
fully qualified name of java class
Examples
--------
>>> spark.udf.registerJavaUDAF("javaUDAF", "test.org.apache.spark.sql.MyDoubleAvg")
... # doctest: +SKIP
>>> df = spark.createDataFrame([(1, "a"),(2, "b"), (3, "a")],["id", "name"])
>>> df.createOrReplaceTempView("df")
>>> q = "SELECT name, javaUDAF(id) as avg from df group by name order by name desc"
>>> spark.sql(q).collect() # doctest: +SKIP
[Row(name='b', avg=102.0), Row(name='a', avg=102.0)]
"""
self.sparkSession._jsparkSession.udf().registerJavaUDAF(name, javaClassName)
def _test() -> None:
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.udf
globs = pyspark.sql.udf.__dict__.copy()
spark = SparkSession.builder.master("local[4]").appName("sql.udf tests").getOrCreate()
globs["spark"] = spark
(failure_count, test_count) = doctest.testmod(
pyspark.sql.udf, globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()<|fim▁end|>
|
returnType = _parse_datatype_string(returnType)
returnType = cast(DataType, returnType)
jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json())
|
<|file_name|>vehicles.factory.spec.js<|end_file_name|><|fim▁begin|>/* globals describe, beforeEach, it, expect, inject, vehicles, VehicleMock */
describe("Vehicles Factory:", function() {
'use strict';
var $httpBackend,
vehicles,
request,
Showcase;
// Load the main module
beforeEach(module('sc'));
beforeEach(inject(function($injector, _vehicles_, _Showcase_) {
$httpBackend = $injector.get('$httpBackend');
vehicles = _vehicles_;
Showcase = _Showcase_;
request = $httpBackend.whenGET(Showcase.API + 'vehicles').respond(200, angular.copy(VehicleMock.ALL));
$httpBackend.whenGET(Showcase.API + 'vehicles/1').respond(200, VehicleMock.DETAIL);
$httpBackend.whenGET(Showcase.API + 'vehicles/compare/1').respond(200, angular.copy(VehicleMock.COMPARE));
}));
it("should return 4 vehicles", function() {
vehicles.getAll().then(function(response) {
expect(response.length).toEqual(4);
});
$httpBackend.flush();
});
it("should return Toyota as the first Brand", function() {
vehicles.getAll().then(function(response) {
expect(response[0].brand).toEqual('Toyota');
});
$httpBackend.flush();
});
it("should return a 404 error", function() {
request.respond(404, {error: true});
$httpBackend.expectGET(Showcase.API + 'vehicles');
vehicles.getAll().catch(function(error) {
expect(error.error).toBe(true);<|fim▁hole|>
$httpBackend.flush();
});
it("should return vehicle detail", function() {
vehicles.get(1).then(function(response) {
expect(response.model).toEqual('Avalon');
});
$httpBackend.flush();
});
it("should compare 3 vehicles", function() {
vehicles.compare(1,2,3).then(function(response) {
expect(response.length).toEqual(2);
});
$httpBackend.flush();
});
});<|fim▁end|>
|
});
|
<|file_name|>const-fn.rs<|end_file_name|><|fim▁begin|>#![crate_name = "foo"]
// @has foo/fn.bar.html
// @has - '//*[@class="rust fn"]' 'pub const fn bar() -> '
/// foo
pub const fn bar() -> usize {<|fim▁hole|> 2
}
// @has foo/struct.Foo.html
// @has - '//*[@class="method has-srclink"]' 'const fn new()'
pub struct Foo(usize);
impl Foo {
pub const fn new() -> Foo { Foo(0) }
}<|fim▁end|>
| |
<|file_name|>Cling.java<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.launcher2;
import android.content.Context;
import android.content.res.Resources;
import android.content.res.TypedArray;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.graphics.Paint;
import android.graphics.PorterDuff;
import android.graphics.PorterDuffXfermode;
import android.graphics.Rect;
import android.graphics.drawable.Drawable;
import android.util.AttributeSet;
import android.util.DisplayMetrics;
import android.view.FocusFinder;
import android.view.MotionEvent;
import android.view.View;
import android.widget.FrameLayout;
import com.android.launcher.R;
public class Cling extends FrameLayout {
static final String WORKSPACE_CLING_DISMISSED_KEY = "cling.workspace.dismissed";
static final String ALLAPPS_CLING_DISMISSED_KEY = "cling.allapps.dismissed";
static final String FOLDER_CLING_DISMISSED_KEY = "cling.folder.dismissed";
private static String WORKSPACE_PORTRAIT = "workspace_portrait";
private static String WORKSPACE_LANDSCAPE = "workspace_landscape";
private static String WORKSPACE_LARGE = "workspace_large";
private static String WORKSPACE_CUSTOM = "workspace_custom";
private static String ALLAPPS_PORTRAIT = "all_apps_portrait";
private static String ALLAPPS_LANDSCAPE = "all_apps_landscape";
private static String ALLAPPS_LARGE = "all_apps_large";
private static String FOLDER_PORTRAIT = "folder_portrait";
private static String FOLDER_LANDSCAPE = "folder_landscape";
private static String FOLDER_LARGE = "folder_large";
private Launcher mLauncher;
private boolean mIsInitialized;
private String mDrawIdentifier;
private Drawable mBackground;
private Drawable mPunchThroughGraphic;
private Drawable mHandTouchGraphic;
private int mPunchThroughGraphicCenterRadius;
private int mAppIconSize;
private int mButtonBarHeight;
private float mRevealRadius;
private int[] mPositionData;
private Paint mErasePaint;
    /** Convenience constructor; delegates to the three-argument form. */
    public Cling(Context context) {
        this(context, null, 0);
    }
    /** Convenience constructor (XML inflation); delegates to the three-argument form. */
    public Cling(Context context, AttributeSet attrs) {
        this(context, attrs, 0);
    }
    /**
     * Reads the {@code drawIdentifier} styled attribute, which selects which cling
     * variant (workspace / all-apps / folder, per orientation) this view renders,
     * and makes the view clickable so touches don't fall through by default.
     */
    public Cling(Context context, AttributeSet attrs, int defStyle) {
        super(context, attrs, defStyle);
        TypedArray a = context.obtainStyledAttributes(attrs, R.styleable.Cling, defStyle, 0);
        mDrawIdentifier = a.getString(R.styleable.Cling_drawIdentifier);
        a.recycle();
        setClickable(true);
    }
void init(Launcher l, int[] positionData) {
if (!mIsInitialized) {
mLauncher = l;
mPositionData = positionData;
Resources r = getContext().getResources();
<|fim▁hole|> mPunchThroughGraphicCenterRadius =
r.getDimensionPixelSize(R.dimen.clingPunchThroughGraphicCenterRadius);
mAppIconSize = r.getDimensionPixelSize(R.dimen.app_icon_size);
mRevealRadius = r.getDimensionPixelSize(R.dimen.reveal_radius) * 1f;
mButtonBarHeight = r.getDimensionPixelSize(R.dimen.button_bar_height);
mErasePaint = new Paint();
mErasePaint.setXfermode(new PorterDuffXfermode(PorterDuff.Mode.MULTIPLY));
mErasePaint.setColor(0xFFFFFF);
mErasePaint.setAlpha(0);
mIsInitialized = true;
}
}
    /** Drops cached drawables and marks the cling uninitialized so init() can run again. */
    void cleanup() {
        mBackground = null;
        mPunchThroughGraphic = null;
        mHandTouchGraphic = null;
        mIsInitialized = false;
    }
    /** Returns the identifier string selecting this cling's variant (set from XML attrs). */
    public String getDrawIdentifier() {
        return mDrawIdentifier;
    }
    /**
     * Returns the center point(s) of the punch-through hole(s) as a flat
     * [x0, y0, x1, y1, ...] array for the current cling variant, or {-1, -1}
     * when this variant has no hole.
     */
    private int[] getPunchThroughPositions() {
        if (mDrawIdentifier.equals(WORKSPACE_PORTRAIT)) {
            // Center of the bottom button bar.
            return new int[]{getMeasuredWidth() / 2, getMeasuredHeight() - (mButtonBarHeight / 2)};
        } else if (mDrawIdentifier.equals(WORKSPACE_LANDSCAPE)) {
            // Center of the right-hand button bar.
            return new int[]{getMeasuredWidth() - (mButtonBarHeight / 2), getMeasuredHeight() / 2};
        } else if (mDrawIdentifier.equals(WORKSPACE_LARGE)) {
            final float scale = LauncherApplication.getScreenDensity();
            final int cornerXOffset = (int) (scale * 15);
            final int cornerYOffset = (int) (scale * 10);
            // Near the top-right corner; 15/10 are dp offsets converted to px.
            return new int[]{getMeasuredWidth() - cornerXOffset, cornerYOffset};
        } else if (mDrawIdentifier.equals(ALLAPPS_PORTRAIT) ||
                mDrawIdentifier.equals(ALLAPPS_LANDSCAPE) ||
                mDrawIdentifier.equals(ALLAPPS_LARGE)) {
            // Positions are supplied by the launcher via init().
            return mPositionData;
        }
        return new int[]{-1, -1};
    }
    @Override
    public View focusSearch(int direction) {
        // Route to the two-argument overload with this view as the focus origin.
        return this.focusSearch(this, direction);
    }
    @Override
    public View focusSearch(View focused, int direction) {
        // Search for the next focus only within this cling's own hierarchy.
        return FocusFinder.getInstance().findNextFocus(this, focused, direction);
    }
    @Override
    public boolean onHoverEvent(MotionEvent event) {
        // Consume hover events for every cling variant except the folder clings
        // (presumably so hover/accessibility events don't reach views beneath —
        // folder clings fall through to the default FrameLayout handling).
        return (mDrawIdentifier.equals(WORKSPACE_PORTRAIT)
                || mDrawIdentifier.equals(WORKSPACE_LANDSCAPE)
                || mDrawIdentifier.equals(WORKSPACE_LARGE)
                || mDrawIdentifier.equals(ALLAPPS_PORTRAIT)
                || mDrawIdentifier.equals(ALLAPPS_LANDSCAPE)
                || mDrawIdentifier.equals(ALLAPPS_LARGE)
                || mDrawIdentifier.equals(WORKSPACE_CUSTOM));
    }
    @Override
    public boolean onTouchEvent(android.view.MotionEvent event) {
        // Swallow all touches (return true) except those landing inside a
        // punch-through area, which are let through (return false):
        //  - workspace/all-apps clings: inside any reveal circle
        //  - folder clings: inside the open folder's bounds
        if (mDrawIdentifier.equals(WORKSPACE_PORTRAIT) ||
                mDrawIdentifier.equals(WORKSPACE_LANDSCAPE) ||
                mDrawIdentifier.equals(WORKSPACE_LARGE) ||
                mDrawIdentifier.equals(ALLAPPS_PORTRAIT) ||
                mDrawIdentifier.equals(ALLAPPS_LANDSCAPE) ||
                mDrawIdentifier.equals(ALLAPPS_LARGE)) {
            int[] positions = getPunchThroughPositions();
            for (int i = 0; i < positions.length; i += 2) {
                // Euclidean distance from the touch to this hole's center.
                double diff = Math.sqrt(Math.pow(event.getX() - positions[i], 2) +
                        Math.pow(event.getY() - positions[i + 1], 2));
                if (diff < mRevealRadius) {
                    return false;
                }
            }
        } else if (mDrawIdentifier.equals(FOLDER_PORTRAIT) ||
                mDrawIdentifier.equals(FOLDER_LANDSCAPE) ||
                mDrawIdentifier.equals(FOLDER_LARGE)) {
            Folder f = mLauncher.getWorkspace().getOpenFolder();
            if (f != null) {
                Rect r = new Rect();
                f.getHitRect(r);
                if (r.contains((int) event.getX(), (int) event.getY())) {
                    return false;
                }
            }
        }
        return true;
    };
    @Override
    protected void dispatchDraw(Canvas canvas) {
        // Renders the cling overlay into an offscreen ARGB bitmap so the
        // punch-through holes can be erased (MULTIPLY xfermode with alpha 0)
        // before the buffer is composited onto the real canvas.
        if (mIsInitialized) {
            DisplayMetrics metrics = new DisplayMetrics();
            mLauncher.getWindowManager().getDefaultDisplay().getMetrics(metrics);
            // Initialize the draw buffer (to allow punching through)
            Bitmap b = Bitmap.createBitmap(getMeasuredWidth(), getMeasuredHeight(),
                    Bitmap.Config.ARGB_8888);
            Canvas c = new Canvas(b);
            // Draw the background
            if (mBackground == null) {
                // Lazily resolve the background drawable for this cling variant.
                if (mDrawIdentifier.equals(WORKSPACE_PORTRAIT) ||
                        mDrawIdentifier.equals(WORKSPACE_LANDSCAPE) ||
                        mDrawIdentifier.equals(WORKSPACE_LARGE)) {
                    mBackground = getResources().getDrawable(R.drawable.bg_cling1);
                } else if (mDrawIdentifier.equals(ALLAPPS_PORTRAIT) ||
                        mDrawIdentifier.equals(ALLAPPS_LANDSCAPE) ||
                        mDrawIdentifier.equals(ALLAPPS_LARGE)) {
                    mBackground = getResources().getDrawable(R.drawable.bg_cling2);
                } else if (mDrawIdentifier.equals(FOLDER_PORTRAIT) ||
                        mDrawIdentifier.equals(FOLDER_LANDSCAPE)) {
                    mBackground = getResources().getDrawable(R.drawable.bg_cling3);
                } else if (mDrawIdentifier.equals(FOLDER_LARGE)) {
                    mBackground = getResources().getDrawable(R.drawable.bg_cling4);
                } else if (mDrawIdentifier.equals(WORKSPACE_CUSTOM)) {
                    mBackground = getResources().getDrawable(R.drawable.bg_cling5);
                }
            }
            if (mBackground != null) {
                mBackground.setBounds(0, 0, getMeasuredWidth(), getMeasuredHeight());
                mBackground.draw(c);
            } else {
                // Fallback: plain translucent black scrim.
                c.drawColor(0x99000000);
            }
            int cx = -1;
            int cy = -1;
            // Scale the ring artwork so its "center" radius matches the erased circle.
            float scale = mRevealRadius / mPunchThroughGraphicCenterRadius;
            // NOTE(review): mPunchThroughGraphic is assumed to be assigned in
            // init() before the first draw — confirm, otherwise this NPEs.
            int dw = (int) (scale * mPunchThroughGraphic.getIntrinsicWidth());
            int dh = (int) (scale * mPunchThroughGraphic.getIntrinsicHeight());
            // Determine where to draw the punch through graphic
            int[] positions = getPunchThroughPositions();
            for (int i = 0; i < positions.length; i += 2) {
                cx = positions[i];
                cy = positions[i + 1];
                if (cx > -1 && cy > -1) {
                    // Erase a circle, then draw the ring artwork centered on it.
                    c.drawCircle(cx, cy, mRevealRadius, mErasePaint);
                    mPunchThroughGraphic.setBounds(cx - dw / 2, cy - dh / 2, cx + dw / 2, cy + dh / 2);
                    mPunchThroughGraphic.draw(c);
                }
            }
            // Draw the hand graphic in All Apps
            if (mDrawIdentifier.equals(ALLAPPS_PORTRAIT) ||
                    mDrawIdentifier.equals(ALLAPPS_LANDSCAPE) ||
                    mDrawIdentifier.equals(ALLAPPS_LARGE)) {
                if (mHandTouchGraphic == null) {
                    mHandTouchGraphic = getResources().getDrawable(R.drawable.hand);
                }
                // Offset the hand below/right of the last punch-through center.
                int offset = mAppIconSize / 4;
                mHandTouchGraphic.setBounds(cx + offset, cy + offset,
                        cx + mHandTouchGraphic.getIntrinsicWidth() + offset,
                        cy + mHandTouchGraphic.getIntrinsicHeight() + offset);
                mHandTouchGraphic.draw(c);
            }
            canvas.drawBitmap(b, 0, 0, null);
            c.setBitmap(null);
            b = null;
        }
        // Draw the rest of the cling
        super.dispatchDraw(canvas);
    };
}<|fim▁end|>
|
mPunchThroughGraphic = r.getDrawable(R.drawable.cling);
|
<|file_name|>soln.py<|end_file_name|><|fim▁begin|># Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# We use the inorder to find which elements are left and right of the curr element.
# And the post order to start with the first elemeent and then construct right and left trees.
class Solution:
def buildTree(self, inorder: List[int], postorder: List[int]) -> TreeNode:
def helper(inorderL, inorderR):
# base case
if inorderL >= inorderR:
return None
nonlocal postorder
curr = postorder.pop()
root = TreeNode(curr)
currPos = inorderMap[curr]
root.right = helper(currPos+1, inorderR)
root.left = helper(inorderL, currPos)
return root
<|fim▁hole|><|fim▁end|>
|
inorderMap = {v:k for k, v in enumerate(inorder)}
return helper(0, len(inorder))
|
<|file_name|>tests.rs<|end_file_name|><|fim▁begin|>use std::env;
use std::fs;
use std::path::PathBuf;
use actix::prelude::*;
use base64;
use dirs;
use env_logger;
use failure::{err_msg, Error, Fail};
use futures::*;
use tokio_core::reactor::Core;
use crate::actors::admin::Admin;
use crate::actors::agent::Agent;
use crate::actors::forward_agent::ForwardAgent;
use crate::actors::ForwardA2AMsg;
use crate::domain::a2a::*;
use crate::domain::a2connection::*;
use crate::domain::admin_message::AdminQuery;
use crate::domain::config::*;
use crate::domain::invite::*;
use crate::domain::key_deligation_proof::*;
use crate::domain::status::*;
use crate::indy::{crypto, did, wallet, WalletHandle};
use crate::utils::futures::*;
use crate::domain::key_derivation::KeyDerivationFunction;
use std::sync::{RwLock, Arc};
pub const EDGE_AGENT_WALLET_ID: &'static str = "edge_agent_wallet_id";
pub const EDGE_AGENT_WALLET_CONFIG: &'static str = "{\"id\": \"edge_agent_wallet_id\"}";
pub const EDGE_AGENT_WALLET_PASSPHRASE: &'static str = "edge_agent_wallet_passphrase";
pub const EDGE_AGENT_WALLET_CREDENTIALS: &'static str = "{\"key\": \"edge_agent_wallet_passphrase\"}";
pub const EDGE_AGENT_DID: &'static str = "NcYxiDXkpYi6ov5FcYDi1e";
pub const EDGE_AGENT_DID_INFO: &'static str = "{\"did\": \"NcYxiDXkpYi6ov5FcYDi1e\", \"seed\": \"0000000000000000000000000000Edge\"}";
pub const EDGE_AGENT_DID_VERKEY: &'static str = "B4aUxMQdPFkwBtcNUgs4fAbJhLSbXjQmrXByzL6gfDEq";
pub const EDGE_PAIRWISE_DID: &'static str = "BJ8T5EQm8QoVopUR2sd5L2";
pub const EDGE_PAIRWISE_DID_INFO: &'static str = "{\"did\": \"BJ8T5EQm8QoVopUR2sd5L2\", \"seed\": \"00000000000000000000EdgePairwise\"}";
pub const EDGE_PAIRWISE_DID_VERKEY: &'static str = "6cTQci8sG8CEr3pNz71yqxbEch8CiNwoNhoUE7unpWkS";
pub const FORWARD_AGENT_ENDPOINT: &'static str = "http://localhost:8080";
pub const EDGE_PAIRWISE_DID_2: &'static str = "WNnf2uJPZNmvMmA6LkdVAp";<|fim▁hole|>
pub static mut FORWARD_AGENT_WALLET_HANDLE: WalletHandle = WalletHandle(0);
pub const FORWARD_AGENT_WALLET_ID: &'static str = "forward_agent_wallet_id";
pub const FORWARD_AGENT_WALLET_CONFIG: &'static str = "{\"id\": \"forward_agent_wallet_id\"}";
pub const FORWARD_AGENT_WALLET_PASSPHRASE: &'static str = "forward_agent_wallet_passphrase";
pub const FORWARD_AGENT_WALLET_CREDENTIALS: &'static str = "{\"key\": \"forward_agent_wallet_passphrase\"}";
pub const FORWARD_AGENT_DID: &'static str = "VsKV7grR1BUE29mG2Fm2kX";
pub const FORWARD_AGENT_DID_SEED: &'static str = "0000000000000000000000000Forward";
pub const FORWARD_AGENT_DID_INFO: &'static str = "{\"did\": \"VsKV7grR1BUE29mG2Fm2kX\", \"seed\": \"0000000000000000000000000Forward\"}";
pub const FORWARD_AGENT_DID_VERKEY: &'static str = "Hezce2UWMZ3wUhVkh2LfKSs8nDzWwzs2Win7EzNN3YaR";
pub const PHONE_NO: &'static str = "80000000000";
pub const PAYLOAD: [u8; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
/// Root directory for indy-client state: `$HOME/.indy_client`
/// (`Documents/.indy_client` on iOS). Falls back to `/home/indy` when the
/// home directory cannot be determined.
pub fn indy_home_path() -> PathBuf {
    // TODO: FIXME: Provide better handling for the unknown home path case!!!
    // `unwrap_or_else` avoids allocating the fallback PathBuf eagerly
    // (clippy::or_fun_call).
    let mut path = dirs::home_dir().unwrap_or_else(|| PathBuf::from("/home/indy"));
    path.push(if cfg!(target_os = "ios") { "Documents/.indy_client" } else { ".indy_client" });
    path
}
/// `<indy home>/wallet` — base directory holding all wallets.
pub fn wallet_home_path() -> PathBuf {
    indy_home_path().join("wallet")
}
/// Directory of a single named wallet under the wallet home.
pub fn wallet_path(wallet_name: &str) -> PathBuf {
    wallet_home_path().join(wallet_name)
}
/// Scratch directory for tests: `<system temp dir>/indy_client`.
pub fn tmp_path() -> PathBuf {
    env::temp_dir().join("indy_client")
}
/// Path of a named file inside the test scratch directory.
pub fn tmp_file_path(file_name: &str) -> PathBuf {
    tmp_path().join(file_name)
}
/// Removes the whole indy home directory if it exists.
/// Panics on filesystem errors (acceptable in test helpers).
pub fn cleanup_indy_home() {
    let path = indy_home_path();
    if path.exists() {
        fs::remove_dir_all(path).unwrap();
    }
}
/// Removes the test scratch directory if it exists.
/// Panics on filesystem errors (acceptable in test helpers).
pub fn cleanup_temp() {
    let path = tmp_path();
    if path.exists() {
        fs::remove_dir_all(path).unwrap();
    }
}
/// Wipes all on-disk state (indy home + scratch dir) so each test starts clean.
pub fn cleanup_storage() {
    cleanup_indy_home();
    cleanup_temp();
}
/// Test harness: wipes storage, boots an actix `System`, creates/restores the
/// Forward Agent (plus an `Admin` actor), runs the test body `f`, then closes
/// the Forward Agent wallet and stops the system. Any error panics so the
/// test fails visibly.
pub fn run_test<F, B>(f: F)
    where
        F: FnOnce(Addr<ForwardAgent>, Arc<RwLock<Admin>>) -> B + 'static,
        B: IntoFuture<Item=(), Error=Error> + 'static {
    crate::indy::logger::set_default_logger(None).ok();
    env_logger::try_init().ok();
    cleanup_storage();

    System::run(|| {
        Arbiter::spawn_fn(move || {
            let admin = Admin::create();
            let admin_for_test = admin.clone();
            future::ok(())
                .and_then(move |_| {
                    let admin = Some(admin);
                    ForwardAgent::create_or_restore(forward_agent_config(), wallet_storage_config(), admin)
                })
                .and_then(move |fw_agent| {
                    f(fw_agent, admin_for_test)
                })
                // `f` resolves to `()`; the binding was previously (and
                // misleadingly) named `wallet_handle`.
                .and_then(|_|
                    unsafe {
                        // SAFETY: FORWARD_AGENT_WALLET_HANDLE is a `static mut`
                        // written during agent setup; it is only read here, after
                        // the test body has finished, on the same arbiter.
                        wallet::close_wallet(FORWARD_AGENT_WALLET_HANDLE)
                            .map_err(|err| err.context("Can't close Forward Agent wallet.`").into())
                    }
                )
                .map(move |_| {
                    System::current().stop()
                })
                .map_err(|err| panic!("Test error: {}!", err))
        })
    });
}
/// Like [`run_test`], but first provisions a full agent via [`setup_agent`]
/// and hands the resulting tuple (edge wallet handle, agent DID/verkey,
/// pairwise DID/verkey, forward agent address) to `f`. `f` must resolve to
/// the edge wallet handle, which is closed afterwards.
pub fn run_agent_test<F, B>(f: F)
    where
        F: FnOnce((WalletHandle, String, String, String, String, Addr<ForwardAgent>)) -> B + 'static,
        B: IntoFuture<Item=WalletHandle, Error=Error> + 'static {
    // The admin handle isn't needed by agent-level tests.
    run_test(|forward_agent, _admin| {
        future::ok(())
            .and_then(|()| {
                setup_agent(forward_agent)
            })
            .and_then(f)
            .map(|wallet_handle| wallet::close_wallet(wallet_handle).wait().unwrap())
    })
}
/// Like [`run_agent_test`], but also forwards the shared `Admin` actor handle
/// to the test body so admin queries can be exercised.
pub fn run_admin_test<F, B>(f: F)
    where
        F: FnOnce((WalletHandle, String, String, String, String, Addr<ForwardAgent>, Arc<RwLock<Admin>>)) -> B + 'static,
        B: IntoFuture<Item=WalletHandle, Error=Error> + 'static {
    run_test(|forward_agent, admin| {
        future::ok(())
            .and_then(|()| {
                setup_agent(forward_agent)
            })
            // Re-pack the setup tuple with the admin handle appended.
            .and_then(move |(agent_wallet_handle, agent_did, agent_verkey, with_pairwise_did, with_pairwise_did_verkey, forward_agent)| {
                f((agent_wallet_handle, agent_did, agent_verkey, with_pairwise_did, with_pairwise_did_verkey, forward_agent, admin))
            })
            .map(|wallet_handle| wallet::close_wallet(wallet_handle).wait().unwrap())
    })
}
/// Runs the full edge-agent onboarding flow against the Forward Agent:
/// Connect -> SignUp -> CreateAgent -> CreateKey. Resolves to
/// (edge wallet handle, agent DID, agent verkey, agent pairwise DID,
/// agent pairwise verkey, forward agent address).
///
/// Uses `.wait().unwrap()` liberally — this is a test-only helper.
pub fn setup_agent(forward_agent: Addr<ForwardAgent>) -> ResponseFuture<(WalletHandle, String, String, String, String, Addr<ForwardAgent>), Error> {
    future::ok(())
        .and_then(|()| {
            // Step 1: create the edge wallet and send Connect.
            let e_wallet_handle = edge_wallet_setup().wait().unwrap();
            let connect_msg = compose_connect(e_wallet_handle).wait().unwrap();
            forward_agent
                .send(ForwardA2AMsg(connect_msg))
                .from_err()
                .and_then(|res| res)
                .map(move |connected_msg| (forward_agent, e_wallet_handle, connected_msg))
        })
        .and_then(|(forward_agent, e_wallet_handle, connected_msg)| {
            // Step 2: decode the Connected reply and send SignUp to the new pairwise DID.
            // NOTE(review): `sender_verkey` is bound but never used here.
            let (sender_verkey, pairwise_did, pairwise_verkey) = decompose_connected(e_wallet_handle, &connected_msg).wait().unwrap();
            let signup_msg = compose_signup(e_wallet_handle, &pairwise_did, &pairwise_verkey).wait().unwrap();
            forward_agent
                .send(ForwardA2AMsg(signup_msg))
                .from_err()
                .and_then(|res| res)
                .map(move |signedup_msg| (forward_agent, e_wallet_handle, signedup_msg, pairwise_did, pairwise_verkey))
        })
        .and_then(move |(forward_agent, e_wallet_handle, signedup_msg, pairwise_did, pairwise_verkey)| {
            // Step 3: confirm SignedUp, then request agent creation.
            // NOTE(review): `sender_verkey` is bound but never used here.
            let sender_verkey = decompose_signedup(e_wallet_handle, &signedup_msg).wait().unwrap();
            let create_agent_msg = compose_create_agent(e_wallet_handle, &pairwise_did, &pairwise_verkey).wait().unwrap();
            forward_agent
                .send(ForwardA2AMsg(create_agent_msg))
                .from_err()
                .and_then(|res| res)
                .map(move |agent_created_msg| (e_wallet_handle, agent_created_msg, pairwise_verkey, forward_agent))
        })
        .and_then(|(e_wallet_handle, agent_created_msg, pairwise_verkey, forward_agent)| {
            // Step 4: decode AgentCreated and create a pairwise key on the new agent.
            let (_, agent_did, agent_verkey) = decompose_agent_created(e_wallet_handle, &agent_created_msg).wait().unwrap();
            let create_key_msg = compose_create_key(e_wallet_handle, &agent_did, &agent_verkey, EDGE_PAIRWISE_DID, EDGE_PAIRWISE_DID_VERKEY).wait().unwrap();
            forward_agent
                .send(ForwardA2AMsg(create_key_msg))
                .from_err()
                .and_then(|res| res)
                .map(move |key_created_msg| (e_wallet_handle, key_created_msg, agent_did, agent_verkey, forward_agent))
        })
        .map(|(e_wallet_handle, key_created_msg, agent_did, agent_verkey, forward_agent)| {
            // Step 5: decode KeyCreated and return the full provisioning tuple.
            let (_, key) = decompose_key_created(e_wallet_handle, &key_created_msg).wait().unwrap();
            (e_wallet_handle, agent_did, agent_verkey, key.with_pairwise_did, key.with_pairwise_did_verkey, forward_agent)
        })
        .into_box()
}
/// Forward Agent configuration built from the test constants above.
pub fn forward_agent_config() -> ForwardAgentConfig {
    ForwardAgentConfig {
        wallet_id: FORWARD_AGENT_WALLET_ID.into(),
        wallet_passphrase: FORWARD_AGENT_WALLET_PASSPHRASE.into(),
        did: FORWARD_AGENT_DID.into(),
        did_seed: Some(FORWARD_AGENT_DID_SEED.into()),
        endpoint: FORWARD_AGENT_ENDPOINT.into(),
    }
}
/// Default (all-`None`) wallet storage config: use indy's built-in storage.
pub fn wallet_storage_config() -> WalletStorageConfig {
    WalletStorageConfig {
        xtype: None,
        config: None,
        credentials: None,
        plugin_library_path: None,
        plugin_init_function: None
    }
}
/// Creates and opens the edge-agent wallet, then stores the edge DID and the
/// edge pairwise DID in it. Resolves to the open wallet handle.
pub fn edge_wallet_setup() -> BoxedFuture<WalletHandle, Error> {
    future::ok(())
        .and_then(|_| {
            wallet::create_wallet(EDGE_AGENT_WALLET_CONFIG, EDGE_AGENT_WALLET_CREDENTIALS)
                .map_err(|err| err.context("Can't create edge agent wallet.").into())
        })
        .and_then(|_| {
            wallet::open_wallet(EDGE_AGENT_WALLET_CONFIG, EDGE_AGENT_WALLET_CREDENTIALS)
                .map_err(|err| err.context("Can't open edge agent wallet.").into())
        })
        .and_then(|wallet_handle| {
            did::create_and_store_my_did(wallet_handle, EDGE_AGENT_DID_INFO)
                .map(move |_| wallet_handle)
                .map_err(|err| err.context("Can't create edge agent did.").into())
        })
        .and_then(|wallet_handle| {
            // Second DID: the edge agent's pairwise identity.
            did::create_and_store_my_did(wallet_handle, EDGE_PAIRWISE_DID_INFO)
                .map(move |_| wallet_handle)
                .map_err(|err| err.context("Can't create edge agent did.").into())
        })
        .into_box()
}
/// Builds an authcrypted `Connect` message from the edge agent, wrapped in an
/// anoncrypted `Forward` addressed to the Forward Agent.
pub fn compose_connect(wallet_handle: WalletHandle) -> BoxedFuture<Vec<u8>, Error> {
    let msgs = [A2AMessage::Version1(A2AMessageV1::Connect(
        Connect {
            from_did: EDGE_AGENT_DID.into(),
            from_did_verkey: EDGE_AGENT_DID_VERKEY.into(),
        }))];
    let msg = A2AMessage::prepare_authcrypted(wallet_handle,
                                              EDGE_AGENT_DID_VERKEY,
                                              FORWARD_AGENT_DID_VERKEY,
                                              &msgs).wait().unwrap();
    compose_forward(wallet_handle, FORWARD_AGENT_DID, FORWARD_AGENT_DID_VERKEY, msg)
}
/// Unbundles a `Connected` reply addressed to the edge agent and resolves to
/// (sender verkey, pairwise DID, pairwise verkey); any other payload fails
/// with "Invalid message".
pub fn decompose_connected(wallet_handle: WalletHandle, msg: &[u8]) -> BoxedFuture<(String, String, String), Error> {
    A2AMessage::unbundle_authcrypted(wallet_handle, EDGE_AGENT_DID_VERKEY, &msg)
        .and_then(|(sender_verkey, mut msgs)| {
            match msgs.pop() {
                Some(A2AMessage::Version1(A2AMessageV1::Connected(Connected {
                    with_pairwise_did,
                    with_pairwise_did_verkey,
                }))) => Ok((sender_verkey, with_pairwise_did, with_pairwise_did_verkey)),
                _ => Err(err_msg("Invalid message")),
            }
        })
        .into_box()
}
/// Builds an authcrypted `SignUp` message to the pairwise endpoint, wrapped
/// in a `Forward` addressed to the pairwise DID.
pub fn compose_signup(wallet_handle: WalletHandle, pairwise_did: &str, pairwise_verkey: &str) -> BoxedFuture<Vec<u8>, Error> {
    let msgs = [A2AMessage::Version1(A2AMessageV1::SignUp(SignUp {}))];
    let msg = A2AMessage::prepare_authcrypted(wallet_handle,
                                              EDGE_AGENT_DID_VERKEY,
                                              pairwise_verkey,
                                              &msgs).wait().unwrap();
    compose_forward(wallet_handle, &pairwise_did, FORWARD_AGENT_DID_VERKEY, msg)
}
/// Unbundles a `SignedUp` acknowledgement and resolves to the sender verkey;
/// any other payload fails with "Invalid message".
pub fn decompose_signedup(wallet_handle: WalletHandle, msg: &[u8]) -> BoxedFuture<String, Error> {
    A2AMessage::unbundle_authcrypted(wallet_handle, EDGE_AGENT_DID_VERKEY, &msg)
        .and_then(|(sender_verkey, mut msgs)| {
            if let Some(A2AMessage::Version1(A2AMessageV1::SignedUp(_))) = msgs.pop() {
                Ok(sender_verkey)
            } else {
                Err(err_msg("Invalid message"))
            }
        })
        .into_box()
}
/// Builds an authcrypted `CreateAgent` request to the pairwise endpoint,
/// wrapped in a `Forward` addressed to the pairwise DID.
pub fn compose_create_agent(wallet_handle: WalletHandle, pairwise_did: &str, pairwise_verkey: &str) -> BoxedFuture<Vec<u8>, Error> {
    let msgs = vec![A2AMessage::Version1(A2AMessageV1::CreateAgent(CreateAgent {}))];
    let msg = A2AMessage::prepare_authcrypted(wallet_handle,
                                              EDGE_AGENT_DID_VERKEY,
                                              pairwise_verkey,
                                              &msgs).wait().unwrap();
    compose_forward(wallet_handle, pairwise_did, FORWARD_AGENT_DID_VERKEY, msg)
}
/// Unbundles an `AgentCreated` reply and resolves to
/// (sender verkey, agent pairwise DID, agent pairwise verkey).
pub fn decompose_agent_created(wallet_handle: WalletHandle, msg: &[u8]) -> BoxedFuture<(String, String, String), Error> {
    A2AMessage::unbundle_authcrypted(wallet_handle, EDGE_AGENT_DID_VERKEY, &msg)
        .and_then(|(sender_verkey, mut msgs)| {
            if let Some(A2AMessage::Version1(A2AMessageV1::AgentCreated(agent_created))) = msgs.pop() {
                let AgentCreated { with_pairwise_did: pw_did, with_pairwise_did_verkey: pw_vk } = agent_created;
                Ok((sender_verkey, pw_did, pw_vk))
            } else {
                Err(err_msg("Invalid message"))
            }
        })
        .into_box()
}
/// Builds an authcrypted `CreateKey` request (registering `for_did`/`for_verkey`
/// with the agent), wrapped in a `Forward` addressed to the agent DID.
pub fn compose_create_key(wallet_handle: WalletHandle, agent_did: &str, agent_verkey: &str, for_did: &str, for_verkey: &str) -> BoxedFuture<Vec<u8>, Error> {
    let msgs = [A2AMessage::Version1(A2AMessageV1::CreateKey(
        CreateKey {
            for_did: for_did.into(),
            for_did_verkey: for_verkey.into(),
        }))];
    let msg = A2AMessage::prepare_authcrypted(wallet_handle,
                                              EDGE_AGENT_DID_VERKEY,
                                              agent_verkey,
                                              &msgs).wait().unwrap();
    compose_forward(wallet_handle, agent_did, FORWARD_AGENT_DID_VERKEY, msg)
}
/// Unbundles a `KeyCreated` reply and resolves to (sender verkey, payload).
pub fn decompose_key_created(wallet_handle: WalletHandle, msg: &[u8]) -> BoxedFuture<(String, KeyCreated), Error> {
    A2AMessage::unbundle_authcrypted(wallet_handle, EDGE_AGENT_DID_VERKEY, &msg)
        .and_then(|(sender_verkey, mut msgs)| {
            if let Some(A2AMessage::Version1(A2AMessageV1::KeyCreated(key_created))) = msgs.pop() {
                Ok((sender_verkey, key_created))
            } else {
                Err(err_msg("Invalid message"))
            }
        })
        .into_box()
}
/// Builds a `CreateMessage(ConnReq)` + `ConnectionRequest` detail pair and
/// routes it through the double-forward envelope via [`compose_message`].
pub fn compose_create_connection_request(wallet_handle: WalletHandle,
                                         agent_did: &str,
                                         agent_verkey: &str,
                                         agent_pairwise_did: &str,
                                         agent_pairwise_verkey: &str) -> BoxedFuture<Vec<u8>, Error> {
    let create_msg = build_create_message_request(RemoteMessageType::ConnReq, None);
    let msg_details = A2AMessage::Version1(A2AMessageV1::MessageDetail(
        MessageDetail::ConnectionRequest(
            ConnectionRequestMessageDetail {
                // Proof that the agent pairwise key may act for the edge pairwise key.
                key_dlg_proof: gen_key_delegated_proof(wallet_handle, EDGE_PAIRWISE_DID_VERKEY, &agent_pairwise_did, &agent_pairwise_verkey),
                target_name: None,
                phone_no: Some(PHONE_NO.to_string()),
                use_public_did: Some(true),
                thread_id: None,
            })));
    let msgs = [create_msg, msg_details];
    compose_message(wallet_handle, &msgs, agent_pairwise_did, agent_pairwise_verkey, agent_did, agent_verkey)
}
/// Unbundles the two-part reply to a connection request creation:
/// `MessageCreated` + `ConnectionRequestResp`. Resolves to
/// (sender verkey, message uid, invite detail). Asserts exactly two messages.
pub fn decompose_connection_request_created(wallet_handle: WalletHandle, msg: &[u8]) -> BoxedFuture<(String, String, InviteDetail), Error> {
    A2AMessage::unbundle_authcrypted(wallet_handle, EDGE_AGENT_DID_VERKEY, &msg)
        .and_then(|(sender_verkey, mut msgs)| {
            assert_eq!(2, msgs.len());
            match (msgs.remove(0), msgs.remove(0)) {
                (A2AMessage::Version1(A2AMessageV1::MessageCreated(msg_created)),
                    A2AMessage::Version1(A2AMessageV1::MessageDetail(MessageDetail::ConnectionRequestResp(msg_details)))) =>
                    Ok((sender_verkey, msg_created.uid, msg_details.invite_detail)),
                _ => Err(err_msg("Invalid message"))
            }
        })
        .into_box()
}
/// Builds a `CreateMessage(ConnReqAnswer)` + `ConnectionRequestAnswer` detail
/// pair replying to `reply_to_msg_id`. Fabricates a fresh remote user/agent
/// DID pair in the same wallet to stand in for the other party.
pub fn compose_create_connection_request_answer(wallet_handle: WalletHandle,
                                                agent_did: &str,
                                                agent_verkey: &str,
                                                agent_pairwise_did: &str,
                                                agent_pairwise_verkey: &str,
                                                reply_to_msg_id: &str) -> BoxedFuture<Vec<u8>, Error> {
    // Stand-in identities for the remote side of the connection.
    let (remote_user_pw_did, remote_user_pw_verkey) = did::create_and_store_my_did(wallet_handle, "{}").wait().unwrap();
    let (remote_agent_pw_did, remote_agent_pw_verkey) = did::create_and_store_my_did(wallet_handle, "{}").wait().unwrap();
    let create_msg = build_create_message_request(RemoteMessageType::ConnReqAnswer, Some(reply_to_msg_id.to_string()));
    let msg_details: A2AMessage = A2AMessage::Version1(A2AMessageV1::MessageDetail(
        MessageDetail::ConnectionRequestAnswer(
            ConnectionRequestAnswerMessageDetail {
                key_dlg_proof: Some(gen_key_delegated_proof(wallet_handle, EDGE_PAIRWISE_DID_VERKEY, &agent_pairwise_did, &agent_pairwise_verkey)),
                sender_detail: SenderDetail {
                    did: remote_user_pw_did,
                    verkey: remote_user_pw_verkey.clone(),
                    agent_key_dlg_proof: gen_key_delegated_proof(wallet_handle, &remote_user_pw_verkey, &remote_agent_pw_did, &remote_agent_pw_verkey),
                    name: None,
                    logo_url: None,
                    public_did: None,
                },
                sender_agency_detail: ForwardAgentDetail {
                    did: FORWARD_AGENT_DID.to_string(),
                    verkey: FORWARD_AGENT_DID_VERKEY.to_string(),
                    endpoint: FORWARD_AGENT_ENDPOINT.to_string(),
                },
                answer_status_code: MessageStatusCode::Accepted,
                thread: None,
            }
        )));
    let msgs = [create_msg, msg_details];
    compose_message(wallet_handle, &msgs, agent_pairwise_did, agent_pairwise_verkey, agent_did, agent_verkey)
}
/// Unbundles the single `MessageCreated` reply to a connection-request answer
/// and resolves to (sender verkey, message uid). Asserts exactly one message.
pub fn decompose_connection_request_answer_created(wallet_handle: WalletHandle, msg: &[u8]) -> BoxedFuture<(String, String), Error> {
    A2AMessage::unbundle_authcrypted(wallet_handle, EDGE_AGENT_DID_VERKEY, &msg)
        .and_then(|(sender_verkey, mut msgs)| {
            assert_eq!(1, msgs.len());
            match msgs.remove(0) {
                A2AMessage::Version1(A2AMessageV1::MessageCreated(msg_created)) => Ok((sender_verkey, msg_created.uid)),
                _ => Err(err_msg("Invalid message"))
            }
        })
        .into_box()
}
/// Builds a `CreateMessage(mtype)` + general detail pair carrying the fixed
/// `PAYLOAD` bytes, routed through the double-forward envelope.
pub fn compose_create_general_message(wallet_handle: WalletHandle,
                                      agent_did: &str,
                                      agent_verkey: &str,
                                      agent_pairwise_did: &str,
                                      agent_pairwise_verkey: &str,
                                      mtype: RemoteMessageType) -> BoxedFuture<Vec<u8>, Error> {
    let create_msg = build_create_message_request(mtype, None);
    let msg_details: A2AMessage = A2AMessage::Version1(A2AMessageV1::MessageDetail(
        MessageDetail::General(
            GeneralMessageDetail {
                msg: PAYLOAD.to_vec(),
                title: None,
                detail: None,
            }
        )
    ));
    let msgs = [create_msg, msg_details];
    compose_message(wallet_handle, &msgs, agent_pairwise_did, agent_pairwise_verkey, agent_did, agent_verkey)
}
/// Shared constructor for `CreateMessage` requests; `send_msg` is always
/// false in these tests and the uid is left for the agency to assign.
fn build_create_message_request(mtype: RemoteMessageType, reply_to_msg_id: Option<String>) -> A2AMessage {
    A2AMessage::Version1(A2AMessageV1::CreateMessage(CreateMessage {
        mtype,
        send_msg: false,
        uid: None,
        reply_to_msg_id,
    }))
}
/// Unbundles the single `MessageCreated` reply to a general message and
/// resolves to (sender verkey, message uid). Asserts exactly one message.
pub fn decompose_general_message_created(wallet_handle: WalletHandle, msg: &[u8]) -> BoxedFuture<(String, String), Error> {
    A2AMessage::unbundle_authcrypted(wallet_handle, EDGE_AGENT_DID_VERKEY, &msg)
        .and_then(|(sender_verkey, mut msgs)| {
            assert_eq!(1, msgs.len());
            match msgs.remove(0) {
                A2AMessage::Version1(A2AMessageV1::MessageCreated(msg_created)) => Ok((sender_verkey, msg_created.uid)),
                _ => Err(err_msg("Invalid message"))
            }
        })
        .into_box()
}
/// Builds an unfiltered `GetMessages` request (no uid/status filters) routed
/// through the double-forward envelope.
pub fn compose_get_messages(wallet_handle: WalletHandle,
                            agent_did: &str,
                            agent_verkey: &str,
                            agent_pairwise_did: &str,
                            agent_pairwise_verkey: &str) -> BoxedFuture<Vec<u8>, Error> {
    let msgs = [A2AMessage::Version1(A2AMessageV1::GetMessages(GetMessages {
        exclude_payload: None,
        uids: Vec::new(),
        status_codes: Vec::new(),
    }))];
    compose_message(wallet_handle, &msgs, agent_pairwise_did, agent_pairwise_verkey, agent_did, agent_verkey)
}
/// Unbundles a `Messages` reply and resolves to (sender verkey, message list).
/// Asserts exactly one message in the bundle.
pub fn decompose_get_messages(wallet_handle: WalletHandle, msg: &[u8]) -> BoxedFuture<(String, Vec<GetMessagesDetailResponse>), Error> {
    A2AMessage::unbundle_authcrypted(wallet_handle, EDGE_AGENT_DID_VERKEY, &msg)
        .and_then(|(sender_verkey, mut msgs)| {
            assert_eq!(1, msgs.len());
            match msgs.remove(0) {
                A2AMessage::Version1(A2AMessageV1::Messages(messages)) => Ok((sender_verkey, messages.msgs)),
                _ => Err(err_msg("Invalid message"))
            }
        })
        .into_box()
}
/// Builds an `UpdateMessageStatus` request for a single message `uid`,
/// routed through the double-forward envelope.
pub fn compose_update_message_status_message(wallet_handle: WalletHandle,
                                             agent_did: &str,
                                             agent_verkey: &str,
                                             agent_pairwise_did: &str,
                                             agent_pairwise_verkey: &str,
                                             uid: &str,
                                             status_code: MessageStatusCode) -> BoxedFuture<Vec<u8>, Error> {
    let msgs = [A2AMessage::Version1(A2AMessageV1::UpdateMessageStatus(UpdateMessageStatus {
        uids: vec![uid.to_string()],
        status_code,
    }))];
    compose_message(wallet_handle, &msgs, agent_pairwise_did, agent_pairwise_verkey, agent_did, agent_verkey)
}
/// Unbundles a `MessageStatusUpdated` reply; resolves to (sender verkey, payload).
/// Asserts exactly one message in the bundle.
pub fn decompose_message_status_updated(wallet_handle: WalletHandle, msg: &[u8]) -> BoxedFuture<(String, MessageStatusUpdated), Error> {
    A2AMessage::unbundle_authcrypted(wallet_handle, EDGE_AGENT_DID_VERKEY, &msg)
        .and_then(|(sender_verkey, mut msgs)| {
            assert_eq!(1, msgs.len());
            match msgs.remove(0) {
                A2AMessage::Version1(A2AMessageV1::MessageStatusUpdated(msg)) => Ok((sender_verkey, msg)),
                _ => Err(err_msg("Invalid message"))
            }
        })
        .into_box()
}
/// Builds an `UpdateConnectionStatus(Deleted)` request routed through the
/// double-forward envelope.
pub fn compose_update_connection_status_message(wallet_handle: WalletHandle,
                                                agent_did: &str,
                                                agent_verkey: &str,
                                                agent_pairwise_did: &str,
                                                agent_pairwise_verkey: &str) -> BoxedFuture<Vec<u8>, Error> {
    let msgs = [A2AMessage::Version1(A2AMessageV1::UpdateConnectionStatus(UpdateConnectionStatus {
        status_code: ConnectionStatus::Deleted
    }))];
    compose_message(wallet_handle, &msgs, agent_pairwise_did, agent_pairwise_verkey, agent_did, agent_verkey)
}
/// Unbundles a `ConnectionStatusUpdated` reply; resolves to (sender verkey, payload).
/// Asserts exactly one message in the bundle.
pub fn decompose_connection_status_updated(wallet_handle: WalletHandle, msg: &[u8]) -> BoxedFuture<(String, ConnectionStatusUpdated), Error> {
    A2AMessage::unbundle_authcrypted(wallet_handle, EDGE_AGENT_DID_VERKEY, &msg)
        .and_then(|(sender_verkey, mut msgs)| {
            assert_eq!(1, msgs.len());
            match msgs.remove(0) {
                A2AMessage::Version1(A2AMessageV1::ConnectionStatusUpdated(msg)) => Ok((sender_verkey, msg)),
                _ => Err(err_msg("Invalid message"))
            }
        })
        .into_box()
}
/// Builds an unfiltered `GetMessagesByConnections` request. Unlike the
/// pairwise-level helpers, this is authcrypted directly to the agent (single
/// forward), since it spans all connections.
pub fn compose_get_messages_by_connection(wallet_handle: WalletHandle,
                                          agent_did: &str,
                                          agent_verkey: &str,
                                          agent_pairwise_did: &str,
                                          agent_pairwise_verkey: &str) -> BoxedFuture<Vec<u8>, Error> {
    let msgs = [A2AMessage::Version1(A2AMessageV1::GetMessagesByConnections(GetMessagesByConnections {
        exclude_payload: None,
        uids: Vec::new(),
        status_codes: Vec::new(),
        pairwise_dids: Vec::new(),
    }))];
    let msg = A2AMessage::prepare_authcrypted(wallet_handle,
                                              EDGE_AGENT_DID_VERKEY,
                                              agent_verkey,
                                              &msgs).wait().unwrap();
    compose_forward(wallet_handle, agent_did, FORWARD_AGENT_DID_VERKEY, msg)
}
/// Unbundles a `MessagesByConnections` reply; resolves to
/// (sender verkey, per-connection message lists). Asserts one bundled message.
pub fn decompose_get_messages_by_connection(wallet_handle: WalletHandle, msg: &[u8]) -> BoxedFuture<(String, Vec<MessagesByConnection>), Error> {
    A2AMessage::unbundle_authcrypted(wallet_handle, EDGE_AGENT_DID_VERKEY, &msg)
        .and_then(|(sender_verkey, mut msgs)| {
            assert_eq!(1, msgs.len());
            match msgs.remove(0) {
                A2AMessage::Version1(A2AMessageV1::MessagesByConnections(messages)) => Ok((sender_verkey, messages.msgs)),
                _ => Err(err_msg("Invalid message"))
            }
        })
        .into_box()
}
/// Builds an `UpdateConfigs` request setting three agent config options
/// (one unknown key plus name/logoUrl), authcrypted directly to the agent.
pub fn compose_update_configs(wallet_handle: WalletHandle, agent_did: &str, agent_verkey: &str) -> BoxedFuture<Vec<u8>, Error> {
    let msgs = [A2AMessage::Version1(A2AMessageV1::UpdateConfigs(
        UpdateConfigs {
            configs: vec![
                ConfigOption { name: "zoom_zoom".to_string(), value: "value".to_string() },
                ConfigOption { name: "name".to_string(), value: "super agent".to_string() },
                ConfigOption { name: "logoUrl".to_string(), value: "http://logo.url".to_string() }
            ]
        }))];
    let msg = A2AMessage::prepare_authcrypted(wallet_handle,
                                              EDGE_AGENT_DID_VERKEY,
                                              agent_verkey,
                                              &msgs).wait().unwrap();
    compose_forward(wallet_handle, agent_did, FORWARD_AGENT_DID_VERKEY, msg)
}
/// Builds a `GetConfigs` request for the "name" and "logoUrl" options,
/// authcrypted directly to the agent.
pub fn compose_get_configs(wallet_handle: WalletHandle, agent_did: &str, agent_verkey: &str) -> BoxedFuture<Vec<u8>, Error> {
    let msgs = [A2AMessage::Version1(A2AMessageV1::GetConfigs(
        GetConfigs {
            configs: vec![String::from("name"), String::from("logoUrl")]
        }))];
    let msg = A2AMessage::prepare_authcrypted(wallet_handle,
                                              EDGE_AGENT_DID_VERKEY,
                                              agent_verkey,
                                              &msgs).wait().unwrap();
    compose_forward(wallet_handle, agent_did, FORWARD_AGENT_DID_VERKEY, msg)
}
pub fn decompose_configs(wallet_handle: WalletHandle, msg: &[u8]) -> BoxedFuture<Vec<ConfigOption>, Error> {
A2AMessage::unbundle_authcrypted(wallet_handle, EDGE_AGENT_DID_VERKEY, &msg)
.and_then(|(sender_verkey, mut msgs)| {
if let Some(A2AMessage::Version1(A2AMessageV1::Configs(configs))) = msgs.pop() {
Ok((configs.configs))
} else {
Err(err_msg("Invalid message"))
}
})
.into_box()
}
/// Builds a `RemoveConfigs` request deleting the "name" option,
/// authcrypted directly to the agent.
pub fn compose_remove_configs(wallet_handle: WalletHandle, agent_did: &str, agent_verkey: &str) -> BoxedFuture<Vec<u8>, Error> {
    let msgs = [A2AMessage::Version1(A2AMessageV1::RemoveConfigs(
        RemoveConfigs {
            configs: vec![String::from("name")]
        }))];
    let msg = A2AMessage::prepare_authcrypted(wallet_handle,
                                              EDGE_AGENT_DID_VERKEY,
                                              agent_verkey,
                                              &msgs).wait().unwrap();
    compose_forward(wallet_handle, agent_did, FORWARD_AGENT_DID_VERKEY, msg)
}
/// Wraps `msg` in a `Forward` envelope for `recipient_did`, anoncrypted to
/// `recipient_vk` (sender stays anonymous to the relay).
pub fn compose_forward(wallet_handle: WalletHandle, recipient_did: &str, recipient_vk: &str, msg: Vec<u8>) -> BoxedFuture<Vec<u8>, Error> {
    let msgs = [A2AMessage::Version1(A2AMessageV1::Forward(
        ForwardV1 {
            fwd: recipient_did.into(),
            msg,
        }))];
    A2AMessage::prepare_anoncrypted(wallet_handle, recipient_vk, &msgs)
}
/// Like [`compose_forward`] but authcrypted, so the relay can authenticate
/// the sender (`sender_vk`).
pub fn compose_authcrypted_forward(wallet_handle: WalletHandle, sender_vk: &str, recipient_did: &str, recipient_vk: &str, msg: Vec<u8>) -> BoxedFuture<Vec<u8>, Error> {
    let msgs = [A2AMessage::Version1(A2AMessageV1::Forward(
        ForwardV1 {
            fwd: recipient_did.into(),
            msg,
        }))];
    A2AMessage::prepare_authcrypted(wallet_handle, sender_vk, recipient_vk, &msgs)
}
pub fn compose_message(wallet_handle: WalletHandle,
msgs: &[A2AMessage],
agent_pairwise_did: &str,
agent_pairwise_verkey: &str,
agent_did: &str,
agent_verkey: &str) -> BoxedFuture<Vec<u8>, Error> {
let msg = A2AMessage::prepare_authcrypted(wallet_handle, EDGE_PAIRWISE_DID_VERKEY, agent_pairwise_verkey, &msgs).wait().unwrap();
let msg = compose_authcrypted_forward(wallet_handle, EDGE_AGENT_DID_VERKEY, agent_pairwise_did, agent_verkey, msg).wait().unwrap();
compose_forward(wallet_handle, agent_did, FORWARD_AGENT_DID_VERKEY, msg)
}
pub fn gen_key_delegated_proof(wallet_handle: WalletHandle, signer_vk: &str, did: &str, verkey: &str) -> KeyDlgProof {
let signature = format!("{}{}", did, verkey);
let signature = crypto::sign(wallet_handle, signer_vk, signature.as_bytes()).wait().unwrap();
let signature = base64::encode(&signature);
KeyDlgProof {
agent_did: did.into(),
agent_delegated_key: verkey.into(),
signature,
}
}<|fim▁end|>
|
pub const EDGE_PAIRWISE_DID_INFO_2: &'static str = "{\"did\": \"WNnf2uJPZNmvMmA6LkdVAp\", \"seed\": \"0000000000000000000EdgePairwise2\"}";
pub const EDGE_PAIRWISE_DID_VERKEY_2: &'static str = "H1d58X25s91rTXdd46hTfn7mhtPmohQFYRHD379UtytR";
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><<<<<<< HEAD
<<<<<<< HEAD
from . import client, rest, session
=======
from . import client, rest, session<|fim▁hole|>
from . import client, rest, session
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453<|fim▁end|>
|
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
|
<|file_name|>RelayRecordState.js<|end_file_name|><|fim▁begin|>/**
* Copyright 2013-2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
* @providesModule RelayRecordState
* @flow
* @typechecks
*/
'use strict';<|fim▁hole|> /**
* Record exists (either fetched from the server or produced by a local,
* optimistic update).
*/
EXISTENT: 'EXISTENT',
/**
* Record is known not to exist (either as the result of a mutation, or
* because the server returned `null` when queried for the record).
*/
NONEXISTENT: 'NONEXISTENT',
/**
* Record State is unknown because it has not yet been fetched from the
* server.
*/
UNKNOWN: 'UNKNOWN',
};
module.exports = RelayRecordState;<|fim▁end|>
|
export type RecordState = $Enum<typeof RelayRecordState>;
var RelayRecordState = {
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>__author__ = 'Viktor Kerkez <[email protected]>'
__date__ = '18 February 2010'<|fim▁hole|><|fim▁end|>
|
__copyright__ = 'Copyright (c) 2010 Viktor Kerkez'
|
<|file_name|>test_binary_sensor.py<|end_file_name|><|fim▁begin|>"""The tests for the Tasmota binary sensor platform."""
import copy
from datetime import timedelta
import json
from unittest.mock import patch
from hatasmota.utils import (
get_topic_stat_result,
get_topic_stat_status,
get_topic_tele_sensor,
get_topic_tele_will,
)
from homeassistant.components import binary_sensor
from homeassistant.components.tasmota.const import DEFAULT_PREFIX
from homeassistant.const import (
ATTR_ASSUMED_STATE,
EVENT_STATE_CHANGED,
STATE_OFF,
STATE_ON,
)
import homeassistant.core as ha
import homeassistant.util.dt as dt_util
from .test_common import (
DEFAULT_CONFIG,
help_test_availability,
help_test_availability_discovery_update,
help_test_availability_poll_state,
help_test_availability_when_connection_lost,
help_test_discovery_device_remove,
help_test_discovery_removal,
help_test_discovery_update_unchanged,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
)
from tests.common import async_fire_mqtt_message, async_fire_time_changed
async def test_controlling_state_via_mqtt(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test normal state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"ON"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"OFF"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test periodic state update
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Switch1":"ON"}')
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Switch1":"OFF"}')
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test polled state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Switch1":"ON"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Switch1":"OFF"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test force update flag
entity = hass.data["entity_components"]["binary_sensor"].get_entity(
"binary_sensor.tasmota_binary_sensor_1"
)
assert entity.force_update
async def test_controlling_state_via_mqtt_switchname(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Custom Name"
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.custom_name")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test normal state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Custom Name":{"Action":"ON"}}'
)
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Custom Name":{"Action":"OFF"}}'
)
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_OFF
# Test periodic state update
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Custom Name":"ON"}')
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Custom Name":"OFF"}')
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_OFF
# Test polled state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Custom Name":"ON"}}'
)
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Custom Name":"OFF"}}'
)
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_OFF
async def test_pushon_controlling_state_via_mqtt(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 13
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test normal state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"ON"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"OFF"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test periodic state update is ignored
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Switch1":"ON"}')
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test polled state update is ignored
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Switch1":"ON"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
async def test_friendly_names(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swc"][1] = 1
config["swn"][1] = "Beer"
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == "unavailable"
assert state.attributes.get("friendly_name") == "Tasmota binary_sensor 1"
state = hass.states.get("binary_sensor.beer")
assert state.state == "unavailable"
assert state.attributes.get("friendly_name") == "Beer"
async def test_off_delay(hass, mqtt_mock, setup_tasmota):
"""Test off_delay option."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 13 # PUSHON: 1s off_delay
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
events = []
@ha.callback
def callback(event):
"""Verify event got called."""
events.append(event.data["new_state"].state)
<|fim▁hole|> hass.bus.async_listen(EVENT_STATE_CHANGED, callback)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
assert events == ["off"]
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"ON"}}'
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
assert events == ["off", "on"]
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"ON"}}'
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
assert events == ["off", "on", "on"]
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
assert events == ["off", "on", "on", "off"]
async def test_availability_when_connection_lost(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test availability after MQTT disconnection."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
await help_test_availability_when_connection_lost(
hass, mqtt_client_mock, mqtt_mock, binary_sensor.DOMAIN, config
)
async def test_availability(hass, mqtt_mock, setup_tasmota):
"""Test availability."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
await help_test_availability(hass, mqtt_mock, binary_sensor.DOMAIN, config)
async def test_availability_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test availability discovery update."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
await help_test_availability_discovery_update(
hass, mqtt_mock, binary_sensor.DOMAIN, config
)
async def test_availability_poll_state(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test polling after MQTT connection (re)established."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
poll_topic = "tasmota_49A3BC/cmnd/STATUS"
await help_test_availability_poll_state(
hass,
mqtt_client_mock,
mqtt_mock,
binary_sensor.DOMAIN,
config,
poll_topic,
"10",
)
async def test_discovery_removal_binary_sensor(hass, mqtt_mock, caplog, setup_tasmota):
"""Test removal of discovered binary_sensor."""
config1 = copy.deepcopy(DEFAULT_CONFIG)
config2 = copy.deepcopy(DEFAULT_CONFIG)
config1["swc"][0] = 1
config2["swc"][0] = 0
config1["swn"][0] = "Test"
config2["swn"][0] = "Test"
await help_test_discovery_removal(
hass, mqtt_mock, caplog, binary_sensor.DOMAIN, config1, config2
)
async def test_discovery_update_unchanged_binary_sensor(
hass, mqtt_mock, caplog, setup_tasmota
):
"""Test update of discovered binary_sensor."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
with patch(
"homeassistant.components.tasmota.binary_sensor.TasmotaBinarySensor.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, binary_sensor.DOMAIN, config, discovery_update
)
async def test_discovery_device_remove(hass, mqtt_mock, setup_tasmota):
"""Test device registry remove."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
unique_id = f"{DEFAULT_CONFIG['mac']}_binary_sensor_switch_0"
await help_test_discovery_device_remove(
hass, mqtt_mock, binary_sensor.DOMAIN, unique_id, config
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock, setup_tasmota):
"""Test MQTT subscriptions are managed when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
topics = [
get_topic_stat_result(config),
get_topic_tele_sensor(config),
get_topic_stat_status(config, 10),
get_topic_tele_will(config),
]
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, binary_sensor.DOMAIN, config, topics
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test MQTT discovery update when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, binary_sensor.DOMAIN, config
)<|fim▁end|>
| |
<|file_name|>StatusCodeRepos.java<|end_file_name|><|fim▁begin|>package org.contourdynamics.cms.repository;
import javax.enterprise.context.ApplicationScoped;
import org.apache.deltaspike.data.api.EntityManagerConfig;
import org.apache.deltaspike.data.api.EntityRepository;
import org.apache.deltaspike.data.api.Repository;
import org.apache.deltaspike.data.api.AbstractEntityRepository;<|fim▁hole|>import org.contourdynamics.cms.Entities.StatusCode;
import org.contourdynamics.cms.producers.MainEMResolver;
@ApplicationScoped
@Repository(forEntity = StatusCode.class)
@EntityManagerConfig(entityManagerResolver = MainEMResolver.class)
public interface StatusCodeRepos extends EntityRepository<StatusCode, Integer> {
//public abstract class StatusCodeRepos extends AbstractEntityRepository<StatusCode, Integer> {
}<|fim▁end|>
|
import org.contourdynamics.cms.Entities.BpCmpy;
|
<|file_name|>objects.py<|end_file_name|><|fim▁begin|>#
# NineMSN CatchUp TV Video API Library
#
# This code is forked from Network Ten CatchUp TV Video API Library
# Copyright (c) 2013 Adam Malcontenti-Wilson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from brightcove.core import APIObject, Field, DateTimeField, ListField, EnumField
from brightcove.objects import ItemCollection, enum
ChannelNameEnum = enum('ten', 'eleven', 'one')
PlaylistTypeEnum = enum('full_episodes', 'web_extras', 'news', 'season', 'week', 'category', 'special', 'preview')
MediaDeliveryEnum = enum('default', 'http', 'http_ios')
class EnumNumField(Field):
def __init__(self, enum_cls, help=None):
self.help = help
self.enum_cls = enum_cls
def to_python(self, value):
for i, field in enumerate(self.enum_cls._fields):
if i == value:
return field<|fim▁hole|>
class Playlist(APIObject):
_fields = ['name', 'type', 'season', 'week', 'query']
type = EnumField(PlaylistTypeEnum)
def __repr__(self):
return '<Playlist name=\'{0}\'>'.format(self.name)
class Show(APIObject):
_fields = ['showName', 'channelName', 'videoLink', 'mobileLink', 'logo', 'fanart', 'playlists']
channelName = EnumField(ChannelNameEnum)
playlists = ListField(Playlist)
def __repr__(self):
return '<Show name=\'{0}\'>'.format(self.showName)
class AMFRendition(APIObject):
_fields = ['defaultURL', 'audioOnly', 'mediaDeliveryType', 'encodingRate',
'frameHeight', 'frameWidth', 'size',
'videoCodec', 'videoContainer']
mediaDeliveryType = EnumNumField(MediaDeliveryEnum)
def __repr__(self):
return '<Rendition bitrate=\'{0}\' type=\'{1}\' frameSize=\'{2}x{3}\'>'.format(self.encodingRate, self.mediaDeliveryType, self.frameWidth, self.frameHeight)
class ShowItemCollection(ItemCollection):
_item_class = Show
items = ListField(Show)
class PlaylistItemCollection(ItemCollection):
_item_class = Playlist
items = ListField(Playlist)
class MediaRenditionItemCollection(ItemCollection):
_item_class = AMFRendition
items = ListField(AMFRendition)<|fim▁end|>
|
raise Exception('Invalid Enum: %s' % value)
def from_python(self, value):
return self.enum_cls._fields[value]
|
<|file_name|>CacheConfig.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2002-2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cache.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* {@code @CacheConfig} provides a mechanism for sharing common cache-related
* settings at the class level.
*
* <p>When this annotation is present on a given class, it provides a set
* of default settings for any cache operation defined in that class.
*
* @author Stephane Nicoll
* @author Sam Brannen
* @since 4.1
*/
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@Documented
public @interface CacheConfig {
/**
* Names of the default caches to consider for caching operations defined
* in the annotated class.
* <p>If none is set at the operation level, these are used instead of the default.
* <p>May be used to determine the target cache (or caches), matching the
* qualifier value or the bean names of a specific bean definition.<|fim▁hole|> */
String[] cacheNames() default {};
/**
* The bean name of the default {@link org.springframework.cache.interceptor.KeyGenerator} to
* use for the class.
* <p>If none is set at the operation level, this one is used instead of the default.
* <p>The key generator is mutually exclusive with the use of a custom key. When such key is
* defined for the operation, the value of this key generator is ignored.
*/
String keyGenerator() default "";
/**
* The bean name of the custom {@link org.springframework.cache.CacheManager} to use to
* create a default {@link org.springframework.cache.interceptor.CacheResolver} if none
* is set already.
* <p>If no resolver and no cache manager are set at the operation level, and no cache
* resolver is set via {@link #cacheResolver}, this one is used instead of the default.
* @see org.springframework.cache.interceptor.SimpleCacheResolver
*/
String cacheManager() default "";
/**
* The bean name of the custom {@link org.springframework.cache.interceptor.CacheResolver} to use.
* <p>If no resolver and no cache manager are set at the operation level, this one is used
* instead of the default.
*/
String cacheResolver() default "";
}<|fim▁end|>
| |
<|file_name|>AssetState.py<|end_file_name|><|fim▁begin|>from .StateBase import StateBase
from neo.Core.Fixed8 import Fixed8
from neo.Core.IO.BinaryReader import BinaryReader
from neo.IO.MemoryStream import StreamManager
from neo.Core.AssetType import AssetType
from neo.Core.UInt160 import UInt160
from neo.Core.Cryptography.Crypto import Crypto
from neo.Core.Cryptography.ECCurve import EllipticCurve, ECDSA
from neo.Core.Size import Size as s
from neo.Core.Size import GetVarSize
class AssetState(StateBase):
def Size(self):
return super(AssetState, self).Size() + s.uint256 + s.uint8 + GetVarSize(
self.Name) + self.Amount.Size() + self.Available.Size() + s.uint8 + s.uint8 + self.Fee.Size() + s.uint160 + self.Owner.Size() + s.uint160 + s.uint160 + s.uint32 + s.uint8
def __init__(self, asset_id=None, asset_type=None, name=None, amount=None, available=None,
precision=0, fee_mode=0, fee=None, fee_addr=None, owner=None,
admin=None, issuer=None, expiration=None, is_frozen=False):
"""
Create an instance.
Args:
asset_id (UInt256):
asset_type (neo.Core.AssetType):
name (str): the asset name.
amount (Fixed8):
available (Fixed8):
precision (int): number of decimals the asset has.
fee_mode (int):
fee (Fixed8):
fee_addr (UInt160): where the fee will be send to.
owner (EllipticCurve.ECPoint):
admin (UInt160): the administrator of the asset.
issuer (UInt160): the issuer of the asset.
expiration (UInt32): the block number on which the asset expires.
is_frozen (bool):
"""
self.AssetId = asset_id
self.AssetType = asset_type
self.Name = name
self.Amount = Fixed8(0) if amount is None else amount
self.Available = Fixed8(0) if available is None else available
self.Precision = precision
self.FeeMode = fee_mode
self.Fee = Fixed8(0) if fee is None else fee
self.FeeAddress = UInt160(data=bytearray(20)) if fee_addr is None else fee_addr
if owner is not None and type(owner) is not EllipticCurve.ECPoint:
raise Exception("Owner must be ECPoint Instance")
self.Owner = owner
self.Admin = admin
self.Issuer = issuer
self.Expiration = expiration
self.IsFrozen = is_frozen
# def Size(self):
# return super(AssetState, self).Size()
@staticmethod
def DeserializeFromDB(buffer):
"""
Deserialize full object.
Args:
buffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from.
Returns:
AssetState:
"""
m = StreamManager.GetStream(buffer)
reader = BinaryReader(m)
account = AssetState()
account.Deserialize(reader)
StreamManager.ReleaseStream(m)
return account
def Deserialize(self, reader):
"""
Deserialize full object.
Args:
reader (neo.Core.IO.BinaryReader):
"""
super(AssetState, self).Deserialize(reader)
self.AssetId = reader.ReadUInt256()
self.AssetType = ord(reader.ReadByte())
self.Name = reader.ReadVarString()
position = reader.stream.tell()
try:
self.Amount = reader.ReadFixed8()
except Exception:
reader.stream.seek(position)
self.Amount = reader.ReadFixed8()
self.Available = reader.ReadFixed8()
self.Precision = ord(reader.ReadByte())
# fee mode
reader.ReadByte()
self.Fee = reader.ReadFixed8()
self.FeeAddress = reader.ReadUInt160()
self.Owner = ECDSA.Deserialize_Secp256r1(reader)
self.Admin = reader.ReadUInt160()
self.Issuer = reader.ReadUInt160()
self.Expiration = reader.ReadUInt32()
self.IsFrozen = reader.ReadBool()
def Serialize(self, writer):
"""
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
"""
super(AssetState, self).Serialize(writer)
writer.WriteUInt256(self.AssetId)
writer.WriteByte(self.AssetType)
writer.WriteVarString(self.Name)
if self.Amount.value > -1:
writer.WriteFixed8(self.Amount, unsigned=True)
else:
writer.WriteFixed8(self.Amount)
if type(self.Available) is not Fixed8:
raise Exception("AVAILABLE IS NOT FIXED 8!")
writer.WriteFixed8(self.Available, unsigned=True)
writer.WriteByte(self.Precision)
writer.WriteByte(b'\x00')
writer.WriteFixed8(self.Fee)
writer.WriteUInt160(self.FeeAddress)
self.Owner.Serialize(writer)
writer.WriteUInt160(self.Admin)
writer.WriteUInt160(self.Issuer)
writer.WriteUInt32(self.Expiration)
writer.WriteBool(self.IsFrozen)
def GetName(self):
"""
Get the asset name based on its type.
Returns:
str: 'NEO' or 'NEOGas'
"""
if self.AssetType == AssetType.GoverningToken:
return "NEO"
elif self.AssetType == AssetType.UtilityToken:
return "NEOGas"
if type(self.Name) is bytes:
return self.Name.decode('utf-8')
return self.Name
def ToJson(self):
"""
Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict:
"""
return {
'assetId': self.AssetId.To0xString(),
'assetType': self.AssetType,
'name': self.GetName(),
'amount': self.Amount.value,
'available': self.Available.value,
'precision': self.Precision,
'fee': self.Fee.value,
'address': self.FeeAddress.ToString(),
'owner': self.Owner.ToString(),
'admin': Crypto.ToAddress(self.Admin),
'issuer': Crypto.ToAddress(self.Issuer),
'expiration': self.Expiration,<|fim▁hole|> 'is_frozen': self.IsFrozen
}
def Clone(self):
return AssetState(asset_id=self.AssetId, asset_type=self.AssetType, name=self.Name, amount=self.Amount, available=self.Available, precision=self.Precision, fee=self.Fee, fee_addr=self.FeeAddress, owner=self.Owner, admin=self.Admin, issuer=self.Issuer, expiration=self.Expiration, is_frozen=self.IsFrozen)<|fim▁end|>
| |
<|file_name|>main.py<|end_file_name|><|fim▁begin|># https://github.com/Naish21/themostat
'''
* The MIT License (MIT)
*
* Copyright (c) 2016 Jorge Aranda Moro
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
'''<|fim▁hole|>
WIFISSID='koen'
WIFIPASS='/*Casa*/'
def do_connect():
from network import WLAN
sta_if = WLAN(network.STA_IF)
if not sta_if.isconnected():
print('connecting to network...')
sta_if.active(True)
sta_if.connect(WIFISSID, WIFIPASS)
while not sta_if.isconnected():
pass
print('network config:', sta_if.ifconfig())
#---End Wifi Config---
from machine import Pin
led = Pin(2, Pin.OUT, value=1)
#---MQTT Sending---
from time import sleep_ms
from ubinascii import hexlify
from machine import unique_id
#import socket
from umqtt import MQTTClient
SERVER = "192.168.31.16"
CLIENT_ID = hexlify(unique_id())
TOPIC1 = b"/cultivo/temp"
TOPIC2 = b"/scultivo/hum"
TOPIC3 = b"/cultivo/alarma"
def envioMQTT(server=SERVER, topic="/cultivo", dato=None):
try:
c = MQTTClient(CLIENT_ID, server)
c.connect()
c.publish(topic, dato)
sleep_ms(200)
c.disconnect()
#led.value(1)
except Exception as e:
pass
#led.value(0)
state = 0
def sub_cb(topic, msg):
global state
print((topic, msg))
if msg == b"on":
led.value(0)
state = 1
elif msg == b"off":
led.value(1)
state = 0
def recepcionMQTT(server=SERVER, topic=TOPIC3):
c = MQTTClient(CLIENT_ID, server)
# Subscribed messages will be delivered to this callback
c.set_callback(sub_cb)
c.connect()
c.subscribe(topic)
print("Connected to %s, subscribed to %s topic" % (server, topic))
try:
c.wait_msg()
finally:
c.disconnect()
#---End MQTT Sending---
#---DHT22---
from dht import DHT22
ds = DHT22(Pin(4)) #DHT22 connected to GPIO4
def medirTemHum():
try:
ds.measure()
tem = ds.temperature()
hum = ds.humidity()
#ed.value(1)
return (tem,hum)
except Exception as e:
#led.value(0)
return (-1,-1)
#---End DHT22---
#---Main Program---
sleep_ms(10000)
while True:
(tem,hum) = medirTemHum()
envioMQTT(SERVER,TOPIC1,str(tem))
envioMQTT(SERVER,TOPIC2,str(hum))
recepcionMQTT()
sleep_ms(10000)
#---END Main Program---<|fim▁end|>
|
#This part is to connect to the WiFi
#In this case: SSID: TP-LINK_F3D4B2 & PASS: 90546747
|
<|file_name|>archive.py<|end_file_name|><|fim▁begin|>from __future__ import print_function
import sys, time
import requests, urllib
import demjson, shelve
import os.path
class Archiver:
    def __init__(self):
        """
        A class for archiving URLs into the wayback machine.

        Keeps a persistent list of already-archived URLs in the shelve
        database "archived_urls.dat" and reloads it on construction.
        """
        self._machine = "http://archive.org/wayback/available?url="
        self._arch = "https://web.archive.org/save/"
        self.archived_urls = []
        # Restore previously archived URLs, if the data file exists.
        if os.path.isfile("archived_urls.dat"):
            self.archived_urls = self.load_data()

    def available(self, url, silent=False):
        """
        :param: url
        :param: silent=False
        Checks if the given URL exists in the wayback machine.
        The silent argument, if set True, does not print anything to the
        console.  Returns the snapshot's "available" field when a closest
        snapshot exists, otherwise False.
        """
        if not silent:
            print("[Checking]: %s\n" % url)
        data = demjson.decode(requests.get(self._machine + url).text)["archived_snapshots"]
        if "closest" in data:
            if not silent:
                print(self.print_item(data))
            return data["closest"]["available"]
        return False

    def load_data(self):
        """
        Loads the archived URLs from a file called archived_urls.dat.
        """
        # Close the shelve handle explicitly; the original left it open,
        # leaking a file handle.
        db = shelve.open("archived_urls.dat")
        try:
            return db["main"]
        finally:
            db.close()

    def out_text(self, filename):
        """
        :param: filename
        Outputs the list of archived URLs into a text file, one per line.
        """
        # The original built a lazy map() over file.write, which never
        # executes on Python 3; write the lines explicitly instead.
        with open(filename, "w") as out:
            for url in self.archived_urls:
                out.write(url + "\n")
        print("Done.")

    def save_data(self):
        """
        Saves the archived urls into archived_urls.dat.
        """
        db = shelve.open("archived_urls.dat")
        try:
            db["main"] = self.archived_urls
        finally:
            # Closing flushes the shelve; assigning through an unnamed,
            # never-closed handle (as the original did) may never persist.
            db.close()

    def archive(self, url):
        """
        :param: url
        Archives a url into the wayback machine.
        """
        # The GET itself triggers the wayback save; its response body is
        # not needed.
        requests.get(self._arch + url)
        print("Archiving...")
        self.archived_urls.append(url)
        self.save_data()

    def print_item(self, data):
        """
        :param: data
        Formats the "closest" snapshot of a wayback-availability JSON reply
        into a human-readable multi-line string.
        """
        dat = data["closest"]
        return "Archived:%s\nAvailable:%s\nURL:%s\nStatus:%s" % (
            dat["timestamp"], dat["available"], dat["url"], dat["status"])

    def save_webpage(self, url, filename):
        """
        :param: url
        :param: filename
        Saves a webpage under ./saved_webpages/<filename>, UTF-8 encoded.
        """
        print("[OK]: Saving webpage..")
        # os.path.join keeps this portable; the original hard-coded
        # Windows-style backslashes.
        target_dir = os.path.join(os.getcwd(), "saved_webpages")
        if not os.path.isdir(target_dir):
            os.mkdir("saved_webpages")
        target = os.path.join(target_dir, filename)
        # Write in binary mode so the UTF-8-encoded bytes work on both
        # Python 2 and Python 3 (text mode rejects bytes on Python 3).
        with open(target, "wb") as fh:
            fh.write(requests.get(url).text.encode("utf-8"))
        if os.path.isfile(target):
            print("Done.")
# Usage/help text printed for the -h option.  Built with backslash line
# continuations, so the literal's embedded "\n" markers and trailing
# spaces are part of the runtime string exactly as written below.
Help = \
" \
Usage: archive.py [option] [option2]\n \
\
Options:\n \
-CH/ch [url] - Check if a URL already exists in the wayback machine and return it's information if it does\n \
-ARCH/arch [url] - Archive a URL\n \
-CHARCH/charch [url] - Archive a url if it doesn't already exists\n \
-OUTT/outt [filename] - Output a list of archived urls in text format\n \
-H/h - Print this help message\n \
-LARCH/larch - print out a list of urls you archived\n \
-SAVE/save [url] [filename] - Save a url into a file"
def main():
    """Command-line entry point: parse sys.argv and dispatch to Archiver."""
    global Help
    A = Archiver()
    # Materialise a concrete list so len() and indexing also work on
    # Python 3, where map() returns a lazy iterator.
    args = [arg.lower() for arg in sys.argv[1:]]
    print(args)
    if len(args) == 2:
        print(args[0])
        if args[0] == "-ch":
            if A.available(args[1]) is True:
                print("URL found.")
            else:
                print("URL not found in wayback machine.")
            sys.exit(0)
        elif args[0] == "-arch":
            A.archive(args[1])
            if A.available(args[1], True) is True:
                print("[Success]: Archiving is successful")
            else:
                print("[Error]: Archiving failed!")
                # Mark the last entry as failed.  The original converted the
                # URL string into a list of characters here; prefix the
                # string instead.
                failed = A.archived_urls.pop()
                A.archived_urls.append("FAILED TO ARCHIVE: " + failed)
            sys.exit(0)
        elif args[0] == "-charch":
            # available() may return True, the string "True" (from the JSON
            # reply), or False.
            result = A.available(args[1])
            if result is True or result == "True":
                print("URL exists.")
            elif result is False:
                print("URL does not exist.")
                A.archive(args[1])
            sys.exit(0)
        elif args[0] == "-outt":
            A.out_text(args[1])
            sys.exit(0)
    elif len(args) == 3:
        if args[0] == "-save":
            A.save_webpage(args[1], args[2])
            sys.exit(0)
    elif len(args) == 1:
        if args[0] == "-h":
            print("-h")
            print(Help)
            sys.exit(0)
        elif args[0] == "-larch":
            print("-larch")
            # The original used a lazy map(print, ...), which prints nothing
            # on Python 3; iterate explicitly.
            for url in A.archived_urls:
                print(url)
            sys.exit(0)
        else:
            print("[Error]: Unknown argument \'%s\'" % args[0])
            sys.exit(0)
    else:
        print("Archiver: No arguments found.\n Type '-h' for help")
        sys.exit(0)
# Run the CLI dispatcher only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
"""
map(open(filename, 'w').write, map(lambda x : x+"\n",self.archived_urls))
|
<|file_name|>bitcoin_da.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="da" version="2.1">
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About AlessioCoin</source>
<translation>Om AlessioCoin</translation>
</message>
<message>
<location line="+39"/>
<source><b>AlessioCoin</b> version</source>
<translation><b>AlessioCoin</b> version</translation>
</message>
<message>
<location line="+41"/>
<source>Copyright © 2009-2014 The Bitcoin developers
Copyright © 2012-2014 The NovaCoin developers
Copyright © 2014 The BlackCoin developers
Copyright © 2014 The AlessioCoin developers</source>
<translation>Copyright © 2009-2014 The Bitcoin developers
Copyright © 2012-2014 The NovaCoin developers
Copyright © 2014 The BlackCoin developers
Copyright © 2014 The AlessioCoin developers</translation>
</message>
<message>
<location line="+15"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation>
Dette program er eksperimentelt.
Det er gjort tilgængeligt under MIT/X11-softwarelicensen. Se den medfølgende fil "COPYING" eller http://www.opensource.org/licenses/mit-license.php.
Produktet indeholder software, som er udviklet af OpenSSL Project til brug i OpenSSL Toolkit (http://www.openssl.org/). Kryptografisk software er skrevet af Eric Young ([email protected]), og UPnP-software er skrevet af Thomas Bernard.</translation>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>Adressebog</translation>
</message>
<message>
<location line="+22"/>
<source>Double-click to edit address or label</source>
<translation>Dobbeltklik for at redigere adresse eller mærkat</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>Opret en ny adresse</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Kopier den valgte adresse til udklipsholder</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>&Ny adresse</translation>
</message>
<message>
<location line="-46"/>
<source>These are your AlessioCoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>Dette er dine AlessioCoin adresser til at modtage betalinger. Du ønsker måske at give en anden en til af hver afsender, så du kan holde styr på hvem der betaler dig.</translation>
</message>
<message>
<location line="+60"/>
<source>&Copy Address</source>
<translation>&Kopier adresse</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>Vis &QR kode</translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a AlessioCoin address</source>
<translation>Signerer en meddelelse for at bevise du ejer en AlessioCoin adresse</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>Signere & Besked</translation>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>Slet den markerede adresse fra listen</translation>
</message>
<message>
<location line="-14"/>
<source>Verify a message to ensure it was signed with a specified AlessioCoin address</source>
<translation>Bekræft en meddelelse for at sikre, den blev underskrevet med en specificeret AlessioCoin adresse</translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation>Bekræft Meddelse</translation>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&Slet</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+65"/>
<source>Copy &Label</source>
<translation>Kopier mærkat</translation>
</message>
<message>
<location line="+2"/>
<source>&Edit</source>
<translation>Rediger</translation>
</message>
<message>
<location line="+250"/>
<source>Export Address Book Data</source>
<translation>Eksporter Adresse Bog</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Kommasepareret fil (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>Fejl ved eksportering</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
        <translation>Kunne ikke skrive til fil %1.</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>Mærkat</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(ingen mærkat)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>Adgangskodedialog</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Indtast adgangskode</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Ny adgangskode</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Gentag ny adgangskode</translation>
</message>
<message>
<location line="+33"/>
<source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source>
<translation>Deaktivere trivielle sendmoney når OS konto er kompromitteret. Giver ingen reel sikkerhed.</translation>
</message>
<message>
<location line="+3"/>
<source>For staking only</source>
<translation>Kun til renteberegning</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+35"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>Indtast den nye adgangskode til tegnebogen.<br/>Brug venligst en adgangskode på <b>10 eller flere tilfældige tegn</b> eller <b>otte eller flere ord</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>Krypter tegnebog</translation>
</message>
<message>
<location line="+7"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Denne funktion har brug for din tegnebogs adgangskode for at låse tegnebogen op.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Lås tegnebog op</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Denne funktion har brug for din tegnebogs adgangskode for at dekryptere tegnebogen.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Dekrypter tegnebog</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Skift adgangskode</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Indtast den gamle og den nye adgangskode til tegnebogen.</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Bekræft tegnebogskryptering</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR COINS</b>!</source>
        <translation>Advarsel: Hvis du krypterer din tegnebog og mister din adgangskode, vil du <b>miste alle dine mønter</b>!</translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>Er du sikker på, at du ønsker at kryptere din tegnebog?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>VIGTIGT: Enhver tidligere sikkerhedskopi, som du har lavet af tegnebogsfilen, bør blive erstattet af den nyligt genererede, krypterede tegnebogsfil. Af sikkerhedsmæssige årsager vil tidligere sikkerhedskopier af den ikke-krypterede tegnebogsfil blive ubrugelig i det øjeblik, du starter med at anvende den nye, krypterede tegnebog.</translation>
</message>
<message>
<location line="+103"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>Advarsel: Caps Lock-tasten er aktiveret!</translation>
</message>
<message>
<location line="-133"/>
<location line="+60"/>
<source>Wallet encrypted</source>
<translation>Tegnebog krypteret</translation>
</message>
<message>
<location line="-58"/>
<source>AlessioCoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source>
<translation>AlessioCoin lukker nu for at afslutte krypteringen. Husk at en krypteret tegnebog ikke fuldt ud beskytter dine mønter mod at blive stjålet af malware som har inficeret din computer.</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+44"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Tegnebogskryptering mislykkedes</translation>
</message>
<message>
<location line="-56"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Tegnebogskryptering mislykkedes på grund af en intern fejl. Din tegnebog blev ikke krypteret.</translation>
</message>
<message>
<location line="+7"/>
<location line="+50"/>
<source>The supplied passphrases do not match.</source>
<translation>De angivne adgangskoder stemmer ikke overens.</translation>
</message>
<message>
<location line="-38"/>
<source>Wallet unlock failed</source>
<translation>Tegnebogsoplåsning mislykkedes</translation>
</message>
<message>
<location line="+1"/>
<location line="+12"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>Den angivne adgangskode for tegnebogsdekrypteringen er forkert.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Tegnebogsdekryptering mislykkedes</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>Tegnebogens adgangskode blev ændret.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+280"/>
<source>Sign &message...</source>
<translation>Underskriv besked...</translation>
</message>
<message>
<location line="+242"/>
<source>Synchronizing with network...</source>
<translation>Synkroniserer med netværk...</translation>
</message>
<message>
<location line="-308"/>
<source>&Overview</source>
<translation>&Oversigt</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>Vis generel oversigt over tegnebog</translation>
</message>
<message>
<location line="+17"/>
<source>&Transactions</source>
<translation>&Transaktioner</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Gennemse transaktionshistorik</translation>
</message>
<message>
<location line="+5"/>
<source>&Address Book</source>
<translation>&Adressebog</translation>
</message>
<message>
<location line="+1"/>
<source>Edit the list of stored addresses and labels</source>
<translation>Redigere listen over gemte adresser og etiketter</translation>
</message>
<message>
<location line="-13"/>
<source>&Receive coins</source>
<translation>&Modtag mønter</translation>
</message>
<message>
<location line="+1"/>
<source>Show the list of addresses for receiving payments</source>
<translation>Vis listen over adresser for modtagne betalinger</translation>
</message>
<message>
<location line="-7"/>
<source>&Send coins</source>
<translation>&Send mønter</translation>
</message>
<message>
<location line="+35"/>
<source>E&xit</source>
<translation>Luk</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Afslut program</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about AlessioCoin</source>
<translation>Vis oplysninger om AlessioCoin</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>Om Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>Vis informationer om Qt</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&Indstillinger...</translation>
</message>
<message>
<location line="+4"/>
<source>&Encrypt Wallet...</source>
<translation>Krypter tegnebog...</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>Sikkerhedskopier tegnebog...</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>Skift adgangskode...</translation>
</message>
<message numerus="yes">
<location line="+250"/>
<source>~%n block(s) remaining</source>
<translation><numerusform>~%n blok resterer</numerusform><numerusform>~%n blokke resterende</numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Downloaded %1 of %2 blocks of transaction history (%3% done).</source>
<translation>Overført %1 af %2 blokke af transaktions historie (%3% færdig).</translation>
</message>
<message>
<location line="-247"/>
<source>&Export...</source>
<translation>&Eksporter...</translation>
</message>
<message>
<location line="-62"/>
<source>Send coins to a AlessioCoin address</source>
<translation>Send mønter til en AlessioCoin adresse</translation>
</message>
<message>
<location line="+45"/>
<source>Modify configuration options for AlessioCoin</source>
<translation>Ændre indstillingsmuligheder for AlessioCoin</translation>
</message>
<message>
<location line="+18"/>
<source>Export the data in the current tab to a file</source>
<translation>Eksportere data i den aktuelle fane til en fil</translation>
</message>
<message>
<location line="-14"/>
<source>Encrypt or decrypt wallet</source>
<translation>Kryptere eller dekryptere tegnebog</translation>
</message>
<message>
<location line="+3"/>
<source>Backup wallet to another location</source>
<translation>Lav sikkerhedskopi af tegnebogen til et andet sted</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Skift adgangskode anvendt til tegnebogskryptering</translation>
</message>
<message>
<location line="+10"/>
<source>&Debug window</source>
<translation>Fejlsøgningsvindue</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>Åbn fejlsøgnings- og diagnosticeringskonsollen</translation>
</message>
<message>
<location line="-5"/>
<source>&Verify message...</source>
<translation>Verificér besked...</translation>
</message>
<message>
<location line="-200"/>
<source>AlessioCoin</source>
<translation>AlessioCoin</translation>
</message>
<message>
<location line="+0"/>
<source>Wallet</source>
<translation>Tegnebog</translation>
</message>
<message>
<location line="+178"/>
<source>&About AlessioCoin</source>
<translation>&Om AlessioCoin</translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>Vis / skjul</translation>
</message>
<message>
<location line="+9"/>
<source>Unlock wallet</source>
<translation>Lås tegnebog</translation>
</message>
<message>
<location line="+1"/>
<source>&Lock Wallet</source>
<translation>&Lås tegnebog</translation>
</message>
<message>
<location line="+1"/>
<source>Lock wallet</source>
<translation>Lås tegnebog</translation>
</message>
<message>
<location line="+34"/>
<source>&File</source>
<translation>Fil</translation>
</message>
<message>
<location line="+8"/>
<source>&Settings</source>
<translation>Indstillinger</translation>
</message>
<message>
<location line="+8"/>
<source>&Help</source>
<translation>Hjælp</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation>Faneværktøjslinje</translation>
</message>
<message>
<location line="+8"/>
<source>Actions toolbar</source>
<translation>Fanværktøjslinje</translation>
</message>
<message>
<location line="+13"/>
<location line="+9"/>
<source>[testnet]</source>
<translation>[testnetværk]</translation>
</message>
<message>
<location line="+0"/>
<location line="+60"/>
<source>AlessioCoin client</source>
<translation>AlessioCoin klient</translation>
</message>
<message numerus="yes">
<location line="+70"/>
<source>%n active connection(s) to AlessioCoin network</source>
<translation><numerusform>%n aktiv forbindelse til AlessioCoin netværk</numerusform><numerusform>%n aktive forbindelser til AlessioCoin netværk</numerusform></translation>
</message>
<message>
<location line="+40"/>
<source>Downloaded %1 blocks of transaction history.</source>
<translation>Downloadet %1 blokke af transaktions historie.</translation>
</message>
<message>
<location line="+413"/>
<source>Staking.<br>Your weight is %1<br>Network weight is %2<br>Expected time to earn reward is %3</source>
        <translation>Renter.<br>Din andel er %1<br>Netværkets andel er %2<br>Forventet tid til at modtage rente er %3</translation>
</message>
<message>
<location line="+6"/>
<source>Not staking because wallet is locked</source>
<translation>Ingen rente fordi tegnebog er låst</translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is offline</source>
<translation>Ingen rente fordi tegnebog er offline</translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is syncing</source>
<translation>Ingen rente fordi tegnebog er ved at synkronisere</translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because you don't have mature coins</source>
<translation>Ingen rente fordi der ingen modne mønter eksistere </translation>
</message>
<message numerus="yes">
<location line="-403"/>
<source>%n second(s) ago</source>
<translation><numerusform>%n sekund siden</numerusform><numerusform>%n sekunder siden</numerusform></translation>
</message>
<message>
<location line="-284"/>
<source>&Unlock Wallet...</source>
<translation>Lås tegnebog op</translation>
</message>
<message numerus="yes">
<location line="+288"/>
<source>%n minute(s) ago</source>
<translation><numerusform>%n minut siden</numerusform><numerusform>%n minutter siden</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s) ago</source>
<translation><numerusform>%n time siden</numerusform><numerusform>%n timer siden</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s) ago</source>
<translation><numerusform>%n dag siden</numerusform><numerusform>%n dage siden</numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Up to date</source>
<translation>Opdateret</translation>
</message>
<message>
<location line="+7"/>
<source>Catching up...</source>
<translation>Indhenter...</translation>
</message>
<message>
<location line="+10"/>
<source>Last received block was generated %1.</source>
<translation>Sidst modtagne blok blev genereret %1.</translation>
</message>
<message>
<location line="+59"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation>Denne transaktion er over grænsen størrelse. Du kan stadig sende det for et gebyr på %1, der går til de noder, der behandler din transaktion og hjælper med at støtte netværket. Ønsker du at betale gebyret?</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm transaction fee</source>
<translation>Bekræft transaktionsgebyr</translation>
</message>
<message>
<location line="+27"/>
<source>Sent transaction</source>
<translation>Afsendt transaktion</translation>
</message>
<message>
<location line="+1"/>
<source>Incoming transaction</source>
<translation>Indgående transaktion</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Dato: %1
Beløb: %2
Type: %3
Adresse: %4
</translation>
</message>
<message>
<location line="+100"/>
<location line="+15"/>
<source>URI handling</source>
<translation>URI håndtering</translation>
</message>
<message>
<location line="-15"/>
<location line="+15"/>
<source>URI can not be parsed! This can be caused by an invalid AlessioCoin address or malformed URI parameters.</source>
<translation>URI kan ikke tolkes! Dette kan skyldes en ugyldig AlessioCoin adresse eller misdannede URI parametre.</translation>
</message>
<message>
<location line="+18"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>Tegnebog er <b>krypteret</b> og i øjeblikket <b>ulåst</b></translation>
</message>
<message>
<location line="+10"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>Tegnebog er <b>krypteret</b> og i øjeblikket <b>låst</b></translation>
</message>
<message>
<location line="+25"/>
<source>Backup Wallet</source>
<translation>Sikkerhedskopier Tegnebog</translation>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation>Tegnebogsdata (*.dat)</translation>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation>Sikkerhedskopiering Mislykkedes</translation>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation>Der opstod en fejl under forsøg på at gemme data i tegnebogen til den nye placering.</translation>
</message>
<message numerus="yes">
<location line="+76"/>
<source>%n second(s)</source>
<translation><numerusform>%n sekund</numerusform><numerusform>%n sekunder</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n minute(s)</source>
<translation><numerusform>%n minut</numerusform><numerusform>%n minutter</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s)</source>
<translation><numerusform>%n time(r)</numerusform><numerusform>%n time(r)</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation><numerusform>%n dag(e)</numerusform><numerusform>%n dag(e)</numerusform></translation>
</message>
<message>
<location line="+18"/>
<source>Not staking</source>
<translation>Ingen rente</translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+109"/>
<source>A fatal error occurred. AlessioCoin can no longer continue safely and will quit.</source>
        <translation>Der opstod en fatal fejl. AlessioCoin kan ikke længere fortsætte sikkert og vil afslutte.</translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+90"/>
<source>Network Alert</source>
<translation>Netværksadvarsel</translation>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<location filename="../forms/coincontroldialog.ui" line="+14"/>
<source>Coin Control</source>
<translation>Mønt Kontrol</translation>
</message>
<message>
<location line="+31"/>
<source>Quantity:</source>
<translation>Antal:</translation>
</message>
<message>
<location line="+32"/>
<source>Bytes:</source>
<translation>Bytes:</translation>
</message>
<message>
<location line="+48"/>
<source>Amount:</source>
<translation>Beløb:</translation>
</message>
<message>
<location line="+32"/>
<source>Priority:</source>
<translation>Prioritet:</translation>
</message>
<message>
<location line="+48"/>
<source>Fee:</source>
<translation>Gebyr:</translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation>Lav Udgangseffekt:</translation>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="+551"/>
<source>no</source>
<translation>nej</translation>
</message>
<message>
<location filename="../forms/coincontroldialog.ui" line="+51"/>
<source>After Fee:</source>
<translation>Efter Gebyr:</translation>
</message>
<message>
<location line="+35"/>
<source>Change:</source>
<translation>Ændre:</translation>
</message>
<message>
<location line="+69"/>
<source>(un)select all</source>
<translation>(fra)vælg alle</translation>
</message>
<message>
<location line="+13"/>
<source>Tree mode</source>
<translation>Træ tilstand</translation>
</message>
<message>
<location line="+16"/>
<source>List mode</source>
<translation>Liste tilstand</translation>
</message>
<message>
<location line="+45"/>
<source>Amount</source>
<translation>Beløb</translation>
</message>
<message>
<location line="+5"/>
<source>Label</source>
<translation>Mærkat</translation>
</message>
<message>
<location line="+5"/>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+5"/>
<source>Date</source>
<translation>Dato</translation>
</message>
<message>
<location line="+5"/>
<source>Confirmations</source>
<translation>Bekræftelser</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed</source>
<translation>Bekræftet</translation>
</message>
<message>
<location line="+5"/>
<source>Priority</source>
<translation>Prioritet</translation>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="-515"/>
<source>Copy address</source>
<translation>Kopier adresse</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Kopier mærkat</translation>
</message>
<message>
<location line="+1"/>
<location line="+26"/>
<source>Copy amount</source>
<translation>Kopier beløb</translation>
</message>
<message>
<location line="-25"/>
<source>Copy transaction ID</source>
<translation>Kopier transaktionens ID</translation>
</message>
<message>
<location line="+24"/>
<source>Copy quantity</source>
<translation>Kopier antal</translation>
</message>
<message>
<location line="+2"/>
<source>Copy fee</source>
<translation>Kopier transkationsgebyr</translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation>Kopier efter transkationsgebyr</translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation>Kopier bytes</translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation>Kopier prioritet</translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation>Lav udgangseffekt</translation>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation>Kopier ændring</translation>
</message>
<message>
<location line="+317"/>
<source>highest</source>
<translation>højeste</translation>
</message>
<message>
<location line="+1"/>
<source>high</source>
<translation>høj</translation>
</message>
<message>
<location line="+1"/>
<source>medium-high</source>
<translation>medium-høj</translation>
</message>
<message>
<location line="+1"/>
<source>medium</source>
<translation>medium</translation>
</message>
<message>
<location line="+4"/>
<source>low-medium</source>
<translation>lav-medium</translation>
</message>
<message>
<location line="+1"/>
<source>low</source>
<translation>lav</translation>
</message>
<message>
<location line="+1"/>
<source>lowest</source>
<translation>lavest</translation>
</message>
<message>
<location line="+155"/>
<source>DUST</source>
<translation>DUST</translation>
</message>
<message>
<location line="+0"/>
<source>yes</source>
<translation>ja</translation>
</message>
<message>
<location line="+10"/>
<source>This label turns red, if the transaction size is bigger than 10000 bytes.
This means a fee of at least %1 per kb is required.
Can vary +/- 1 Byte per input.</source>
<translation>Denne etiket bliver rød, hvis transaktionen størrelse er større end 10000 byte.
Det betyder, at et gebyr på mindst %1 per kb er påkrævet.
Kan variere + / - 1 byte per indgang.</translation>
</message>
<message>
<location line="+1"/>
<source>Transactions with higher priority get more likely into a block.
This label turns red, if the priority is smaller than "medium".
This means a fee of at least %1 per kb is required.</source>
<translation>Transaktioner med højere prioritet får mere sandsynligt en blok.
Denne etiket bliver rød, hvis prioritet er mindre end "medium".
Det betyder, at et gebyr på mindst %1 per kb er påkrævet.</translation>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if any recipient receives an amount smaller than %1.
This means a fee of at least %2 is required.
Amounts below 0.546 times the minimum relay fee are shown as DUST.</source>
<translation>Denne etiket bliver rød, hvis nogen modtager et beløb, der er mindre end %1.
Det betyder, at et gebyr på mindst %2 er påkrævet.
Beløb under 0,546 gange det minimale gebyr er vist som DUST.</translation>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if the change is smaller than %1.
This means a fee of at least %2 is required.</source>
<translation>Denne etiket bliver rød, hvis ændringen er mindre end %1.
Det betyder, at et gebyr på mindst %2 er påkrævet.</translation>
</message>
<message>
<location line="+37"/>
<location line="+66"/>
<source>(no label)</source>
<translation>(ingen mærkat)</translation>
</message>
<message>
<location line="-9"/>
<source>change from %1 (%2)</source>
<translation>skift fra %1 (%2)</translation>
</message>
<message>
<location line="+1"/>
<source>(change)</source>
<translation>(skift)</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Rediger adresse</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>Mærkat</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>Etiketten er forbundet med denne post i adressekartoteket</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation>Adressen er forbundet med denne post i adressekartoteket. Dette kan kun ændres til sende adresser.</translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+20"/>
<source>New receiving address</source>
<translation>Ny modtagelsesadresse</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Ny afsendelsesadresse</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>Rediger modtagelsesadresse</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>Rediger afsendelsesadresse</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>Den indtastede adresse "%1" er allerede i adressebogen.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid AlessioCoin address.</source>
<translation>Den indtastede adresse "%1" er ikke en gyldig AlessioCoin adresse.</translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>Kunne ikke låse tegnebog op.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>Ny nøglegenerering mislykkedes.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+420"/>
<location line="+12"/>
<source>AlessioCoin-Qt</source>
<translation>AlessioCoin-Qt</translation>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>version</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>Anvendelse:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation>Kommandolinjeparametrene</translation>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation>UI opsætning</translation>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>Indstil sprog, for eksempel "de_DE" (standard: system locale)</translation>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation>Start minimeret</translation>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation>Vis splash skærm ved opstart (default: 1)</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Indstillinger</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>Generelt</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source>
<translation>Valgfri transaktionsgebyr pr kB, som hjælper med at sikre dine transaktioner bliver behandlet hurtigt. De fleste transaktioner er 1 kB. Gebyr 0,01 anbefales.</translation>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>Betal transaktionsgebyr</translation>
</message>
<message>
<location line="+31"/>
<source>Reserved amount does not participate in staking and is therefore spendable at any time.</source>
<translation>Reserveret beløb deltager ikke i forrentning og er derfor tilrådighed til enhver tid.</translation>
</message>
<message>
<location line="+15"/>
<source>Reserve</source>
<translation>Reserve</translation>
</message>
<message>
<location line="+31"/>
<source>Automatically start AlessioCoin after logging in to the system.</source>
<translation>Automatisk start AlessioCoin efter at have logget ind på systemet.</translation>
</message>
<message>
<location line="+3"/>
<source>&Start AlessioCoin on system login</source>
<translation>&Start AlessioCoin ved systems login</translation>
</message>
<message>
<location line="+7"/>
<source>Detach block and address databases at shutdown. This means they can be moved to another data directory, but it slows down shutdown. The wallet is always detached.</source>
<translation>Frigør blok og adressedatabaser ved lukning. Det betyder, at de kan flyttes til et anden data-bibliotek, men det sinker lukning. Tegnebogen er altid frigjort.</translation>
</message>
<message>
<location line="+3"/>
<source>&Detach databases at shutdown</source>
<translation>&Frigør databaser ved lukning</translation>
</message>
<message>
<location line="+21"/>
<source>&Network</source>
<translation>Netværk</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the AlessioCoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation>Automatisk åbne AlessioCoin klient-port på routeren. Dette virker kun, når din router understøtter UPnP og er det er aktiveret.</translation>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>Konfigurer port vha. UPnP</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the AlessioCoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation>Opret forbindelse til AlessioCoin netværk via en SOCKS proxy (fx ved tilslutning gennem Tor).</translation>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation>&Tilslut gennem SOCKS proxy:</translation>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>Proxy-IP:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation>IP-adressen på proxy (f.eks 127.0.0.1)</translation>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>Port:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Porten på proxyen (f.eks. 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>SOCKS-version</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>SOCKS-version af proxyen (f.eks. 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>Vindue</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>Vis kun et statusikon efter minimering af vinduet.</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>Minimer til statusfeltet i stedet for proceslinjen</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>Minimer i stedet for at afslutte programmet, når vinduet lukkes. Når denne indstilling er valgt, vil programmet kun blive lukket, når du har valgt Afslut i menuen.</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>Minimer ved lukning</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>Visning</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>Brugergrænsefladesprog:</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting AlessioCoin.</source>
<translation>Sproget i brugergrænsefladen kan indstilles her. Denne indstilling vil træde i kraft efter genstart af AlessioCoin tegnebog.</translation>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>Enhed at vise beløb i:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Vælg den standard underopdelingsenhed, som skal vises i brugergrænsefladen og ved afsendelse af bitcoins.</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show AlessioCoin addresses in the transaction list or not.</source>
<translation>Få vist AlessioCoin adresser på listen over transaktioner eller ej.</translation>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>Vis adresser i transaktionsliste</translation>
</message>
<message>
<location line="+7"/>
<source>Whether to show coin control features or not.</source>
<translation> Vis mønt kontrol funktioner eller ej.</translation>
</message>
<message>
<location line="+3"/>
<source>Display coin &control features (experts only!)</source>
<translation>Vis mønt & kontrol funktioner (kun for eksperter!)</translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>OK</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>Annuller</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>&Anvend</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+55"/>
<source>default</source>
<translation>standard</translation>
</message>
<message>
<location line="+149"/>
<location line="+9"/>
<source>Warning</source>
<translation>Advarsel</translation>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting AlessioCoin.</source>
<translation>Denne indstilling vil træde i kraft efter genstart af AlessioCoin.</translation>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>Ugyldig proxy-adresse</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Formular</translation>
</message>
<message>
<location line="+33"/>
<location line="+231"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the AlessioCoin network after a connection is established, but this process has not completed yet.</source>
<translation>De viste oplysninger kan være forældet. Din tegnebog synkroniserer automatisk med AlessioCoin netværket efter en forbindelse er etableret, men denne proces er ikke afsluttet endnu.</translation>
</message>
<message>
<location line="-160"/>
<source>Stake:</source>
<translation>Rente:</translation>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>Ubekræftede:</translation>
</message>
<message>
<location line="-107"/>
<source>Wallet</source>
<translation>Tegnebog</translation>
</message>
<message>
<location line="+49"/>
<source>Spendable:</source>
<translation>Brugbar:</translation>
</message>
<message>
<location line="+16"/>
<source>Your current spendable balance</source>
<translation>Din nuværende tilgængelige saldo</translation>
</message>
<message>
<location line="+71"/>
<source>Immature:</source>
<translation>Umodne:</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>Udvunden saldo, som endnu ikke er modnet</translation>
</message>
<message>
<location line="+20"/>
<source>Total:</source>
<translation>Total:</translation>
</message>
<message>
<location line="+16"/>
<source>Your current total balance</source>
<translation>Din nuværende totale saldo</translation>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>Nyeste transaktioner</b></translation>
</message>
<message>
<location line="-108"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation>Summen af transaktioner, der endnu mangler at blive bekræftet, og ikke tæller mod den nuværende balance</translation>
</message>
<message>
<location line="-29"/>
<source>Total of coins that was staked, and do not yet count toward the current balance</source>
<translation>I alt mønter, der bliver berentet, og endnu ikke tæller mod den nuværende balance</translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+113"/>
<location line="+1"/>
<source>out of sync</source>
<translation>ikke synkroniseret</translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation>QR Kode Dialog</translation>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>Betalingsanmodning</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>Antal:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>Label:</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>Besked:</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>&Gem Som...</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
        <translation>Fejl ved kodning af URI til QR-kode.</translation>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation>Det indtastede beløb er ugyldig, venligst tjek igen.</translation>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>Resulterende URI for lang, prøv at reducere teksten til etiketten / besked.</translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation>Gem QR kode</translation>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation>PNG billede (*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>Klientnavn</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+348"/>
<source>N/A</source>
<translation>N/A</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>Klientversion</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>Information</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>Anvender OpenSSL-version</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>Opstartstid</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>Netværk</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>Antal forbindelser</translation>
</message>
<message>
<location line="+23"/>
        <source>On testnet</source>
        <translation>På testnet</translation>
    </message>
    <message>
<location line="+23"/>
<source>Block chain</source>
<translation>Blokkæde</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>Nuværende antal blokke</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>Estimeret antal blokke</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation>Tidsstempel for seneste blok</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>Åbn</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation>Kommandolinjeparametrene</translation>
</message>
<message>
<location line="+7"/>
<source>Show the AlessioCoin-Qt help message to get a list with possible AlessioCoin command-line options.</source>
<translation>Vis AlessioCoin-Qt hjælpe besked for at få en liste med mulige AlessioCoin kommandolinjeparametre.</translation>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation>&Vis</translation>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>Konsol</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation>Byggedato</translation>
</message>
<message>
<location line="-104"/>
<source>AlessioCoin - Debug window</source>
<translation>AlessioCoin - Debug vindue</translation>
</message>
<message>
<location line="+25"/>
<source>AlessioCoin Core</source>
<translation>AlessioCoin Kerne</translation>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>Fejlsøgningslogfil</translation>
</message>
<message>
<location line="+7"/>
<source>Open the AlessioCoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation>Åbn AlessioCoin debug logfilen fra den nuværende data mappe. Dette kan tage et par sekunder for store logfiler.</translation>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>Ryd konsol</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-33"/>
<source>Welcome to the AlessioCoin RPC console.</source>
<translation>Velkommen til AlessioCoin RPC-konsol.</translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>Brug op og ned-piletasterne til at navigere historikken og <b>Ctrl-L</b> til at rydde skærmen.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>Tast <b>help</b> for en oversigt over de tilgængelige kommandoer.</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+182"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Send bitcoins</translation>
</message>
<message>
<location line="+76"/>
<source>Coin Control Features</source>
<translation>Mønt Kontrol Egenskaber</translation>
</message>
<message>
<location line="+20"/>
<source>Inputs...</source>
<translation>Input ...</translation>
</message>
<message>
<location line="+7"/>
<source>automatically selected</source>
<translation>Automatisk valgt</translation>
</message>
<message>
<location line="+19"/>
<source>Insufficient funds!</source>
<translation>Utilstrækkelig midler!</translation>
</message>
<message>
<location line="+77"/>
<source>Quantity:</source>
<translation>Antal:</translation>
</message>
<message>
<location line="+22"/>
<location line="+35"/>
<source>0</source>
<translation>0</translation>
</message>
<message>
<location line="-19"/>
<source>Bytes:</source>
<translation>Bytes:</translation>
</message>
<message>
<location line="+51"/>
<source>Amount:</source>
<translation>Beløb:</translation>
</message>
<message>
<location line="+22"/>
<location line="+86"/>
<location line="+86"/>
<location line="+32"/>
<source>0.00 CAM</source>
        <translation>0.00 CAM</translation>
</message>
<message>
<location line="-191"/>
<source>Priority:</source>
<translation>Prioritet:</translation>
</message>
<message>
<location line="+19"/>
<source>medium</source>
<translation>medium</translation>
</message>
<message>
<location line="+32"/>
<source>Fee:</source>
        <translation>Gebyr:</translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
        <translation>Lav udgangseffekt:</translation>
</message>
<message>
<location line="+19"/>
<source>no</source>
<translation>nej</translation>
</message>
<message>
<location line="+32"/>
<source>After Fee:</source>
        <translation>Efter gebyr:</translation>
</message>
<message>
<location line="+35"/>
<source>Change</source>
<translation>Skift</translation>
</message>
<message>
<location line="+50"/>
<source>custom change address</source>
        <translation>brugerdefineret byttepengeadresse</translation>
</message>
<message>
<location line="+106"/>
<source>Send to multiple recipients at once</source>
<translation>Send til flere modtagere på en gang</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>Tilføj modtager</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>Fjern alle transaktions omkostnings felter </translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>Ryd alle</translation>
</message>
<message>
<location line="+28"/>
<source>Balance:</source>
<translation>Saldo:</translation>
</message>
<message>
<location line="+16"/>
<source>123.456 CAM</source>
<translation>123.456 CAM</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>Bekræft afsendelsen</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>Afsend</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-173"/>
<source>Enter a AlessioCoin address (e.g. SXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</source>
<translation>Indtast en AlessioCoin-adresse (f.eks SXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</translation>
</message>
<message>
<location line="+15"/>
<source>Copy quantity</source>
<translation>Kopier antal</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Kopier beløb</translation>
</message>
<message>
<location line="+1"/>
<source>Copy fee</source>
<translation>Kopier transkationsgebyr</translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation>Kopier efter transkationsgebyr</translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation>Kopier bytes</translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation>Kopier prioritet</translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation>Kopier lav produktion</translation>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation>Kopier forandring</translation>
</message>
<message>
<location line="+86"/>
<source><b>%1</b> to %2 (%3)</source>
<translation><b>%1</b> til %2 (%3)</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>Bekræft afsendelse af bitcoins</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
        <translation>Er du sikker på, at du vil sende %1?</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
        <translation> og </translation>
</message>
<message>
<location line="+29"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>Modtagerens adresse er ikke gyldig. Tjek venligst adressen igen.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>Beløbet til betaling skal være større end 0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>Beløbet overstiger din saldo.</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>Totalen overstiger din saldo, når %1 transaktionsgebyr er inkluderet.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>Duplikeret adresse fundet. Du kan kun sende til hver adresse en gang pr. afsendelse.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed.</source>
<translation>Fejl: Transaktion oprettelse mislykkedes.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Fejl: Transaktionen blev afvist. Dette kan ske, hvis nogle af mønterne i din tegnebog allerede er blevet brugt, som hvis du brugte en kopi af wallet.dat og mønterne blev brugt i kopien, men ikke markeret som brugt her.</translation>
</message>
<message>
<location line="+251"/>
<source>WARNING: Invalid AlessioCoin address</source>
<translation>ADVARSEL: Ugyldig AlessioCoin adresse</translation>
</message>
<message>
<location line="+13"/>
<source>(no label)</source>
<translation>(ingen mærkat)</translation>
</message>
<message>
<location line="+4"/>
<source>WARNING: unknown change address</source>
        <translation>ADVARSEL: ukendt byttepengeadresse</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>Form</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>Beløb:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>Betal til:</translation>
</message>
<message>
<location line="+24"/>
<location filename="../sendcoinsentry.cpp" line="+25"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Indtast en mærkat for denne adresse for at føje den til din adressebog</translation>
</message>
<message>
<location line="+9"/>
<source>&Label:</source>
<translation>Mærkat:</translation>
</message>
<message>
<location line="+18"/>
<source>The address to send the payment to (e.g. SXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</source>
<translation>Adressen til at sende betalingen til (f.eks SXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</translation>
</message>
<message>
<location line="+10"/>
<source>Choose address from address book</source>
<translation>Vælg adresse fra adressebogen</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Indsæt adresse fra udklipsholderen</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation>Fjern denne modtager</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a AlessioCoin address (e.g. SXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</source>
<translation>Indtast en AlessioCoin-adresse (f.eks SXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>Signature - Underskriv/verificér en besked</translation>
</message>
<message>
<location line="+13"/>
<location line="+124"/>
<source>&Sign Message</source>
<translation>Underskriv besked</translation>
</message>
<message>
<location line="-118"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
        <translation>Du kan underskrive beskeder med dine adresser for at bevise, at de tilhører dig. Pas på ikke at underskrive noget vagt, da phisingangreb kan narre dig til at overdrage din identitet. Underskriv kun fuldt detaljerede udsagn, du er enig i.</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. SXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</source>
<translation>Adresse til at underskrive meddelelsen med (f.eks SXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</translation>
</message>
<message>
<location line="+10"/>
<location line="+203"/>
<source>Choose an address from the address book</source>
<translation>Vælg en adresse fra adressebogen</translation>
</message>
<message>
<location line="-193"/>
<location line="+203"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-193"/>
<source>Paste address from clipboard</source>
<translation>Indsæt adresse fra udklipsholderen</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>Indtast beskeden, du ønsker at underskrive</translation>
</message>
<message>
<location line="+24"/>
<source>Copy the current signature to the system clipboard</source>
<translation>Kopier den nuværende underskrift til systemets udklipsholder</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this AlessioCoin address</source>
<translation>Underskriv brevet for at bevise du ejer denne AlessioCoin adresse</translation>
</message>
<message>
<location line="+17"/>
<source>Reset all sign message fields</source>
<translation>Nulstil alle "underskriv besked"-felter</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>Ryd alle</translation>
</message>
<message>
<location line="-87"/>
<location line="+70"/>
<source>&Verify Message</source>
<translation>Verificér besked</translation>
</message>
<message>
<location line="-64"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>Indtast den underskrevne adresse, beskeden (inkluder linjeskift, mellemrum mv. nøjagtigt, som de fremgår) og underskriften for at verificére beskeden. Vær forsigtig med ikke at lægge mere i underskriften end besked selv, så du undgår at blive narret af et man-in-the-middle-angreb.</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. SXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</source>
<translation>Adressen meddelelse blev underskrevet med (f.eks SXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified AlessioCoin address</source>
<translation>Kontroller meddelelsen for at sikre, at den blev indgået med den angivne AlessioCoin adresse</translation>
</message>
<message>
<location line="+17"/>
<source>Reset all verify message fields</source>
<translation>Nulstil alle "verificér besked"-felter</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a AlessioCoin address (e.g. SXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</source>
<translation>Indtast en AlessioCoin-adresse (f.eks SXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>Klik "Underskriv besked" for at generere underskriften</translation>
</message>
<message>
<location line="+3"/>
<source>Enter AlessioCoin signature</source>
<translation>Indtast AlessioCoin underskrift</translation>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>Den indtastede adresse er ugyldig.</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>Tjek venligst adressen, og forsøg igen.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>Den indtastede adresse henviser ikke til en nøgle.</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>Tegnebogsoplåsning annulleret.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>Den private nøgle for den indtastede adresse er ikke tilgængelig.</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>Underskrivning af besked mislykkedes.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>Besked underskrevet.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>Underskriften kunne ikke afkodes.</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>Tjek venligst underskriften, og forsøg igen.</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>Underskriften matcher ikke beskedens indhold.</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>Verificéring af besked mislykkedes.</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>Besked verificéret.</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+19"/>
<source>Open until %1</source>
<translation>Åben indtil %1</translation>
</message>
<message numerus="yes">
<location line="-2"/>
<source>Open for %n block(s)</source>
<translation><numerusform>Åben for %n blok</numerusform><numerusform>Åben for %n blok(ke)</numerusform></translation>
</message>
<message>
<location line="+8"/>
<source>conflicted</source>
<translation>konflikt</translation>
</message>
<message>
<location line="+2"/>
<source>%1/offline</source>
<translation>%1/offline</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/ubekræftet</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 bekræftelser</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation>Status</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>, transmitteret igennem %n knude(r)</numerusform><numerusform>, transmitteret igennem %n knude(r)</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Dato</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>Kilde</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>Genereret</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>Fra</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>Til</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>egen adresse</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>mærkat</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>Kredit</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>modner efter yderligere %n blok(ke)</numerusform><numerusform>modner efter yderligere %n blok(ke)</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>ikke accepteret</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>Debet</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>Transaktionsgebyr</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>Nettobeløb</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>Besked</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>Kommentar</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>Transaktionens ID</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 510 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation>Genererede mønter skal modne 510 blokke, før de kan bruges. Da du genererede denne blok, blev den transmitteret til netværket for at blive føjet til blokkæden. Hvis det mislykkes at komme ind i kæden, vil dens tilstand ændres til "ikke accepteret", og den vil ikke kunne bruges. Dette kan lejlighedsvis ske, hvis en anden knude genererer en blok få sekunder efter din.</translation>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>Fejlsøgningsinformation</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>Transaktion</translation>
</message>
<message>
<location line="+5"/>
<source>Inputs</source>
<translation>Input</translation>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>Beløb</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>sand</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>falsk</translation>
</message>
<message>
<location line="-211"/>
<source>, has not been successfully broadcast yet</source>
<translation>, er ikke blevet transmitteret endnu</translation>
</message>
<message>
<location line="+35"/>
<source>unknown</source>
<translation>ukendt</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Transaktionsdetaljer</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Denne rude viser en detaljeret beskrivelse af transaktionen</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+226"/>
<source>Date</source>
<translation>Dato</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Type</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Beløb</translation>
</message>
<message>
<location line="+60"/>
<source>Open until %1</source>
<translation>Åben indtil %1</translation>
</message>
<message>
<location line="+12"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Bekræftet (%1 bekræftelser)</translation>
</message>
<message numerus="yes">
<location line="-15"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Åben %n blok(ke) yderligere</numerusform><numerusform>Åben %n blok(ke) yderligere</numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Offline</source>
<translation>Offline</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed</source>
<translation>Ubekræftede</translation>
</message>
<message>
<location line="+3"/>
<source>Confirming (%1 of %2 recommended confirmations)</source>
<translation>Bekræfter (%1 af %2 anbefalede bekræftelser)</translation>
</message>
<message>
<location line="+6"/>
<source>Conflicted</source>
<translation>Konflikt</translation>
</message>
<message>
<location line="+3"/>
<source>Immature (%1 confirmations, will be available after %2)</source>
<translation>Umoden (%1 bekræftelser, vil være tilgængelig efter %2)</translation>
</message>
<message>
<location line="+3"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>Denne blok blev ikke modtaget af nogen andre knuder og vil formentlig ikke blive accepteret!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Genereret, men ikke accepteret</translation>
</message>
<message>
<location line="+42"/>
<source>Received with</source>
<translation>Modtaget med</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Modtaget fra</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Sendt til</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Betaling til dig selv</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Udvundne</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(n/a)</translation>
</message>
<message>
<location line="+190"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>Transaktionsstatus. Hold musen over dette felt for at vise antallet af bekræftelser.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>Dato og klokkeslæt for modtagelse af transaktionen.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Transaktionstype.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>Destinationsadresse for transaktion.</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Beløb fjernet eller tilføjet balance.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+55"/>
<location line="+16"/>
<source>All</source>
<translation>Alle</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>I dag</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>Denne uge</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Denne måned</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>Sidste måned</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Dette år</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>Interval...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>Modtaget med</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Sendt til</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>Til dig selv</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Udvundne</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Andet</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>Indtast adresse eller mærkat for at søge</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Minimumsbeløb</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Kopier adresse</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Kopier mærkat</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Kopier beløb</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>Kopier transaktionens ID</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Rediger mærkat</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>Vis transaktionsdetaljer</translation>
</message>
<message>
<location line="+144"/>
<source>Export Transaction Data</source>
<translation>Eksporter transaktionsdata</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Kommasepareret fil (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Bekræftet</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Dato</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Type</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Mærkat</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Beløb</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>Fejl under eksport</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Kunne ikke skrive til filen %1.</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>Interval:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>til</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+206"/>
<source>Sending...</source>
<translation>Sender...</translation>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+33"/>
<source>AlessioCoin version</source>
<translation>AlessioCoin version</translation>
</message>
<message>
<location line="+1"/>
<source>Usage:</source>
<translation>Anvendelse:</translation>
</message>
<message>
<location line="+1"/>
<source>Send command to -server or AlessioCoind</source>
<translation>Send kommando til -server eller AlessioCoind</translation>
</message>
<message>
<location line="+1"/>
<source>List commands</source>
<translation>Liste over kommandoer</translation>
</message>
<message>
<location line="+1"/>
<source>Get help for a command</source>
<translation>Få hjælp til en kommando</translation>
</message>
<message>
<location line="+2"/>
<source>Options:</source>
<translation>Indstillinger:</translation>
</message>
<message>
<location line="+2"/>
<source>Specify configuration file (default: AlessioCoin.conf)</source>
<translation>Angiv konfigurationsfil (default: AlessioCoin.conf)</translation>
</message>
<message>
<location line="+1"/>
<source>Specify pid file (default: AlessioCoind.pid)</source>
<translation>Angiv pid fil (standard: AlessioCoind.pid)</translation>
</message>
<message>
<location line="+2"/>
<source>Specify wallet file (within data directory)</source>
<translation>Angiv tegnebogs fil (indenfor data mappe)</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>Angiv datakatalog</translation>
</message>
<message>
<location line="+2"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>Angiv databasecachestørrelse i megabytes (standard: 25)</translation>
</message>
<message>
<location line="+1"/>
<source>Set database disk log size in megabytes (default: 100)</source>
<translation>Set database disk logstørrelsen i megabyte (standard: 100)</translation>
</message>
<message>
<location line="+6"/>
<source>Listen for connections on <port> (default: 51737 or testnet: 51997)</source>
<translation>Lyt efter forbindelser på <port> (default: 51737 eller Testnet: 51997)</translation>
</message>
<message>
<location line="+1"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>Oprethold højest <n> forbindelser til andre i netværket (standard: 125)</translation>
</message>
<message>
<location line="+3"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Forbind til en knude for at modtage adresse, og afbryd</translation>
</message>
<message>
<location line="+1"/>
<source>Specify your own public address</source>
<translation>Angiv din egen offentlige adresse</translation>
</message>
<message>
<location line="+5"/>
<source>Bind to given address. Use [host]:port notation for IPv6</source>
<translation>Binder til en given adresse. Brug [host]: port notation for IPv6</translation>
</message>
<message>
<location line="+2"/>
<source>Stake your coins to support network and gain reward (default: 1)</source>
<translation>Opbevar dine mønter for at støtte netværket og få belønning (default: 1)</translation>
</message>
<message>
<location line="+5"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Grænse for afbrydelse til dårlige forbindelser (standard: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Antal sekunder dårlige forbindelser skal vente før reetablering (standard: 86400)</translation>
</message>
<message>
<location line="-44"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>Der opstod en fejl ved angivelse af RPC-porten %u til at lytte på IPv4: %s</translation>
</message>
<message>
<location line="+51"/>
<source>Detach block and address databases. Increases shutdown time (default: 0)</source>
<translation>Frigør blok og adresse databaser. Øg shutdown tid (default: 0)</translation>
</message>
<message>
<location line="+109"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Fejl: Transaktionen blev afvist. Dette kan ske, hvis nogle af mønterne i din pung allerede er blevet brugt, som hvis du brugte en kopi af wallet.dat og mønterne blev brugt i kopien, men ikke markeret her.</translation>
</message>
<message>
<location line="-5"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source>
<translation>Fejl: Denne transaktion kræver et transaktionsgebyr på mindst %s på grund af dens størrelse, kompleksitet eller anvendelse af nyligt modtagne midler</translation>
</message>
<message>
<location line="-87"/>
<source>Listen for JSON-RPC connections on <port> (default: 51736 or testnet: 51996)</source>
<translation>Spor efter JSON-RPC-forbindelser på <port> (default: 51736 eller Testnet: 51996)</translation>
</message>
<message>
<location line="-11"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Accepter kommandolinje- og JSON-RPC-kommandoer</translation>
</message>
<message>
<location line="+101"/>
<source>Error: Transaction creation failed </source>
<translation>Fejl: Transaktion oprettelse mislykkedes</translation>
</message>
<message>
<location line="-5"/>
<source>Error: Wallet locked, unable to create transaction </source>
<translation>Fejl: Wallet låst, ude af stand til at skabe transaktion</translation>
</message>
<message>
<location line="-8"/>
<source>Importing blockchain data file.</source>
<translation>Importerer blockchain datafil.</translation>
</message>
<message>
<location line="+1"/>
<source>Importing bootstrap blockchain data file.</source>
<translation>Import af bootstrap blockchain datafil.</translation>
</message>
<message>
<location line="-88"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>Kør i baggrunden som en service, og accepter kommandoer</translation>
</message>
<message>
<location line="+1"/>
<source>Use the test network</source>
<translation>Brug testnetværket</translation>
</message>
<message>
<location line="-24"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>Accepter forbindelser udefra (standard: 1 hvis hverken -proxy eller -connect)</translation>
</message>
<message>
<location line="-38"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>Der opstod en fejl ved angivelse af RPC-porten %u til at lytte på IPv6, falder tilbage til IPv4: %s</translation>
</message>
<message>
<location line="+117"/>
<source>Error initializing database environment %s! To recover, BACKUP THAT DIRECTORY, then remove everything from it except for wallet.dat.</source>
<translation>Fejl ved initialisering af databasemiljø %s! For at gendanne, TAG BACKUP AF MAPPEN, og fjern derefter alt fra den bortset fra wallet.dat.</translation>
</message>
<message>
<location line="-20"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation>Angiv maksimal størrelse på high-priority/low-fee transaktioner i bytes (standard: 27000)</translation>
</message>
<message>
<location line="+11"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Advarsel: -paytxfee er sat meget højt! Dette er det gebyr du vil betale, hvis du sender en transaktion.</translation>
</message>
<message>
<location line="+61"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong AlessioCoin will not work properly.</source>
<translation>Advarsel: Kontroller venligst, at computerens dato og klokkeslæt er korrekt! Hvis dit ur er forkert vil AlessioCoin ikke fungere korrekt.</translation>
</message>
<message>
<location line="-31"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation>Advarsel: fejl under læsning af wallet.dat! Alle nøgler blev læst korrekt, men transaktionsdata eller adressebogsposter kan mangle eller være forkerte.</translation>
</message>
<message>
<location line="-18"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>Advarsel: wallet.dat ødelagt, data reddet! Oprindelig wallet.net gemt som wallet.{timestamp}.bak i %s; hvis din saldo eller dine transaktioner er forkert, bør du genskabe fra en sikkerhedskopi.</translation>
</message>
<message>
<location line="-30"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>Forsøg at genskabe private nøgler fra ødelagt wallet.dat</translation>
</message>
<message>
<location line="+4"/>
<source>Block creation options:</source>
<translation>Blokoprettelsestilvalg:</translation>
</message>
<message>
<location line="-62"/>
<source>Connect only to the specified node(s)</source>
<translation>Tilslut kun til de(n) angivne knude(r)</translation>
</message>
<message>
<location line="+4"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>Find egen IP-adresse (standard: 1 når lytter og ingen -externalip)</translation>
</message>
<message>
<location line="+94"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>Lytning på enhver port mislykkedes. Brug -listen=0, hvis du ønsker dette.</translation>
</message>
<message>
<location line="-90"/>
<source>Find peers using DNS lookup (default: 1)</source>
<translation>Find peer bruges DNS-opslag (default: 1)</translation>
</message>
<message>
<location line="+5"/>
<source>Sync checkpoints policy (default: strict)</source>
<translation>Synkroniser checkpoints politik (default: streng)</translation>
</message>
<message>
<location line="+83"/>
<source>Invalid -tor address: '%s'</source>
<translation>Ugyldig -tor-adresse: '%s'</translation>
</message>
<message>
<location line="+4"/>
<source>Invalid amount for -reservebalance=<amount></source>
<translation>Ugyldigt beløb for -reservebalance=<amount></translation>
</message>
<message>
<location line="-82"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>Maksimum for modtagelsesbuffer pr. forbindelse, <n>*1000 bytes (standard: 5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>Maksimum for afsendelsesbuffer pr. forbindelse, <n>*1000 bytes (standard: 1000)</translation>
</message>
<message>
<location line="-16"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>Tilslut kun til knuder i netværk <net> (IPv4, IPv6 eller Tor)</translation>
</message>
<message>
<location line="+28"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation>Output ekstra debugging information. Indebærer alle andre-debug * muligheder</translation>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation>Output ekstra netværk debugging information</translation>
</message>
<message>
<location line="+1"/>
<source>Prepend debug output with timestamp</source>
<translation>Prepend debug output med tidsstempel</translation>
</message>
<message>
<location line="+35"/>
<source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation>SSL-indstillinger: (se Bitcoin Wiki for SSL-opsætningsinstruktioner)</translation>
</message>
<message>
<location line="-74"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation>Vælg den version af socks proxy du vil bruge (4-5, standard: 5)</translation>
</message>
<message>
<location line="+41"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Send sporings-/fejlsøgningsinformation til konsollen i stedet for debug.log filen</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation>Send trace / debug info til debugger</translation>
</message>
<message>
<location line="+28"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation>Indstil maks. blok størrelse i bytes (standard: 250000)</translation>
</message>
<message>
<location line="-1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>Angiv minimumsblokstørrelse i bytes (standard: 0)</translation>
</message>
<message>
<location line="-29"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>Formindsk debug.log filen ved klientopstart (standard: 1 hvis ikke -debug)</translation>
</message>
<message>
<location line="-42"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>Angiv tilslutningstimeout i millisekunder (standard: 5000)</translation>
</message>
<message>
<location line="+109"/>
<source>Unable to sign checkpoint, wrong checkpointkey?
</source>
<translation>Kan ikke logge checkpoint, forkert checkpointkey?
</translation>
</message>
<message>
<location line="-80"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>Forsøg at bruge UPnP til at konfigurere den lyttende port (standard: 0)</translation>
</message>
<message>
<location line="-1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Forsøg at bruge UPnP til at konfigurere den lyttende port (standard: 1 når lytter)</translation>
</message>
<message>
<location line="-25"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation>Brug proxy til at nå tor skjulte services (Standard: samme som-proxy)</translation>
</message>
<message>
<location line="+42"/>
<source>Username for JSON-RPC connections</source>
<translation>Brugernavn til JSON-RPC-forbindelser</translation>
</message>
<message>
<location line="+47"/>
<source>Verifying database integrity...</source>
<translation>Bekræfter database integritet ...</translation>
</message>
<message>
<location line="+57"/>
<source>WARNING: syncronized checkpoint violation detected, but skipped!</source>
<translation>ADVARSEL: synkroniseret checkpoint-overtrædelse opdaget, men sprunget over!</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: Disk space is low!</source>
<translation>Advarsel: Diskplads lav!</translation>
</message>
<message>
<location line="-2"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>Advarsel: Denne version er forældet, opgradering påkrævet!</translation>
</message>
<message>
<location line="-48"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation>wallet.dat ødelagt, redning af data mislykkedes</translation>
</message>
<message>
<location line="-54"/>
<source>Password for JSON-RPC connections</source>
<translation>Adgangskode til JSON-RPC-forbindelser</translation>
</message>
<message>
<location line="-84"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=AlessioCoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "AlessioCoin Alert" [email protected]
</source>
<translation>%s, du skal indstille et rpcpassword i konfigurationsfilen:
%s
Det anbefales at bruge følgende tilfældige adgangskode:
rpcuser=AlessioCoinrpc
rpcpassword=%s
(du behøver ikke at huske denne adgangskode)
Brugernavn og adgangskode må ikke være den samme.
Hvis filen ikke findes, skal du oprette den med filtilladelser, så kun ejeren kan læse den.
Det anbefales også at sætte alertnotify, så du får besked om problemer;
for eksempel: alertnotify=echo %%s | mail -s "AlessioCoin Alert" [email protected]
</translation>
</message>
<message>
<location line="+51"/>
<source>Find peers using internet relay chat (default: 0)</source>
<translation>Find peers der bruger internet relay chat (standard: 0)</translation>
</message>
<message>
<location line="+5"/>
<source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source>
<translation>Synkroniser tid med andre noder. Deaktiver, hvis tiden på dit system er præcis eksempelvis synkroniseret med NTP (default: 1)</translation>
</message>
<message>
<location line="+15"/>
<source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source>
<translation>Når du opretter transaktioner ignoreres input med værdi mindre end dette (standard: 0,01)</translation>
</message>
<message>
<location line="+16"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>Tillad JSON-RPC-forbindelser fra bestemt IP-adresse</translation>
</message>
<message>
<location line="+1"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>Send kommandoer til knude, der kører på <ip> (standard: 127.0.0.1)</translation>
</message>
<message>
<location line="+1"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Udfør kommando, når den bedste blok ændres (%s i kommandoen erstattes med blokhash)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>Udfør kommando, når en transaktion i tegnebogen ændres (%s i kommandoen erstattes med TxID)</translation>
</message>
<message>
<location line="+3"/>
<source>Require a confirmations for change (default: 0)</source>
<translation>Kræver en bekræftelser for forandring (default: 0)</translation>
</message>
<message>
<location line="+1"/>
<source>Enforce transaction scripts to use canonical PUSH operators (default: 1)</source>
<translation>Gennemtving transaktions omkostninger scripts til at bruge canoniske PUSH operatører (default: 1)</translation>
</message>
<message>
<location line="+2"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation>Udfør kommando, når en relevant advarsel er modtaget (% s i cmd erstattes af meddelelse)</translation>
</message>
<message>
<location line="+3"/>
<source>Upgrade wallet to latest format</source>
<translation>Opgrader tegnebog til seneste format</translation>
</message>
<message>
<location line="+1"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>Angiv nøglepoolstørrelse til <n> (standard: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Gennemsøg blokkæden for manglende tegnebogstransaktioner</translation>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 2500, 0 = all)</source>
<translation>Hvor mange blokke til at kontrollere ved opstart (standard: 2500, 0 = alle)</translation>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-6, default: 1)</source>
<translation>Hvor grundig blok verifikation er (0-6, default: 1)</translation>
</message>
<message>
<location line="+1"/>
<source>Imports blocks from external blk000?.dat file</source>
<translation>Importere blokke fra ekstern blk000?. Dat fil</translation>
</message>
<message>
<location line="+8"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>Brug OpenSSL (https) for JSON-RPC-forbindelser</translation>
</message>
<message>
<location line="+1"/>
<source>Server certificate file (default: server.cert)</source>
<translation>Servercertifikat-fil (standard: server.cert)</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>Serverens private nøgle (standard: server.pem)</translation>
</message>
<message>
<location line="+1"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>Acceptable ciphers (default: TLSv1 + HØJ:! SSLv2: aNULL: eNULL: AH: 3DES: @ styrke)</translation>
</message>
<message>
<location line="+53"/>
<source>Error: Wallet unlocked for staking only, unable to create transaction.</source>
<translation>Fejl: Pung låst for at udregne rente, ude af stand til at skabe transaktion.</translation>
</message>
<message>
<location line="+18"/>
<source>WARNING: Invalid checkpoint found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source>
<translation>ADVARSEL: Ugyldig checkpoint fundet! Viste transaktioner er måske ikke korrekte! Du kan være nødt til at opgradere, eller underrette udviklerne.</translation>
</message>
<message>
<location line="-158"/>
<source>This help message</source>
<translation>Denne hjælpebesked</translation>
</message>
<message>
<location line="+95"/>
<source>Wallet %s resides outside data directory %s.</source>
<translation>Wallet% s placeret udenfor data mappe% s.</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot obtain a lock on data directory %s. AlessioCoin is probably already running.</source>
<translation>Kan ikke få en lås på data mappe% s. AlessioCoin kører sikkert allerede.</translation>
</message>
<message>
<location line="-98"/>
<source>AlessioCoin</source>
<translation>AlessioCoin</translation>
</message>
<message>
<location line="+140"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>Kunne ikke tildele %s på denne computer (bind returnerede fejl %d, %s)</translation>
</message>
<message>
<location line="-130"/>
<source>Connect through socks proxy</source>
<translation>Tilslut gennem socks proxy</translation>
</message>
<message>
<location line="+3"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Tillad DNS-opslag for -addnode, -seednode og -connect</translation>
</message>
<message>
<location line="+122"/>
<source>Loading addresses...</source>
<translation>Indlæser adresser...</translation>
</message>
<message>
<location line="-15"/>
<source>Error loading blkindex.dat</source>
<translation>Fejl ved indlæsning af blkindex.dat</translation>
</message>
<message>
<location line="+2"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>Fejl ved indlæsning af wallet.dat: Tegnebog ødelagt</translation>
</message>
<message>
<location line="+4"/>
<source>Error loading wallet.dat: Wallet requires newer version of AlessioCoin</source>
<translation>Fejl ved indlæsning af wallet.dat: Wallet kræver en nyere version af AlessioCoin</translation>
</message>
<message>
<location line="+1"/>
<source>Wallet needed to be rewritten: restart AlessioCoin to complete</source>
<translation>Det er nødvendig for wallet at blive omskrevet: Genstart AlessioCoin for fuldføre</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat</source>
<translation>Fejl ved indlæsning af wallet.dat</translation>
</message>
<message>
<location line="-16"/>
<source>Invalid -proxy address: '%s'</source>
<translation>Ugyldig -proxy adresse: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>Ukendt netværk anført i -onlynet: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>Ukendt -socks proxy-version: %i</translation>
</message>
<message>
<location line="+4"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>Kan ikke finde -bind adressen: '%s'</translation>
</message>
<message>
<location line="+2"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>Kan ikke finde -externalip adressen: '%s'</translation>
</message>
<message>
<location line="-24"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>Ugyldigt beløb for -paytxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+44"/>
<source>Error: could not start node</source>
<translation>Fejl: kunne ikke starte node</translation>
</message>
<message>
<location line="+11"/>
<source>Sending...</source>
<translation>Sender...</translation>
</message>
<message>
<location line="+5"/>
<source>Invalid amount</source>
<translation>Ugyldigt beløb</translation>
</message>
<message>
<location line="+1"/>
<source>Insufficient funds</source>
<translation>Manglende dækning</translation>
</message>
<message>
<location line="-34"/>
<source>Loading block index...</source>
<translation>Indlæser blokindeks...</translation>
</message>
<message>
<location line="-103"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Tilføj en knude til at forbinde til og forsøg at holde forbindelsen åben</translation>
</message>
<message>
<location line="+122"/>
<source>Unable to bind to %s on this computer. AlessioCoin is probably already running.</source>
<translation>Kunne ikke binde sig til% s på denne computer. AlessioCoin kører sikkert allerede.</translation>
</message>
<message>
<location line="-97"/>
<source>Fee per KB to add to transactions you send</source>
<translation>Gebyr pr KB som tilføjes til transaktioner, du sender</translation>
</message>
<message>
<location line="+55"/>
<source>Invalid amount for -mininput=<amount>: '%s'</source>
<translation>Ugyldigt beløb for -mininput=<beløb>: '%s'</translation>
</message>
<message>
<location line="+25"/>
<source>Loading wallet...</source>
<translation>Indlæser tegnebog...</translation>
</message>
<message>
<location line="+8"/>
<source>Cannot downgrade wallet</source>
<translation>Kan ikke nedgradere tegnebog</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot initialize keypool</source>
<translation>Kan ikke initialisere keypool</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot write default address</source>
<translation>Kan ikke skrive standardadresse</translation>
</message>
<message>
<location line="+1"/>
<source>Rescanning...</source>
<translation>Genindlæser...</translation>
</message>
<message>
<location line="+5"/>
<source>Done loading</source>
<translation>Indlæsning gennemført</translation>
</message>
<message>
<location line="-167"/>
<source>To use the %s option</source>
<translation>For at bruge %s mulighed</translation>
</message>
<message>
<location line="+14"/>
<source>Error</source>
<translation>Fejl</translation>
</message>
<message>
<location line="+6"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>Du skal angive rpcpassword=<password> i konfigurationsfilen:
%s
Hvis filen ikke eksisterer, opret den og giv ingen andre end ejeren læserettighed.</translation>
</message>
</context>
</TS><|fim▁end|>
|
<translation>På testnet</translation>
</message>
|
<|file_name|>UpdateReader.java<|end_file_name|><|fim▁begin|>package io.github.ageofwar.telejam.updates;
import io.github.ageofwar.telejam.Bot;
import io.github.ageofwar.telejam.TelegramException;
import io.github.ageofwar.telejam.methods.GetUpdates;
import java.io.IOException;
import java.util.Collections;
import java.util.Objects;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.function.LongUnaryOperator;
/**
* Utility class that reads new updates received from a bot.
*
* @author Michi Palazzo
*/
public final class UpdateReader implements AutoCloseable {
private final Bot bot;
private final ConcurrentLinkedQueue<Update> updates;
private final LongUnaryOperator backOff;
private long lastUpdateId;
/**
* Constructs an UpdateReader.
*
* @param bot the bot that receive updates
* @param backOff back off to be used when long polling fails
*/
public UpdateReader(Bot bot, LongUnaryOperator backOff) {
this.bot = Objects.requireNonNull(bot);
this.backOff = Objects.requireNonNull(backOff);
updates = new ConcurrentLinkedQueue<>();
lastUpdateId = -1;
}
/**
* Constructs an UpdateReader.
*
* @param bot the bot that receive updates
*/
public UpdateReader(Bot bot) {
this(bot, a -> 500L);
}
/**
* Returns the number of updates that can be read from this update reader without blocking by the
* next invocation read method for this update reader. The next invocation
* might be the same thread or another thread.
* If the available updates are more than {@code Integer.MAX_VALUE}, returns
* {@code Integer.MAX_VALUE}.
*
* @return the number of updates that can be read from this update reader
* without blocking by the next invocation read method
*/
public int available() {
return updates.size();
}
/**
* Tells whether this stream is ready to be read.
*
* @return <code>true</code> if the next read() is guaranteed not to block for input,
* <code>false</code> otherwise. Note that returning false does not guarantee that the
* next read will block.
*/
public boolean ready() {
return !updates.isEmpty();
}
/**
* Reads one update from the stream.
*
* @return the read update
* @throws IOException if an I/O Exception occurs
* @throws InterruptedException if any thread has interrupted the current
* thread while waiting for updates
*/
public Update read() throws IOException, InterruptedException {
    // Block until the internal buffer holds at least one update, polling
    // the API and backing off between consecutive empty responses.
    long attempts = 0;
    while (!ready()) {
        if (getUpdates() > 0) {
            break;
        }
        Thread.sleep(backOff.applyAsLong(attempts));
        attempts++;
    }
    return updates.remove();
}
/**
* Retrieves new updates received from the bot.
*
* @return number of updates received
* @throws IOException if an I/O Exception occurs
*/
public int getUpdates() throws IOException {
// Fetches updates newer than the last one seen and appends them to the
// internal queue.
try {
Update[] newUpdates = getUpdates(lastUpdateId + 1);
Collections.addAll(updates, newUpdates);
if (newUpdates.length > 0) {
// Remember the highest id received so the next poll only asks for newer updates.
lastUpdateId = newUpdates[newUpdates.length - 1].getId();
}
return newUpdates.length;
} catch (Throwable e) {
if (!(e instanceof TelegramException)) {
// NOTE(review): on a non-Telegram failure the offset is advanced anyway,
// which skips one update on the next poll — presumably to step past an
// update that cannot be deserialized; confirm this is intentional, since
// it silently drops that update.
lastUpdateId++;
}
throw e;
}
}
/**
* Discards buffered updates and all received updates.
*
* @throws IOException if an I/O Exception occurs
*/
public void discardAll() throws IOException {
Update[] newUpdate = getUpdates(-1);
if (newUpdate.length == 1) {
lastUpdateId = newUpdate[0].getId();
}
updates.clear();
}<|fim▁hole|> private Update[] getUpdates(long offset) throws IOException {
GetUpdates getUpdates = new GetUpdates()
.offset(offset)
.allowedUpdates();
return bot.execute(getUpdates);
}
@Override
public void close() throws IOException {
// Requests updates starting from the first unread one (or just past the
// last seen id when the buffer is empty) — presumably to acknowledge all
// previously received updates server-side (Telegram long-poll offset
// semantics; confirm) — then resets local state.
try {
Update nextUpdate = updates.peek();
getUpdates(nextUpdate != null ? nextUpdate.getId() : lastUpdateId + 1);
lastUpdateId = -1;
updates.clear();
} catch (IOException e) {
throw new IOException("Unable to close update reader", e);
}
}
}<|fim▁end|>
| |
<|file_name|>test.ts<|end_file_name|><|fim▁begin|>/*
* @license Apache-2.0
*
* Copyright (c) 2019 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<|fim▁hole|>import fibonacciIndex = require( './index' );
// TESTS //
// NOTE: the `$ExpectType` / `$ExpectError` trailing comments below are
// functional assertions consumed by the type-test runner (dtslint-style);
// plain comments like this one are ignored by it.
// The function returns a number...
{
fibonacciIndex( 7 ); // $ExpectType number
}
// The function does not compile if provided a value other than a number...
{
fibonacciIndex( true ); // $ExpectError
fibonacciIndex( false ); // $ExpectError
fibonacciIndex( null ); // $ExpectError
fibonacciIndex( undefined ); // $ExpectError
fibonacciIndex( '5' ); // $ExpectError
fibonacciIndex( [] ); // $ExpectError
fibonacciIndex( {} ); // $ExpectError
fibonacciIndex( ( x: number ): number => x ); // $ExpectError
}
// The function does not compile if provided insufficient arguments...
{
fibonacciIndex(); // $ExpectError
}
| |
<|file_name|>create_geotiffs.py<|end_file_name|><|fim▁begin|>from optparse import make_option
from optparse import OptionParser
import logging
#import os
#import sys
import contextlib
#import hashlib
import datetime
from django.core.management.base import BaseCommand
from django.conf import settings
#from django.db.models import Q
import dateutil
import netCDF4
from lizard_neerslagradar import netcdf
logger = logger = logging.getLogger(__name__)
class Command(BaseCommand):
args = ""
help = "Create a geotiff per timestep from the radar.nc file."
option_list = BaseCommand.option_list + (
make_option(
"--from", action="store", type="string",
dest="from_", default="2011-01-07",
help="Generate geotiffs starting from this datetime. "
"Use a string in the format YYYY-MM-DD HH:MM "
"(fuzzy substrings are allowed)"),
make_option("--skip-existing", action="store_true",
dest="skip_existing", default=False,
help="Skip existing geotiffs"),
)
def handle(self, *args, **options):
    """Create one geotiff per 5-minute radar timestep for 2011-01-07.

    The raw source was scrambled by fill-in-the-middle sentinels (the
    ``try:`` line was displaced after its ``except`` block); this is the
    reconstructed, correctly ordered body.  ``--from`` and
    ``--skip-existing`` are parsed but currently ignored (logged as such).
    """
    parser = OptionParser(option_list=self.option_list)
    (options, args) = parser.parse_args()
    logger.warn("IGNORED from=%s", options.from_)
    logger.warn("IGNORED skip_existing=%s", options.skip_existing)

    # Hard-coded 24-hour window, sampled every 5 minutes.
    time_from = dateutil.parser.parse('2011-01-07T00:00:00.000Z')
    time_to = dateutil.parser.parse('2011-01-08T00:00:00.000Z')

    times_list = [time_from]
    if time_to:
        interval = datetime.timedelta(minutes=5)
        time = time_from
        while time < time_to:
            time += interval
            times_list.append(time)

    # contextlib.closing guarantees the netCDF dataset is closed even if
    # geotiff generation raises.
    nc = netCDF4.Dataset(settings.RADAR_NC_PATH, 'r')
    with contextlib.closing(nc):
        for time in times_list:
            try:
                path = netcdf.time_2_path(time)
                netcdf.mk_geotiff(nc, time, path)
                logger.info('Created geotiff for {}'.format(time))
            except:
                # Best-effort per timestep: log and continue with the rest.
                logger.exception(
                    'While creating geotiff for {}'.format(time))
<|file_name|>craytoolchain.py<|end_file_name|><|fim▁begin|>##
# Copyright 2015-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for installing Cray toolchains, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
@author: Guilherme Peretti Pezzi (CSCS)
@author: Petar Forai (IMP/IMBA)
"""
from easybuild.easyblocks.generic.bundle import Bundle
from easybuild.tools.build_log import EasyBuildError
KNOWN_PRGENVS = ['PrgEnv-cray', 'PrgEnv-gnu', 'PrgEnv-intel', 'PrgEnv-nvidia', 'PrgEnv-pgi']<|fim▁hole|>
class CrayToolchain(Bundle):
"""
Compiler toolchain: generate module file only, nothing to build/install
"""
def prepare_step(self, *args, **kwargs):
"""Prepare build environment (skip loading of dependency modules)."""
# Dependencies are handled by the generated module file itself (see
# make_module_dep), so they must not be loaded into the build env here.
kwargs['load_tc_deps_modules'] = False
super(CrayToolchain, self).prepare_step(*args, **kwargs)
def make_module_dep(self):
"""
Generate load/swap statements for dependencies in the module file
"""
prgenv_mod = None
# collect 'swap' statement for dependencies (except PrgEnv)
swap_deps = []
for dep in self.toolchain.dependencies:
mod_name = dep['full_mod_name']
# determine versionless module name, e.g. 'fftw/3.3.4.1' => 'fftw'
dep_name = '/'.join(mod_name.split('/')[:-1])
if mod_name.startswith('PrgEnv'):
# exactly one PrgEnv-* dependency is expected; remembered for later
prgenv_mod = mod_name
else:
swap_deps.append(self.module_generator.swap_module(dep_name, mod_name).lstrip())
self.log.debug("Swap statements for dependencies of %s: %s", self.full_mod_name, swap_deps)
if prgenv_mod is None:
raise EasyBuildError("Could not find a PrgEnv-* module listed as dependency: %s",
self.toolchain.dependencies)
# unload statements for other PrgEnv modules
# (leading '' makes the final '\n'.join() start with a newline)
prgenv_unloads = ['']
for prgenv in [prgenv for prgenv in KNOWN_PRGENVS if not prgenv_mod.startswith(prgenv)]:
is_loaded_guard = self.module_generator.is_loaded(prgenv)
unload_stmt = self.module_generator.unload_module(prgenv).strip()
prgenv_unloads.append(self.module_generator.conditional_statement(is_loaded_guard, unload_stmt))
# load statement for selected PrgEnv module (only when not loaded yet)
prgenv_load = self.module_generator.load_module(prgenv_mod, recursive_unload=False)
# module text order: unloads of other PrgEnvs, load of ours, then swaps
txt = '\n'.join(prgenv_unloads + [prgenv_load] + swap_deps)
return txt
| |
<|file_name|>update_doc_change_dates.py<|end_file_name|><|fim▁begin|>import sys
from django.core.management.base import BaseCommand
from ietf.community.constants import SIGNIFICANT_STATES
from ietf.community.models import DocumentChangeDates
from ietf.doc.models import Document
class Command(BaseCommand):
help = (u"Update drafts in community lists by reviewing their rules")
def handle(self, *args, **options):
    """Recompute the cached change dates for every draft document.

    The raw source was scrambled by fill-in-the-middle sentinels (the
    ``new_version_date``/``normal_change_date`` assignments were displaced
    past the end of the method); this is the reconstructed order: all three
    dates are set before ``changes.save()``.
    """
    documents = Document.objects.filter(type='draft')
    index = 1
    total = documents.count()
    for doc in documents.iterator():
        (changes, created) = DocumentChangeDates.objects.get_or_create(document=doc)
        new_version = doc.latest_event(type='new_revision')
        normal_change = doc.latest_event()
        # The most recent 'changed_document' event mentioning a significant
        # state (wrapped in <b>...</b> in the event description) wins.
        significant_change = None
        for event in doc.docevent_set.filter(type='changed_document'):
            for state in SIGNIFICANT_STATES:
                if ('<b>%s</b>' % state) in event.desc:
                    significant_change = event
                    break
        # `x and x.time.date()` yields None when the event is missing.
        changes.new_version_date = new_version and new_version.time.date()
        changes.normal_change_date = normal_change and normal_change.time.date()
        changes.significant_change_date = significant_change and significant_change.time.date()
        changes.save()
        # Progress indicator on a single line (carriage return, no newline).
        sys.stdout.write('Document %s/%s\r' % (index, total))
        sys.stdout.flush()
        index += 1
    print
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>use docopt::Docopt;
use eyre::bail;
use serde::Deserialize;
use std::fs::File;
use std::io::{self, BufRead, BufReader};
use std::path::Path;
use std::process;
const USAGE: &str = "
Print or check BLAKE2 (512-bit) checksums.
With no FILE, or when FILE is -, read standard input.
Usage:
b2sum [options] [<filename>]...
b2sum (-h | --help)
b2sum --version
Options:
-c, --check read BLAKE2 sums from the FILEs and check them
-l, --length=BITS digest length in bits; must not exceed the maximum for the
blake2 algorithm and must be a multiple of 8 [default: 512]
--tag create a BSD-style checksum
The following five options are useful only when verifying checksums:
--ignore-missing don't fail or report status for missing files
--quiet don't print OK for each successfully verified file
--status don't output anything, status code shows success
--strict exit non-zero for improperly formatted checksum lines
-w, --warn warn about improperly formatted checksum lines
-h, --help display this help and exit
--version output version information and exit
The sums are computed as described in RFC 7693. When checking, the input
should be a former output of this program. The default mode is to print
a line with checksum and name for each FILE.
";
#[derive(Debug, Deserialize)]
struct Args {
arg_filename: Vec<String>,
flag_check: bool,
flag_ignore_missing: bool,
flag_length: usize,
flag_quiet: bool,
flag_status: bool,
flag_strict: bool,
flag_tag: bool,
flag_version: bool,
flag_warn: bool,
}
fn print_version() -> ! {
let version = env!("CARGO_PKG_VERSION");
println!("b2sum-rs {}", version);
process::exit(0)
}
/// Hashes everything readable from `reader` with BLAKE2b at a digest
/// length of `length` bytes, returning the digest as lowercase hex.
fn hash_reader<R>(length: usize, mut reader: R) -> eyre::Result<String>
where
R: BufRead,
{
let mut digest = blake2b_simd::Params::new().hash_length(length).to_state();
loop {
let count = {
let data = reader.fill_buf()?;
if data.is_empty() {
// fill_buf() returning an empty slice means EOF.
break;
}
digest.update(data);
data.len()
};
// Mark the hashed bytes consumed so the next fill_buf() advances.
reader.consume(count);
}
let output = digest.finalize();
let result = output.to_hex().to_ascii_lowercase();
Ok(result)
}
fn hash_file<P>(length: usize, path: P) -> eyre::Result<String>
where
P: AsRef<Path>,
{
let file = File::open(path)?;
let reader = BufReader::new(file);
hash_reader(length, reader)
}
/// Splits one checksum-file line into `(hash, filename)`.
///
/// The hash is the leading run of hex digits; it must be an even count of
/// characters between 2 and 128. The filename begins after a two-character
/// separator following the hash.
fn split_check_line(line: &str) -> eyre::Result<(&str, &str)> {
    // Byte offset of the first non-hex character (the hex prefix is ASCII,
    // so byte and char indices coincide); 0 when the whole line is hex.
    let hash_end = line.find(|c: char| !c.is_digit(16)).unwrap_or(0);
    if hash_end < 2 || hash_end % 2 != 0 || hash_end > 128 {
        bail!("Invalid hash length: {}", hash_end);
    }
    let (hash, rest) = line.split_at(hash_end);
    if rest.len() < 3 {
        bail!("Malformed line");
    }
    Ok((hash, &rest[2..]))
}
/// Verifies checksum lines read from `reader` against the files they name,
/// printing per-file results unless `--quiet`/`--status` suppress them.
///
/// Returns `Ok(true)` when any file failed verification (or, with
/// `--strict`, when a malformed line was seen); `Ok(false)` otherwise.
///
/// Fix: `split_check_line` accepts uppercase hex digests (`is_digit(16)`),
/// but the digest computed by `hash_file` is always lowercase, so a strict
/// `==` comparison made every uppercase checksum file report FAILED. The
/// comparison is now ASCII-case-insensitive, matching GNU b2sum.
fn check_input<R>(args: &Args, check_filename: &str, reader: R) -> eyre::Result<bool>
where
    R: BufRead,
{
    let print_result = !(args.flag_quiet || args.flag_status);
    let mut errors = false;
    for (i, line) in reader.lines().enumerate() {
        let line = line?;
        let line = line.trim();
        // Comment lines are skipped.
        if line.starts_with('#') {
            continue;
        }
        let (hash, filename) = match split_check_line(line) {
            Ok(pair) => pair,
            Err(e) => {
                // Malformed line: fatal only under --strict, reported under --warn.
                if args.flag_strict {
                    errors = true;
                }
                if args.flag_warn {
                    println!("{}:{}: {}", check_filename, i + 1, e)
                }
                continue;
            }
        };
        // Two hex characters per digest byte.
        let length = hash.len() / 2;
        let calculated_hash = match hash_file(length, filename) {
            Ok(h) => h,
            Err(e) => {
                // --ignore-missing silences only file-not-found errors.
                if let Some(io_err) = e.downcast_ref::<io::Error>() {
                    if io_err.kind() == io::ErrorKind::NotFound && args.flag_ignore_missing {
                        continue;
                    }
                }
                errors = true;
                if !args.flag_status {
                    println!("{}: FAILED {}", filename, e);
                }
                continue;
            }
        };
        // Case-insensitive so uppercase digests in the checksum file verify.
        let matched = hash.eq_ignore_ascii_case(&calculated_hash);
        if !matched {
            errors = true;
        }
        if print_result {
            print!("{}: ", filename);
            if matched {
                println!("OK");
            } else {
                println!("FAILED");
            }
        }
    }
    Ok(errors)
}
fn check_args(args: Args) -> eyre::Result<i32> {
let filename = args.arg_filename[0].as_str();
let errors = if filename == "-" {
let stdin = io::stdin();
check_input(&args, filename, stdin.lock())?
} else {
let file = File::open(filename)?;
let reader = BufReader::new(file);
check_input(&args, filename, reader)?
};
let code = if errors { 1 } else { 0 };
Ok(code)
}
fn hash_args(args: Args) -> eyre::Result<i32> {
let length = args.flag_length / 8;
for filename in args.arg_filename {
let hash = if filename == "-" {
let stdin = io::stdin();
hash_reader(length, stdin.lock())?
} else {
hash_file(length, &filename)?
};
if args.flag_tag {
print!("BLAKE2b");
if args.flag_length < 512 {
print!("-{}", args.flag_length);
}
println!(" ({}) = {}", filename, hash);
} else {
println!("{} {}", hash, filename);
}
}
Ok(0)
}
/// Entry point: parse arguments, then dispatch to check or hash mode.
fn main() -> eyre::Result<()> {
// Install the eyre report handler before any fallible work.
stable_eyre::install()?;
let mut args: Args = Docopt::new(USAGE).and_then(|d| d.deserialize()).unwrap_or_else(|e| e.exit());
if args.flag_version {
print_version();
}
// With no FILE operands, read standard input (coreutils convention).
if args.arg_filename.is_empty() {
args.arg_filename.push("-".to_string());
}
let result = if args.flag_check { check_args(args)? } else { hash_args(args)? };
// Propagate the mode's status code (non-zero on verification failures).
process::exit(result)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn split_check_line_with_valid_line() {
let line = "c0ae24f806df19d850565b234bc37afd5035e7536388290db9413c98578394313f38b093143ecfbc208425d54b9bfef0d9917a9e93910f7914a97e73fea23534 test";
let (hash, filename) = split_check_line(line).unwrap();
assert_eq!(
"c0ae24f806df19d850565b234bc37afd5035e7536388290db9413c98578394313f38b093143ecfbc208425d54b9bfef0d9917a9e93910f7914a97e73fea23534",
hash
);
assert_eq!("test", filename);
}
#[test]
fn split_check_line_with_truncated_line() {
let line = "c0ae24f806df19d850565b234bc37afd5035e7536388290db9413c98578394313f38b093143ecfbc208425d54b9bfef0d9917a9e93910f7914a97e73fea23534 ";
let result = split_check_line(line).unwrap_err();
assert_eq!("Malformed line", result.to_string());
}
#[test]
fn split_check_line_with_missing_filename() {
let line = "c0ae24f806df19d850565b234bc37afd5035e7536388290db9413c98578394313f38b093143ecfbc208425d54b9bfef0d9917a9e93910f7914a97e73fea23534 ";
let result = split_check_line(line).unwrap_err();
assert_eq!("Malformed line", result.to_string());
}
#[test]
fn split_check_line_with_too_small_hash() {
let line = "c test";
let result = split_check_line(line).unwrap_err();
assert_eq!("Invalid hash length: 1", result.to_string());
}
#[test]
fn split_check_line_with_too_long_hash() {
let line = "c0ae24f806df19d850565b234bc37afd5035e7536388290db9413c98578394313f38b093143ecfbc208425d54b9bfef0d9917a9e93910f7914a97e73fea2353400 test";
let result = split_check_line(line).unwrap_err();
assert_eq!("Invalid hash length: 130", result.to_string());
}
#[test]
fn split_check_line_with_non_even_hash() {<|fim▁hole|> let result = split_check_line(line).unwrap_err();
assert_eq!("Invalid hash length: 5", result.to_string());
}
#[test]
fn test_hash_formatting() {
let expected = "7ea59e7a000ec003846b6607dfd5f9217b681dc1a81b0789b464c3995105d93083f7f0a86fca01a1bed27e9f9303ae58d01746e3b20443480bea56198e65bfc5";
assert_eq!(expected, hash_reader(64, "hi\n".as_bytes()).unwrap());
}
}<|fim▁end|>
|
let line = "c0ae0 test";
|
<|file_name|>LD_simulation.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python2
import ringo_config
cfg = ringo_config.RingoConfig()
import pyximport;pyximport.install(build_dir=cfg.pyximport_build())
import argparse
import random
import numpy as np
import model
from simulation import Simulation, SimParameters, EventType, RearrangementType
def run_L_D_simulation(self, L, D):
# L = duplication length
# D = number of DCJs in each branch.
#
param = self.sim_parameters
# pre_dups (at root) and post_dups (at branches) to achieve 1.5 genes/family in average.
pre_duplications = int(0.43 * param.num_genes / L)
post_duplications = int(0.07 * param.num_genes / L)
post_duplications = [int(0.5 * post_duplications), int(1.5 * post_duplications)]
# post_duplications = [int(1 * post_duplications), int(1 * post_duplications)]
param.pre_duplications = pre_duplications
current_copy_number = None # will init at root
deletion_length_range = xrange(1, param.indel_length + 1)
duplication_length_range = xrange(1, L + 1)
idx = 1
ev_tree = self.sim_tree
for ev_node in ev_tree.preorder_node_iter():
if ev_node.parent_node is None:
# identity genome:
ev_node.value = current_genome = model.Genome.identity(param.num_genes, param.num_chr)
ev_node.events = {ev: 0 for ev in EventType.all}
# add copy number information to track orthologous/paralogous, when duplications are present:
for chromosome in current_genome.chromosomes:
chromosome.copy_number = [1] * len(chromosome.gene_order)
current_copy_number = current_genome.gene_count()
# pre-duplications:
for i in range(pre_duplications):
Simulation.apply_random_segmental_duplication(current_genome,
range(1, param.duplication_length + 1),
current_copy_number)
ev_node.events[EventType.DUPLICATION] = pre_duplications
# ev_node.edge.length = pre_duplications
if ev_node.label is None:
ev_node.label = "Root"
else:
# evolve genome:
if ev_node.is_internal():
if ev_node.label is None:
ev_node.label = "M%02d" % idx
idx += 1
else: # complete labelling for leaves
ev_node.label = ev_node.taxon.label
current_genome = ev_node.parent_node.value.clone(ev_node.label)
ev_node.value = current_genome
pd = post_duplications.pop()
ev_node.edge.length = D + pd
# events
events = [EventType.DUPLICATION] * pd + [EventType.REARRANGEMENT] * D
ev_node.edge.events = {ev: 0 for ev in EventType.all}
random.shuffle(events)
for event in events:
if event == EventType.DUPLICATION:
Simulation.apply_random_segmental_duplication(current_genome, duplication_length_range, current_copy_number)
ev_node.edge.events[event] += 1
elif event == EventType.REARRANGEMENT:
# here, I can also have deletions:
ev = np.random.choice([RearrangementType.REVERSAL, EventType.DELETION], 1,
p=[param.rearrangement_p, param.deletion_p])[0]
if ev == RearrangementType.REVERSAL:
Simulation.apply_random_reversal(current_genome)
ev_node.edge.events[event] += 1
else:
Simulation.apply_random_deletion(current_genome, deletion_length_range)<|fim▁hole|> ev_node.edge.events[EventType.DELETION] += 1
ev_node.events = {ev: ev_node.parent_node.events[ev] + count for ev, count in
ev_node.edge.events.iteritems()}
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Simulates rearrangement evolution on a given newick tree")
parser.add_argument("-s", "--sim", type=int, help="Simulate a new birth_death with SIM species")
parser.add_argument("-n", "--num_genes", type=int, default=100, help="Number of genes in the root genome.")
parser.add_argument("-c", "--num_chr", type=int, default=5, help="Number of chromosomes in the root genome.")
parser.add_argument("-L", "-dl", "--duplication_length", type=int, default=5, help="Maximum length of duplication event.")
parser.add_argument("-D", "--rearrangements", type=int, default=5, help="Number of rearrangements.")
parser.add_argument("-o", "--output", type=str, default="sim", help="Name of the output folder.")
parser.add_argument("-dp", "--deletion_p", type=float, default=0.0, help="Percentage of deletions, from 0 to 1.0")
parser.add_argument("-ip", "--insertion_p", type=float, default=0.0, help="Percentage of insertions, from 0 to 1.0")
parser.add_argument("-il", "--indel_length", type=int, default=5, help="Maximum size of indel event in genes.")
parser.add_argument("-d", "--disturb", type=float, default=0,
help="Disturb branch lengths multiplying each by e^r, where r in [-d,+d]. ")
param = parser.parse_args()
# Simulation parameters:
sim_par = SimParameters(num_genes=param.num_genes, num_chr=param.num_chr,
del_p=param.deletion_p, ins_p=param.insertion_p, indel_length=param.indel_length,
duplication_length=param.duplication_length)
# start sim object;
sim = Simulation(param.output, sim_par)
sim.simulate_tree(param.sim)
run_L_D_simulation(sim, param.duplication_length, param.rearrangements)
sim.save_simulation(save_copies=True)<|fim▁end|>
| |
<|file_name|>whitelabel.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
Script to update the current edX iOS App with different names, resources, etc.
Requirements:
pip install pyyaml
"""
import argparse
import logging
import os
import shutil
import subprocess
import sys
import yaml
class WhitelabelApp:
"""
Update the current edX iOS App using configured resources and properties.
"""
EXAMPLE_CONFIG_FILE = """
---
# Notes:
# * All configuration items are optional.
# * Use absolute paths if the property is not relative to the config_dir (or project_dir, for project_* properties).
# Path to your overridden project properties file, which may list your private project config files.
properties: 'edx.properties'
# Path to the Resources to override. Omit to copy no resources.
resources: 'Resources'
# List of patch files to apply to the source. Omit to apply no patches.
patches:
- patches/0001_update_text.patch
- patches/0001_version.patch
# Update the iOS app properties (plist file):
plist:
CFBundleName: 'MySchoolApp'
CFBundleDisplayName: 'MySchoolApp'
CFBundleSpokenName: 'My School App'
FacebookDisplayName: 'MySchoolApp'
CFBundleVersion: 2.6.1.6
CFBundleIconFiles: !!null # using null deletes the property.
CFBundleIcons: !!null
CFBundleIcons~ipad: !!null
# Path to the base dir containing your properties_file and resources dir.
# Defaults to the dir containing config file passed to this script.
config_dir: '/path/to/your/config/'
# You probably don't need to provide anything below this line.
# Defaults are as shown.
# Base dir of the project to update.
project_dir: '.'
# All project_ paths below can be relative to the project_dir
# Path to the application's plist file
project_plist: 'Source/edX-Info.plist'
# Path to the project's Resources dir
project_resources: 'Source/Resources'
# Path to the OSX utility command, PlistBuddy
plist_buddy = '/usr/libexec/PlistBuddy'
# Path to git
git_command = '/usr/bin/env git'
"""
def __init__(self, **kwargs):
# Config-relative paths
self.config_dir = kwargs.get('config_dir')
if not self.config_dir:
self.config_dir = '.'
# Assume that these paths are relative to config_dir.
# (If 'properties' is absolute, then it will be unchanged by the path join)
self.resources = kwargs.get('resources')
if self.resources:
self.resources = os.path.join(self.config_dir, self.resources)
self.patches = []
for patchfile in kwargs.get('patches', []):
self.patches.append(os.path.join(self.config_dir, patchfile))
# Project-relative paths
self.project_dir = kwargs.get('project_dir')
if not self.project_dir:
self.project_dir = '.'
self.project_resources = kwargs.get('project_resources')
if not self.project_resources:
self.project_resources = os.path.join(self.project_dir, 'Source', 'Resources')
self.project_properties = kwargs.get('properties')
if self.project_properties:
self.project_properties = os.path.join(self.project_dir, self.project_properties)
self.project_plist = kwargs.get('project_plist')
if not self.project_plist:
self.project_plist = os.path.join(self.project_dir, 'Source', 'edX-Info.plist')
self.plist = kwargs.get('plist', {})
self.plist_buddy = kwargs.get('plist_buddy')
if not self.plist_buddy:
self.plist_buddy = '/usr/libexec/PlistBuddy'
self.git_command = kwargs.get('git_command')
if not self.git_command:
self.git_command = '/usr/bin/env git'
def whitelabel(self):
"""
Update the properties, resources, and configuration of the current app.
"""<|fim▁hole|> self.update_plist()
self.copy_resources()
else:
logging.error("Update aborted until patches are repaired.")
    def create_project_properties(self):
        """
        Create a project .properties file that points to the config_dir, if configured.

        Writes a single ``edx.dir = '<config_dir>'`` line to the configured
        ``properties`` path; does nothing (beyond a debug log) when either
        the properties path or the config_dir is unset.
        """
        if self.project_properties and self.config_dir:
            logging.info("Creating %s", self.project_properties)
            content = "edx.dir = '{config_dir}'"
            # Overwrite any existing properties file with the single entry.
            with open(self.project_properties, 'w') as f:
                f.write(content.format(config_dir=self.config_dir))
        else:
            logging.debug("Not creating %s, properties or config_dir not set", self.project_properties)
def update_plist(self):
"""
Update the app's plist file.
"""
for name, value in self.plist.items():
if self._update_plist(name, value):
logging.info("Updated %s: %s=%s", self.project_plist, name, value)
else:
logging.error("Error updating %s: %s=%s", self.project_plist, name, value)
def copy_resources(self):
"""
Copy over the existing resources files.
"""
if self.resources:
self._copytree(self.resources, self.project_resources)
else:
logging.debug("No resources to copy to %s", self.project_resources)
    def apply_patches(self):
        """
        Apply the given patches to the project source.

        Patches are applied in order.  On the first failure no further
        patches are applied; the remaining patch paths are logged so they
        can be applied manually.  Returns True when all patches applied
        cleanly (or there were none).
        """
        git_error = False
        for reference in self.patches:
            if git_error:
                # A previous patch failed: just list what still needs applying.
                logging.error("  %s", reference)
            elif not self._apply_patch(reference):
                git_error = True
                logging.error("Issue detected while applying patch %s. "
                              "Please fix the issue and manually apply the remaining patches:", reference)
        if not self.patches:
            logging.debug("No patches to apply")
        return not git_error
    def _copytree(self, src, dst, symlinks=False, ignore=None):
        """
        Recursively copy the files and dirs from src to dst.

        We can't use shutil.copytree here, because it balks if dst exists.

        NOTE(review): ``symlinks`` and ``ignore`` are threaded through the
        recursion but never actually applied to the copy -- confirm whether
        they should behave like shutil.copytree's parameters, or be dropped.
        """
        if not os.path.exists(dst):
            os.makedirs(dst)
        for item in os.listdir(src):
            s = os.path.join(src, item)
            d = os.path.join(dst, item)
            if os.path.isdir(s):
                # Recurse into subdirectories, creating them on demand.
                self._copytree(s, d, symlinks, ignore)
            else:
                logging.info("Copying %s to %s", s, d)
                # copy2 also preserves file metadata (mtime, permissions).
                shutil.copy2(s, d)
    def _update_plist(self, name, value):
        """Update the app .plist file using PlistBuddy.

        A value of ``None`` deletes the property instead of setting it
        (see the ``!!null`` entries in the example config).  Returns True
        when the PlistBuddy invocation exits successfully.
        """
        cmd = 'Delete' if value is None else 'Set'
        command = '{cmd} :{name} {value}'.format(cmd=cmd, name=name, value=value)
        call_args = self.plist_buddy.split(' ') + ['-c', command, self.project_plist]
        return self._system_command(call_args)
    def _apply_patch(self, filename):
        """Apply the given patch using a 3-way merge.

        Runs ``git apply --3way <filename>`` and returns True on success.
        """
        call_args = self.git_command.split(' ') + ['apply', '--3way', filename]
        return self._system_command(call_args)
    @staticmethod
    def _system_command(call_args):
        """Make the given subprocess call, and pipe output/errors to logger.

        Returns True when the command exits with status 0.
        """
        logging.debug("System call: %s", " ".join(call_args))
        process = subprocess.Popen(call_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # communicate() waits for completion and captures both streams.
        (output, error) = process.communicate()
        if output:
            logging.info(output)
        if error:
            logging.error(error)
        return process.returncode == 0
def main():
    """
    Parse the command line arguments, and pass them to WhitelabelApp.

    Exits 0 after printing the sample config (--help-config-file), or 1
    when no config file was given.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--help-config-file', action='store_true', help="Print out a sample config-file, and exit")
    parser.add_argument('--config-file', '-c', help="Path to the configuration file")
    # default=0 is required: action='count' otherwise leaves None when the
    # flag is absent, and `None > 0` raises TypeError on Python 3.
    parser.add_argument('--verbose', '-v', action='count', default=0,
                        help="Enable verbose logging. Repeat -v for more output.")
    args = parser.parse_args()

    if args.help_config_file:
        print(WhitelabelApp.EXAMPLE_CONFIG_FILE)
        sys.exit(0)

    if not args.config_file:
        parser.print_help()
        sys.exit(1)

    # Map -v / -vv to INFO / DEBUG; the default level is WARN.
    log_level = logging.WARN
    if args.verbose > 0:
        log_level = logging.INFO
    if args.verbose > 1:
        log_level = logging.DEBUG
    logging.basicConfig(level=log_level)

    with open(args.config_file) as f:
        # safe_load: the config is plain data (see EXAMPLE_CONFIG_FILE), and
        # yaml.load without a Loader can execute arbitrary Python tags.
        config = yaml.safe_load(f) or {}

    # Use the config_file's directory as the default config_dir
    config.setdefault('config_dir', os.path.dirname(args.config_file))

    whitelabeler = WhitelabelApp(**config)
    whitelabeler.whitelabel()
if __name__ == "__main__":
main()<|fim▁end|>
|
if self.apply_patches():
self.create_project_properties()
|
<|file_name|>test_theanocode.py<|end_file_name|><|fim▁begin|>from sympy.external import import_module
from sympy.utilities.pytest import raises, SKIP
from sympy.core.compatibility import range
theano = import_module('theano')
if theano:
import numpy as np
ts = theano.scalar
tt = theano.tensor
xt, yt, zt = [tt.scalar(name, 'floatX') for name in 'xyz']
else:
#bin/test will not execute any tests now
disabled = True
import sympy
from sympy import S
sy = sympy
from sympy.abc import x, y, z
from sympy.printing.theanocode import (theano_code, dim_handling,
theano_function)
def fgraph_of(*exprs):
    """ Transform SymPy expressions into Theano Computation """
    # Convert each SymPy expression to a Theano output variable, gather the
    # graph's inputs, and clone everything (presumably so the FunctionGraph
    # gets its own copies rather than sharing the cached variables).
    outs = list(map(theano_code, exprs))
    ins = theano.gof.graph.inputs(outs)
    ins, outs = theano.gof.graph.clone(ins, outs)
    return theano.gof.FunctionGraph(ins, outs)
def theano_simplify(fgraph):
    """ Simplify a Theano Computation """
    # Optimize a clone so the caller's graph is untouched.  The "fusion"
    # pass is excluded -- presumably so elemwise ops stay as separate nodes
    # and simplified graphs remain directly comparable.
    mode = theano.compile.get_default_mode().excluding("fusion")
    fgraph = fgraph.clone()
    mode.optimizer.optimize(fgraph)
    return fgraph
def theq(a, b):
    """ theano equality

    Two Theano graphs are considered equal when their debugprint dumps
    match.  On a mismatch both dumps are printed to ease diagnosis.
    """
    astr = theano.printing.debugprint(a, file='str')
    bstr = theano.printing.debugprint(b, file='str')
    equal = astr == bstr
    if not equal:
        print()
        print(astr)
        print(bstr)
    return equal
def test_symbol():
    # A bare SymPy symbol maps to a Theano variable carrying the same name.
    xt = theano_code(x)
    assert isinstance(xt, (tt.TensorVariable, ts.ScalarVariable))
    assert xt.name == x.name
    # An explicit broadcastable pattern is honoured and the name is kept.
    assert theano_code(x, broadcastables={x: (False,)}).broadcastable == (False,)
    assert theano_code(x, broadcastables={x: (False,)}).name == x.name
def test_add():
expr = x + y
comp = theano_code(expr)
assert comp.owner.op == theano.tensor.add
comp = theano_code(expr, broadcastables={x: (False,), y: (False,)})
assert comp.broadcastable == (False,)
comp = theano_code(expr, broadcastables={x: (False, True), y: (False, False)})
assert comp.broadcastable == (False, False)
def test_trig():
assert theq(theano_code(sympy.sin(x)), tt.sin(xt))
assert theq(theano_code(sympy.tan(x)), tt.tan(xt))
def test_many():
expr = sy.exp(x**2 + sy.cos(y)) * sy.log(2*z)
comp = theano_code(expr)
expected = tt.exp(xt**2 + tt.cos(yt)) * tt.log(2*zt)
# assert theq(comp, expected)
def test_dtype():
assert theano_code(x, dtypes={x: 'float32'}).type.dtype == 'float32'
assert theano_code(x, dtypes={x: 'float64'}).type.dtype == 'float64'
assert theano_code(x+1, dtypes={x: 'float32'}).type.dtype == 'float32'
assert theano_code(x+y, dtypes={x: 'float64', y: 'float32'}).type.dtype == 'float64'
def test_MatrixSymbol():
X = sympy.MatrixSymbol('X', 4, 5)
Xt = theano_code(X)
assert isinstance(Xt, tt.TensorVariable)
assert Xt.broadcastable == (False, False)
def test_MatMul():
X = sympy.MatrixSymbol('X', 4, 4)
Y = sympy.MatrixSymbol('X', 4, 4)
Z = sympy.MatrixSymbol('X', 4, 4)
expr = X*Y*Z
assert isinstance(theano_code(expr).owner.op, tt.Dot)
def test_Transpose():
X = sympy.MatrixSymbol('X', 4, 4)
assert isinstance(theano_code(X.T).owner.op, tt.DimShuffle)
def test_MatAdd():
X = sympy.MatrixSymbol('X', 4, 4)
Y = sympy.MatrixSymbol('X', 4, 4)<|fim▁hole|>def test_symbols_are_created_once():
expr = x**x
comp = theano_code(expr)
assert theq(comp, xt**xt)
def test_dim_handling():
assert dim_handling([x], dim=2) == {x: (False, False)}
assert dim_handling([x, y], dims={x: 1, y: 2}) == {x: (False, True),
y: (False, False)}
assert dim_handling([x], broadcastables={x: (False,)}) == {x: (False,)}
def test_Rationals():
assert theq(theano_code(sympy.Integer(2) / 3), tt.true_div(2, 3))
assert theq(theano_code(S.Half), tt.true_div(1, 2))
def test_Integers():
assert theano_code(sympy.Integer(3)) == 3
def test_factorial():
n = sympy.Symbol('n')
assert theano_code(sympy.factorial(n))
def test_Derivative():
simp = lambda expr: theano_simplify(fgraph_of(expr))
assert theq(simp(theano_code(sy.Derivative(sy.sin(x), x, evaluate=False))),
simp(theano.grad(tt.sin(xt), xt)))
def test_theano_function_simple():
f = theano_function([x, y], [x+y])
assert f(2, 3) == 5
def test_theano_function_numpy():
f = theano_function([x, y], [x+y], dim=1,
dtypes={x: 'float64', y: 'float64'})
assert np.linalg.norm(f([1, 2], [3, 4]) - np.asarray([4, 6])) < 1e-9
f = theano_function([x, y], [x+y], dtypes={x: 'float64', y: 'float64'},
dim=1)
xx = np.arange(3).astype('float64')
yy = 2*np.arange(3).astype('float64')
assert np.linalg.norm(f(xx, yy) - 3*np.arange(3)) < 1e-9
def test_theano_function_kwargs():
import numpy as np
f = theano_function([x, y, z], [x+y], dim=1, on_unused_input='ignore',
dtypes={x: 'float64', y: 'float64', z: 'float64'})
assert np.linalg.norm(f([1, 2], [3, 4], [0, 0]) - np.asarray([4, 6])) < 1e-9
f = theano_function([x, y, z], [x+y],
dtypes={x: 'float64', y: 'float64', z: 'float64'},
dim=1, on_unused_input='ignore')
xx = np.arange(3).astype('float64')
yy = 2*np.arange(3).astype('float64')
zz = 2*np.arange(3).astype('float64')
assert np.linalg.norm(f(xx, yy, zz) - 3*np.arange(3)) < 1e-9
def test_slice():
assert theano_code(slice(1, 2, 3)) == slice(1, 2, 3)
assert str(theano_code(slice(1, x, 3), dtypes={x: 'int32'})) ==\
str(slice(1, xt, 3))
def test_MatrixSlice():
n = sympy.Symbol('n', integer=True)
X = sympy.MatrixSymbol('X', n, n)
Y = X[1:2:3, 4:5:6]
Yt = theano_code(Y)
from theano.scalar import Scalar
from theano import Constant
s = Scalar('int64')
assert tuple(Yt.owner.op.idx_list) == (slice(s, s, s), slice(s, s, s))
assert Yt.owner.inputs[0] == theano_code(X)
# == doesn't work in theano like it does in SymPy. You have to use
# equals.
assert [i.equals(j) for i, j in zip(Yt.owner.inputs[1:],[
Constant(s, 1),
Constant(s, 2),
Constant(s, 3),
Constant(s, 4),
Constant(s, 5),
Constant(s, 6),
])]
k = sympy.Symbol('k')
kt = theano_code(k, dtypes={k: 'int32'})
start, stop, step = 4, k, 2
Y = X[start:stop:step]
Yt = theano_code(Y, dtypes={n: 'int32', k: 'int32'})
# assert Yt.owner.op.idx_list[0].stop == kt
def test_BlockMatrix():
n = sympy.Symbol('n', integer=True)
A = sympy.MatrixSymbol('A', n, n)
B = sympy.MatrixSymbol('B', n, n)
C = sympy.MatrixSymbol('C', n, n)
D = sympy.MatrixSymbol('D', n, n)
At, Bt, Ct, Dt = map(theano_code, (A, B, C, D))
Block = sympy.BlockMatrix([[A, B], [C, D]])
Blockt = theano_code(Block)
solutions = [tt.join(0, tt.join(1, At, Bt), tt.join(1, Ct, Dt)),
tt.join(1, tt.join(0, At, Ct), tt.join(0, Bt, Dt))]
assert any(theq(Blockt, solution) for solution in solutions)
@SKIP
def test_BlockMatrix_Inverse_execution():
k, n = 2, 4
dtype = 'float32'
A = sympy.MatrixSymbol('A', n, k)
B = sympy.MatrixSymbol('B', n, n)
inputs = A, B
output = B.I*A
cutsizes = {A: [(n//2, n//2), (k//2, k//2)],
B: [(n//2, n//2), (n//2, n//2)]}
cutinputs = [sympy.blockcut(i, *cutsizes[i]) for i in inputs]
cutoutput = output.subs(dict(zip(inputs, cutinputs)))
dtypes = dict(zip(inputs, [dtype]*len(inputs)))
f = theano_function(inputs, [output], dtypes=dtypes, cache={})
fblocked = theano_function(inputs, [sympy.block_collapse(cutoutput)],
dtypes=dtypes, cache={})
ninputs = [np.random.rand(*x.shape).astype(dtype) for x in inputs]
ninputs = [np.arange(n*k).reshape(A.shape).astype(dtype),
np.eye(n).astype(dtype)]
ninputs[1] += np.ones(B.shape)*1e-5
assert np.allclose(f(*ninputs), fblocked(*ninputs), rtol=1e-5)
def test_DenseMatrix():
t = sy.Symbol('theta')
for MatrixType in [sy.Matrix, sy.ImmutableMatrix]:
X = MatrixType([[sy.cos(t), -sy.sin(t)], [sy.sin(t), sy.cos(t)]])
tX = theano_code(X)
assert isinstance(tX, tt.TensorVariable)
assert tX.owner.op == tt.join
def test_AppliedUndef():
t = sy.Symbol('t')
f = sy.Function('f')
ft = theano_code(f(t))
assert isinstance(ft, tt.TensorVariable)
assert ft.name == 'f_t'
def test_bad_keyword_args_raise_error():
raises(Exception, lambda : theano_function([x], [x+1], foobar=3))
def test_cache():
sx = sy.Symbol('x')
cache = {}
tx = theano_code(sx, cache=cache)
assert theano_code(sx, cache=cache) is tx
assert theano_code(sx, cache={}) is not tx
def test_Piecewise():
# A piecewise linear
xt, yt = theano_code(x), theano_code(y)
expr = sy.Piecewise((0, x<0), (x, x<2), (1, True)) # ___/III
result = theano_code(expr)
assert result.owner.op == tt.switch
expected = tt.switch(xt<0, 0, tt.switch(xt<2, xt, 1))
assert theq(result, expected)
expr = sy.Piecewise((x, x < 0))
result = theano_code(expr)
expected = tt.switch(xt < 0, xt, np.nan)
assert theq(result, expected)
expr = sy.Piecewise((0, sy.And(x>0, x<2)), \
(x, sy.Or(x>2, x<0)))
result = theano_code(expr)
expected = tt.switch(tt.and_(xt>0,xt<2), 0, \
tt.switch(tt.or_(xt>2, xt<0), xt, np.nan))
assert theq(result, expected)
def test_Relationals():
xt, yt = theano_code(x), theano_code(y)
assert theq(theano_code(x > y), xt > yt)
assert theq(theano_code(x < y), xt < yt)
assert theq(theano_code(x >= y), xt >= yt)
assert theq(theano_code(x <= y), xt <= yt)<|fim▁end|>
|
Z = sympy.MatrixSymbol('X', 4, 4)
expr = X+Y+Z
assert isinstance(theano_code(expr).owner.op, tt.Elemwise)
|
<|file_name|>emr_terminate_job_flow_operator.py<|end_file_name|><|fim▁begin|>#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.operators.emr_terminate_job_flow`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.operators.emr_terminate_job_flow import EmrTerminateJobFlowOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.operators.emr_terminate_job_flow`.",
DeprecationWarning,
stacklevel=2,<|fim▁hole|><|fim▁end|>
|
)
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
time_trigger_opts = [
cfg.IntOpt('min_interval',
default=60 * 60,
help='The minimum interval of two adjacent time points. '
'min_interval >= (max_window_time * 2)'),
cfg.IntOpt('min_window_time',
default=900,
help='The minimum window time'),
cfg.IntOpt('max_window_time',
default=1800,
help='The maximum window time'),
cfg.StrOpt('time_format',
default='calendar',
choices=['crontab', 'calendar'],
help='The type of time format which is used to compute time'),
<|fim▁hole|> help='Interval, in seconds, in which Karbor will poll for '
'trigger events'),
cfg.StrOpt('scheduling_strategy',
default='multi_node',
help='Time trigger scheduling strategy '
)
]
CONF = cfg.CONF
CONF.register_opts(time_trigger_opts)<|fim▁end|>
|
cfg.IntOpt('trigger_poll_interval',
default=15,
|
<|file_name|>0019_auto_20170418_1243.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cases', '0018_auto_20170418_1220'),
]
operations = [
migrations.AlterField(
model_name='case',<|fim▁hole|> name='type',
field=models.ForeignKey(related_name='cases', to='cases.CaseType'),
),
]<|fim▁end|>
| |
<|file_name|>vis.py<|end_file_name|><|fim▁begin|>"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
         type_format=None, filename=None):
    """
    Display the results using matplotlib.

    :param graphs: candidate graphs, drawn one per subplot.
    :param request: request graph; its nodes get special edge styling.
    :param titles: one title per candidate graph.
    :param prog: graphviz layout program (default 'neato').
    :param size: (rows, cols) subplot grid; derived from len(graphs) if falsy.
    :param type_format: node-type -> marker mapping; defaults to TYPE_FORMAT.
    :param filename: save the figure to this path instead of showing it.
    """
    if not size:
        size = _get_size(len(graphs))
    fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
    fig.set_facecolor('white')
    # (x_val, y_val) walk the subplot grid row by row; index tracks titles.
    x_val = 0
    y_val = 0
    index = 0
    # With a single row plt.subplots returns a 1-D array; normalize to 2-D
    # so axarr[x_val, y_val] indexing works uniformly.
    if size[0] == 1:
        axarr = np.array(axarr).reshape((1, size[1]))
    for candidate in graphs:
        # axarr[x_val, y_val].axis('off')
        # Strip all tick marks/labels -- only the graph drawing matters.
        axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
        axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
        axarr[x_val, y_val].xaxis.set_ticks([])
        axarr[x_val, y_val].yaxis.set_ticks([])
        axarr[x_val, y_val].set_title(titles[index])
        # axarr[x_val, y_val].set_axis_bgcolor("white")
        if not type_format:
            type_format = TYPE_FORMAT
        _plot_subplot(candidate, request.nodes(), prog, type_format,
                      axarr[x_val, y_val])
        # Advance to the next grid cell, wrapping to the next row.
        y_val += 1
        if y_val > size[1] - 1:
            y_val = 0
            x_val += 1
        index += 1
    fig.tight_layout()
    if filename is not None:
        plt.savefig(filename)
    else:
        plt.show()
    plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:<|fim▁hole|> color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
    """
    Show the candidates in 3d - the request elevated above the container.

    One 3d subplot per candidate graph; the figure is either saved to
    ``filename`` or shown interactively.
    """
    fig = plt.figure(figsize=(18, 10))
    fig.set_facecolor('white')
    n_rows, n_cols = _get_size(len(graphs))
    for idx, graph in enumerate(graphs):
        axes = fig.add_subplot(n_rows, n_cols, idx + 1,
                               projection=Axes3D.name)
        axes.set_title(titles[idx])
        axes._axis3don = False
        _plot_3d_subplot(graph, request, prog, axes)
    fig.tight_layout()
    if filename is not None:
        plt.savefig(filename)
    else:
        plt.show()
    plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
    """
    Plot a single candidate graph in 3d.

    The container graph (candidate minus request nodes) is laid out with
    graphviz and drawn in gray at z=0; request nodes float at z=SPACE in
    blue, positioned near the container node they attach to.
    """
    # cache: request node -> chosen (x, y) position at the elevated layer.
    cache = {}
    tmp = graph.copy()
    for node in request.nodes():
        tmp.remove_node(node)
    pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
    # the container
    for item in tmp.nodes():
        axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
                  marker="o", color='gray')
        axes.text(pos[item][0], pos[item][1], 0, item)
    for src, trg in tmp.edges():
        axes.plot([pos[src][0], pos[trg][0]],
                  [pos[src][1], pos[trg][1]],
                  [0, 0], color='gray')
    # the new nodes
    for item in graph.nodes():
        if item in request.nodes():
            for nghb in graph.neighbors(item):
                if nghb in tmp.nodes():
                    x_val = pos[nghb][0]
                    y_val = pos[nghb][1]
                    # Jitter the position when another request node already
                    # sits at the same spot, so markers don't overlap.
                    # NOTE(review): y_val is jittered from pos[nghb][0] (the
                    # x coordinate) -- looks like it should be pos[nghb][1];
                    # confirm.
                    if (x_val, y_val) in list(cache.values()):
                        x_val = pos[nghb][0] + random.randint(10, SPACE)
                        y_val = pos[nghb][0] + random.randint(10, SPACE)
                    cache[item] = (x_val, y_val)
                    # edge
                    axes.plot([x_val, pos[nghb][0]],
                              [y_val, pos[nghb][1]],
                              [SPACE, 0], color='blue')
            # NOTE(review): x_val/y_val are unbound here if the request node
            # has no neighbor in the container -- would raise NameError.
            axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
                      color='blue')
            axes.text(x_val, y_val, SPACE, item)
    for src, trg in request.edges():
        if trg in cache and src in cache:
            axes.plot([cache[src][0], cache[trg][0]],
                      [cache[src][1], cache[trg][1]],
                      [SPACE, SPACE], color='blue')
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)<|fim▁end|>
|
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
|
<|file_name|>console.ts<|end_file_name|><|fim▁begin|>import chalk = require("chalk");
import { take, select } from "redux-saga/effects";
import path = require("path");
import moment = require("moment");
import { titleize } from "inflection";
import { parallel } from "mesh";
import { weakMemo } from "../memo";
import AnsiUp from "ansi_up";
import { reader } from "../monad";
import { noop } from "lodash";
import { ImmutableObject, createImmutableObject } from "../immutable";
import { LogLevel, LogAction, LogActionTypes, Logger } from "./base";
// beat TS type checking
chalk.enabled = true;
/**
 * Build a string transformer that rewrites matches of `tester` with
 * `replaceValue` (a replacement string or replacer function), returning
 * non-matching input unchanged.
 */
function createLogColorizer(tester: RegExp, replaceValue: any) {
  return (input: string) => {
    return tester.test(input) ? input.replace(tester, replaceValue) : input;
  };
}
export type ConsoleLogState = {
argv?: {
color?: boolean,
hlog?: boolean
},
log?: {
level: LogLevel,
prefix?: string
}
}
<|fim▁hole|>
const highlighters = [
createLogColorizer(/^INF/, (match) => chalk.bgCyan(match)),
createLogColorizer(/^ERR/, (match) => chalk.bgRed(match)),
createLogColorizer(/^DBG/, (match) => chalk.grey.bgBlack(match)),
createLogColorizer(/^WRN/, (match) => chalk.bgYellow(match)),
// timestamp
createLogColorizer(/\[\d+\.\d+\.\d+\]/, (match, inner) => `[${chalk.grey(inner)}]`),
// URL
createLogColorizer(/((\w{3,}\:\/\/)|([^\/\s\("':]+)?\/)([^\/\)\s"':]+\/?)+/g, (match) => {
return chalk.yellow(/\w+:\/\//.test(match) ? match : match.replace(cwd + "/", ""))
}),
// duration
createLogColorizer(/\s\d+(\.\d+)?(s|ms|m|h|d)(\s|$)/g, (match) => chalk.bold.cyan(match)),
// numbers
createLogColorizer(/\b\d+(\.\d+)?\b/g, (match, inner) => `${chalk.cyan(match)}`),
// strings
createLogColorizer(/"(.*?)"/g, (match, inner) => `"${chalk.blue(inner)}"`),
// tokens
createLogColorizer(/([\:\{\}",\(\)]|->|null|undefined|Infinity)/g, (match) => chalk.grey(match)),
// <<output - green (from audio again)
createLogColorizer(/<<(.*)/g, (match, word) => chalk.green(word)),
// >>input - magenta (from audio)
createLogColorizer(/>>(.*)/g, (match, word) => chalk.magenta(word)),
// **BIG EMPHASIS**
createLogColorizer(/\*\*(.*?)\*\*/, (match, word) => chalk.bgBlue(word)),
// *emphasis*
createLogColorizer(/\*(.*?)\*/g, (match, word) => chalk.bold(word)),
// ___underline___
createLogColorizer(/___(.*?)___/g, (match, word) => chalk.underline(word)),
// ~de emphasis~
createLogColorizer(/~(.*?)~/g, (match, word) => chalk.grey(word)),
];
function colorize(input: string) {
let output = input;
for (let i = 0, n = highlighters.length; i < n; i++) output = highlighters[i](output);
return output;
}
/**
 * console.log a string containing HTML `<span style="...">` markup by
 * translating the spans into `%c` style directives.  Extra arguments are
 * passed through unchanged.
 *
 * Fix: the original declared the rest parameter `args` but read the legacy
 * `arguments` object throughout; normalized to `args` (also `var` -> const/let).
 */
function styledConsoleLog(...args: any[]) {
  const argArray = [];
  if (args.length) {
    const startTagRe = /<span\s+style=(['"])([^'"]*)\1\s*>/gi;
    const endTagRe = /<\/span>/gi;
    // Replace each span open/close with a %c placeholder...
    argArray.push(args[0].replace(startTagRe, '%c').replace(endTagRe, '%c'));
    // ...then push the captured inline style (and a reset) for each open tag.
    let reResultArray;
    while (reResultArray = startTagRe.exec(args[0])) {
      argArray.push(reResultArray[2]);
      argArray.push('');
    }
    // pass through subsequent args since chrome dev tools does not (yet) support console.log styling of the following form: console.log('%cBlue!', 'color: blue;', '%cRed!', 'color: red;');
    for (let j = 1; j < args.length; j++) {
      argArray.push(args[j]);
    }
  }
  console.log.apply(console, argArray);
}
// I'm against abbreviations, but it's happening here
// since all of these are the same length -- saves space in stdout, and makes
// logs easier to read.
const PREFIXES = {
[LogLevel.DEBUG]: "DBG ",
[LogLevel.INFO]: "INF ",
[LogLevel.WARNING]: "WRN ",
[LogLevel.ERROR]: "ERR ",
};
const defaultState = { level: LogLevel.ALL, prefix: "" };
export function* consoleLogSaga() {
while(true) {
const { log: { level: acceptedLevel, prefix }}: ConsoleLogState = (yield select()) || defaultState;
let { text, level }: LogAction = (yield take(LogActionTypes.LOG));
if (!(acceptedLevel & level)) continue;
const log = {
[LogLevel.DEBUG]: console.log.bind(console),
[LogLevel.LOG]: console.log.bind(console),
[LogLevel.INFO]: console.info.bind(console),
[LogLevel.WARNING]: console.warn.bind(console),
[LogLevel.ERROR]: console.error.bind(console)
}[level];
text = PREFIXES[level] + (prefix || "") + text;
text = colorize(text);
if (typeof window !== "undefined" && !window["$synthetic"]) {
return styledConsoleLog(new AnsiUp().ansi_to_html(text));
}
log(text);
}
}<|fim▁end|>
|
const cwd = process.cwd();
|
<|file_name|>network.go<|end_file_name|><|fim▁begin|>/* cSploit - a simple penetration testing suite
* Copyright (C) 2016 Massimo Dragano aka tux_mind <[email protected]>
*
* cSploit is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* cSploit is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with cSploit. If not, see <http://www.gnu.org/licenses/\>.
*
*/
package views
import "github.com/cSploit/daemon/models"
// networkIdxElem is the JSON view of a Network used in index (list)
// responses. HideHosts shadows the embedded Network's "hosts" JSON field;
// it is never set, so with omitempty the hosts list is dropped from output.
type networkIdxElem struct {
	models.Network
	HideHosts string `json:"hosts,omitempty"`
}
// networkShowView is the JSON view of a single Network. OverrideHosts
// shadows the embedded Network's "hosts" JSON field with a rendered host
// list; when left nil it is omitted entirely.
type networkShowView struct {
	models.Network
	OverrideHosts interface{} `json:"hosts,omitempty"`
}
// NetworkIndex renders a []models.Network (passed as interface{}) into the
// index view: one networkIdxElem per network, hosts suppressed.
func NetworkIndex(args interface{}) interface{} {
	nets := args.([]models.Network)
	views := make([]networkIdxElem, 0, len(nets))
	for _, net := range nets {
		views = append(views, networkIdxElem{Network: net})
	}
	return views
}
func NetworkShow(arg interface{}) interface{} {
net := arg.(models.Network)
res := networkShowView{Network: net}
if len(net.Hosts) > 0 {
res.OverrideHosts = HostsIndex(net.Hosts)
}
return res<|fim▁hole|>
func networkAsChild(arg interface{}) interface{} {
network := arg.(models.Network)
return networkIdxElem{Network: network}
}<|fim▁end|>
|
}
|
<|file_name|>Digivolution.java<|end_file_name|><|fim▁begin|>package Digivolver;
public class Digivolution{
private Digimon digimon;
private int minDp = 0;
private int maxDp = 0;
/**
 * Returns true when this digivolution's DP window [this.minDp, this.maxDp]
 * overlaps the queried window [minDp, maxDp].
 * Note: the parameters deliberately shadow the fields of the same name;
 * "this." selects the fields on each left-hand side.
 */
public boolean isWithinDp(int minDp, int maxDp){
    return this.minDp<=maxDp && this.maxDp>=minDp;
}
/**
 * @param digimon the Digimon this digivolution produces
 * @param minDp   lower bound (inclusive) of the DP window
 * @param maxDp   upper bound (inclusive) of the DP window
 */
public Digivolution(Digimon digimon, int minDp, int maxDp) {
    this.digimon = digimon;
    this.minDp = minDp;
    this.maxDp = maxDp;
}
public Digimon getDigimon() {
return digimon;<|fim▁hole|> public void setDigimon(Digimon digimon) {
this.digimon = digimon;
}
/** @return lower bound (inclusive) of the DP window */
public int getMinDp(){
    return minDp;
}
/** @return upper bound (inclusive) of the DP window */
public int getMaxDp(){
    return maxDp;
}
}<|fim▁end|>
|
}
|
<|file_name|>DepthType.java<|end_file_name|><|fim▁begin|>/**
*
*/
package com.coin.arbitrage.huobi.util;
<|fim▁hole|> */
public enum DepthType {
STEP0("step0"),
STEP1("step1"),
STEP2("step2"),
STEP3("step3"),
STEP4("step4"),
STEP5("step5");
// Wire value for this aggregation level (e.g. "step0").
private String depth;
// Enum constructor: bind the constant to its wire value.
private DepthType(String depth) {
    this.depth = depth;
}
// Accessor for the wire value. NOTE(review): presumably sent as the depth
// "type" request parameter to the Huobi API -- confirm against the client.
public String getDepth() {
    return depth;
}
}<|fim▁end|>
|
/**
* @author Frank
*
|
<|file_name|>test_driverbase.py<|end_file_name|><|fim▁begin|>#
# Copyright 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import sys
import inspect
import unittest
from mock import patch
from mock import MagicMock
from . import get_driver
from . import get_driver_class<|fim▁hole|>
from .driverbase import VirtDeployDriverBase
if sys.version_info[0] == 3: # pragma: no cover
builtin_import = 'builtins.__import__'
else: # pragma: no cover
builtin_import = '__builtin__.__import__'
def try_import(spec):
    """Wrap an __import__-like callable so failed imports yield mocks.

    Returns a function with the __import__ signature that delegates to
    ``spec`` and substitutes a MagicMock for any module that cannot be
    imported, letting driver modules load without their real dependencies.
    """
    def fake_import(name, globals={}, locals={}, fromlist=[], level=0):
        try:
            module = spec(name, globals, locals, fromlist, level)
        except ImportError:
            module = MagicMock()
        return module
    return fake_import
class TestVirtDeployDriverBase(unittest.TestCase):
    # Contract tests: every concrete driver must fully re-implement the
    # VirtDeployDriverBase interface with matching method signatures.

    def _get_driver_methods(self):
        # All methods declared on the base driver interface.
        return inspect.getmembers(VirtDeployDriverBase, inspect.ismethod)

    def _get_driver_class(self, name):
        # Import the driver class with __import__ patched so that missing
        # third-party dependencies are replaced by MagicMocks (see try_import).
        with patch(builtin_import, spec=True, new_callable=try_import):
            return get_driver_class(name)

    def _get_driver(self, name):
        # Same as _get_driver_class, but returns a driver instance.
        with patch(builtin_import, spec=True, new_callable=try_import):
            return get_driver(name)

    def test_base_not_implemented(self):
        # Every base method must raise NotImplementedError naming itself when
        # called with the right number of (dummy) positional arguments.
        driver = VirtDeployDriverBase()
        for name, method in self._get_driver_methods():
            spec = inspect.getargspec(method)
            with self.assertRaises(NotImplementedError) as cm:
                getattr(driver, name)(*(None,) * (len(spec.args) - 1))
            self.assertEqual(cm.exception.args[0], name)

    def test_drivers_interface(self):
        # Each driver must override every base method (not inherit it) and
        # keep the exact argument specification.
        # NOTE(review): inspect.getargspec was removed in Python 3.11;
        # getfullargspec would be needed on newer interpreters -- confirm the
        # supported Python range before changing.
        for driver_name in get_driver_names():
            driver = self._get_driver_class(driver_name)
            for name, method in self._get_driver_methods():
                driver_method = getattr(driver, name)
                self.assertNotEqual(driver_method, method)
                self.assertEqual(inspect.getargspec(method),
                                 inspect.getargspec(driver_method))

    def test_get_drivers(self):
        # get_driver() must hand back an instance of the base interface.
        for driver_name in get_driver_names():
            driver = self._get_driver(driver_name)
            self.assertTrue(isinstance(driver, VirtDeployDriverBase))
|
from . import get_driver_names
|
<|file_name|>qmlconsoleview.cpp<|end_file_name|><|fim▁begin|>/****************************************************************************
**
** Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies).
** Contact: http://www.qt-project.org/legal
**
** This file is part of Qt Creator.
**
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and Digia. For licensing terms and
** conditions see http://www.qt.io/licensing. For further information
** use the contact form at http://www.qt.io/contact-us.
**
** GNU Lesser General Public License Usage
** Alternatively, this file may be used under the terms of the GNU Lesser
** General Public License version 2.1 or version 3 as published by the Free
** Software Foundation and appearing in the file LICENSE.LGPLv21 and
** LICENSE.LGPLv3 included in the packaging of this file. Please review the
** following information to ensure the GNU Lesser General Public License
** requirements will be met: https://www.gnu.org/licenses/lgpl.html and
** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
**
** In addition, as a special exception, Digia gives you certain additional
** rights. These rights are described in the Digia Qt LGPL Exception
** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
**
****************************************************************************/
#include "qmlconsoleview.h"
#include "qmlconsoleitemdelegate.h"
#include "qmlconsoleitemmodel.h"
#include <coreplugin/editormanager/editormanager.h>
#include <coreplugin/manhattanstyle.h>
#include <utils/hostosinfo.h>
#include <QMouseEvent>
#include <QPainter>
#include <QApplication>
#include <QClipboard>
#include <QAbstractProxyModel>
#include <QFileInfo>
#include <QScrollBar>
#include <QStyleFactory>
#include <QString>
#include <QUrl>
using namespace QmlJS;
namespace QmlJSTools {
namespace Internal {
// Proxy style for the console tree view:
//  - suppresses the default item-view row background (PE_PanelItemViewRow),
//  - reports SH_ItemView_ShowDecorationSelected as 0 so the delegate can
//    paint its own selection appearance.
// Everything else is forwarded to the wrapped ManhattanStyle.
class QmlConsoleViewStyle : public ManhattanStyle
{
public:
    QmlConsoleViewStyle(const QString &baseStyleName) : ManhattanStyle(baseStyleName) {}

    void drawPrimitive(PrimitiveElement element, const QStyleOption *option, QPainter *painter,
                       const QWidget *widget = 0) const
    {
        // Skip the row primitive entirely; delegate everything else.
        if (element != QStyle::PE_PanelItemViewRow)
            ManhattanStyle::drawPrimitive(element, option, painter, widget);
    }

    int styleHint(StyleHint hint, const QStyleOption *option = 0, const QWidget *widget = 0,
                  QStyleHintReturn *returnData = 0) const {
        if (hint == SH_ItemView_ShowDecorationSelected)
            return 0;
        else
            return ManhattanStyle::styleHint(hint, option, widget, returnData);
    }
};
///////////////////////////////////////////////////////////////////////
//
// QmlConsoleView
//
///////////////////////////////////////////////////////////////////////
// Configure the console as a frameless, flat tree view: no header, no root
// decoration, uniform rows, and all branch decorations hidden via stylesheet
// (the item delegate paints its own expand/collapse icon).
QmlConsoleView::QmlConsoleView(QWidget *parent) :
    Utils::TreeView(parent)
{
    setFrameStyle(QFrame::NoFrame);
    setHeaderHidden(true);
    setRootIsDecorated(false);
    setUniformRowHeights(true);
    // Any trigger starts editing, so the input row is immediately usable.
    setEditTriggers(QAbstractItemView::AllEditTriggers);
    setStyleSheet(QLatin1String("QTreeView::branch:has-siblings:!adjoins-item {"
                                "border-image: none;"
                                "image: none; }"
                                "QTreeView::branch:has-siblings:adjoins-item {"
                                "border-image: none;"
                                "image: none; }"
                                "QTreeView::branch:!has-children:!has-siblings:adjoins-item {"
                                "border-image: none;"
                                "image: none; }"
                                "QTreeView::branch:has-children:!has-siblings:closed,"
                                "QTreeView::branch:closed:has-children:has-siblings {"
                                "border-image: none;"
                                "image: none; }"
                                "QTreeView::branch:open:has-children:!has-siblings,"
                                "QTreeView::branch:open:has-children:has-siblings {"
                                "border-image: none;"
                                "image: none; }"));
    // Pick a sane base style for the proxy style below.
    QString baseName = QApplication::style()->objectName();
    if (Utils::HostOsInfo::isAnyUnixHost() && !Utils::HostOsInfo::isMacHost()
            && baseName == QLatin1String("windows")) {
        // Sometimes we get the standard windows 95 style as a fallback
        if (QStyleFactory::keys().contains(QLatin1String("Fusion"))) {
            baseName = QLatin1String("fusion"); // Qt5
        } else { // Qt4
            // e.g. if we are running on a KDE4 desktop
            QByteArray desktopEnvironment = qgetenv("DESKTOP_SESSION");
            if (desktopEnvironment == "kde")
                baseName = QLatin1String("plastique");
            else
                baseName = QLatin1String("cleanlooks");
        }
    }
    // setStyle() does not take ownership; reparent so the style is freed
    // with the view.
    QmlConsoleViewStyle *style = new QmlConsoleViewStyle(baseName);
    setStyle(style);
    style->setParent(this);
    setHorizontalScrollBarPolicy(Qt::ScrollBarAlwaysOff);
    setVerticalScrollBarPolicy(Qt::ScrollBarAlwaysOn);
    setVerticalScrollMode(QAbstractItemView::ScrollPerPixel);
    horizontalScrollBar()->setSingleStep(20);
    verticalScrollBar()->setSingleStep(20);
    connect(this, SIGNAL(activated(QModelIndex)), SLOT(onRowActivated(QModelIndex)));
}
void QmlConsoleView::onScrollToBottom()
{
// Keep scrolling to bottom if scroll bar is not at maximum()
if (verticalScrollBar()->value() != verticalScrollBar()->maximum())
scrollToBottom();
}
void QmlConsoleView::mousePressEvent(QMouseEvent *event)
{
QPoint pos = event->pos();
QModelIndex index = indexAt(pos);
if (index.isValid()) {
ConsoleItem::ItemType type = (ConsoleItem::ItemType)index.data(
QmlConsoleItemModel::TypeRole).toInt();
bool handled = false;
if (type == ConsoleItem::UndefinedType) {<|fim▁hole|> if (positions.expandCollapseIcon().contains(pos)) {
if (isExpanded(index))
setExpanded(index, false);
else
setExpanded(index, true);
handled = true;
}
}
if (!handled)
Utils::TreeView::mousePressEvent(event);
} else {
selectionModel()->setCurrentIndex(model()->index(model()->rowCount() - 1, 0),
QItemSelectionModel::ClearAndSelect);
}
}
void QmlConsoleView::resizeEvent(QResizeEvent *e)
{
static_cast<QmlConsoleItemDelegate *>(itemDelegate())->emitSizeHintChanged(
selectionModel()->currentIndex());
Utils::TreeView::resizeEvent(e);
}
void QmlConsoleView::drawBranches(QPainter *painter, const QRect &rect,
const QModelIndex &index) const
{
static_cast<QmlConsoleItemDelegate *>(itemDelegate())->drawBackground(painter, rect, index,
false);
Utils::TreeView::drawBranches(painter, rect, index);
}
void QmlConsoleView::contextMenuEvent(QContextMenuEvent *event)
{
QModelIndex itemIndex = indexAt(event->pos());
QMenu menu;
QAction *copy = new QAction(tr("&Copy"), this);
copy->setEnabled(itemIndex.isValid());
menu.addAction(copy);
QAction *show = new QAction(tr("&Show in Editor"), this);
show->setEnabled(canShowItemInTextEditor(itemIndex));
menu.addAction(show);
menu.addSeparator();
QAction *clear = new QAction(tr("C&lear"), this);
menu.addAction(clear);
QAction *a = menu.exec(event->globalPos());
if (a == 0)
return;
if (a == copy) {
copyToClipboard(itemIndex);
} else if (a == show) {
onRowActivated(itemIndex);
} else if (a == clear) {
QAbstractProxyModel *proxyModel = qobject_cast<QAbstractProxyModel *>(model());
QmlConsoleItemModel *handler = qobject_cast<QmlConsoleItemModel *>(
proxyModel->sourceModel());
handler->clear();
}
}
void QmlConsoleView::onRowActivated(const QModelIndex &index)
{
if (!index.isValid())
return;
// See if we have file and line Info
QString filePath = model()->data(index, QmlConsoleItemModel::FileRole).toString();
const QUrl fileUrl = QUrl(filePath);
if (fileUrl.isLocalFile())
filePath = fileUrl.toLocalFile();
if (!filePath.isEmpty()) {
QFileInfo fi(filePath);
if (fi.exists() && fi.isFile() && fi.isReadable()) {
int line = model()->data(index, QmlConsoleItemModel::LineRole).toInt();
Core::EditorManager::openEditorAt(fi.canonicalFilePath(), line);
}
}
}
void QmlConsoleView::copyToClipboard(const QModelIndex &index)
{
if (!index.isValid())
return;
QString contents = model()->data(index, QmlConsoleItemModel::ExpressionRole).toString();
// See if we have file and line Info
QString filePath = model()->data(index, QmlConsoleItemModel::FileRole).toString();
const QUrl fileUrl = QUrl(filePath);
if (fileUrl.isLocalFile())
filePath = fileUrl.toLocalFile();
if (!filePath.isEmpty()) {
contents = QString::fromLatin1("%1 %2: %3").arg(contents).arg(filePath).arg(
model()->data(index, QmlConsoleItemModel::LineRole).toString());
}
QClipboard *cb = QApplication::clipboard();
cb->setText(contents);
}
bool QmlConsoleView::canShowItemInTextEditor(const QModelIndex &index)
{
if (!index.isValid())
return false;
// See if we have file and line Info
QString filePath = model()->data(index, QmlConsoleItemModel::FileRole).toString();
const QUrl fileUrl = QUrl(filePath);
if (fileUrl.isLocalFile())
filePath = fileUrl.toLocalFile();
if (!filePath.isEmpty()) {
QFileInfo fi(filePath);
if (fi.exists() && fi.isFile() && fi.isReadable())
return true;
}
return false;
}
} // Internal
} // QmlJSTools<|fim▁end|>
|
bool showTypeIcon = index.parent() == QModelIndex();
ConsoleItemPositions positions(visualRect(index), viewOptions().font, showTypeIcon,
true);
|
<|file_name|>mv-elsewhere.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# Copyright (C) 2012 dcodix
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
""" This script will take filenames from the stdin and copy/move them to a
directory destination preserving the directory tree and attributes.
Most of the functionality is taken from shutil module.
"""
import os
import sys
import stat<|fim▁hole|>from shutil import *
import time
def copydirtree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
        ignore_dangling_symlinks=False):
    """Replicate only the directory skeleton of a tree.

    A modification of shutil.copytree that recreates every sub-directory of
    ``src`` under ``dst`` (copying directory metadata via copystat) while
    skipping files and links entirely.

    ``symlinks``, ``copy_function`` and ``ignore_dangling_symlinks`` are kept
    for signature compatibility with shutil.copytree; only ``ignore`` (a
    callable like shutil.ignore_patterns) affects the result.

    Raises shutil.Error with a list of (src, dst, reason) tuples on failure.
    """
    names = os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()
    os.makedirs(dst)
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        # Recurse into directories only; files and links are skipped.
        if os.path.isdir(srcname):
            copydirtree(srcname, dstname, symlinks, ignore, copy_function)
    try:
        copystat(src, dst)
    except OSError as why:
        # Copying directory access times may fail on Windows; such errors
        # carry a `winerror` attribute and are deliberately ignored.
        # FIX: the old code referenced the name WindowsError directly, which
        # raises NameError on non-Windows interpreters; it also extend()ed
        # the error triple into three separate list items instead of
        # appending one (src, dst, reason) tuple.
        if getattr(why, 'winerror', None) is None:
            errors.append((src, dst, str(why)))
    if errors:
        raise Error(errors)
def printmessage(logstring):
    # Log line prefixed with a unix-epoch timestamp, e.g. "[1345678.9] msg".
    print('[{0}] {1}'.format(time.time(), logstring))
def verbosemessage(logstring):
    # Emit only when the module-level `verbose` flag was set by main().
    if not verbose:
        return
    printmessage(logstring)
def debugmessage(logstring):
    # Emit only when the module-level `debuging` (sic) flag was set by main().
    if not debuging:
        return
    printmessage(logstring)
def main():
    """Copy or move filenames read from stdin into a destination directory,
    recreating each file's directory tree (with metadata) underneath it.
    """
    scriptname = 'mv-elsewhere.py'  # (unused)
    dst = ''
    filemove = False
    override = False
    readstdin = True
    global verbose
    global debuging
    verbose = False
    debuging = False
    exclude = ''
    excludelist = ''
    #GET ARGS
    parser = argparse.ArgumentParser(description='Move files')
    parser.add_argument('-d', '--destdir', nargs=1, help='destination directory')
    parser.add_argument('-D', '--debuging', help='debug', action="store_true")
    parser.add_argument('-m', '--move', help='move instead of copy', action="store_true")
    parser.add_argument('-o', '--override', help='override in destination', action="store_true")
    parser.add_argument('-v', '--verbose', help='verbose', action="store_true")
    parser.add_argument('-e', '--exclude', nargs='+', help='esclude list')
    args = parser.parse_args()
    if args.destdir:
        dst = args.destdir[0]
    if args.debuging:
        # Debug implies verbose.
        verbose = True
        debuging = True
    if args.move:
        filemove = True
    if args.override:
        override = True
    if args.verbose:
        verbose = True
    if args.exclude:
        excludelist = args.exclude
    # PROCESS
    nfiles = 0
    while True:
        excludefile = False
        if readstdin: #This condition is meant to add the future posibility to read files directly from a file instead of stdin.
            file1 = sys.stdin.readline()
            if not file1:
                break
        file1 = file1.rstrip()
        debugmessage('file '+file1)
        fpath = os.path.dirname(file1)
        if len(fpath) == 0:
            # NOTE(review): for a bare filename this sets fpath to the file
            # itself, so the tree copy below would treat a file as a
            # directory -- confirm inputs always carry a directory component.
            fpath = file1
        if debuging:
            print('fpath '+fpath)
        # Any exclude pattern appearing as a substring of the path marks the
        # file as excluded (it is still counted for tree creation below).
        if len(excludelist) != 0:
            for exclude in excludelist:
                if exclude in file1:
                    excludefile = True
                    debugmessage('file '+file1+' will be excluded')
        dfile = dst + '/' + file1
        dpath = dst + '/' + fpath
        # Recreate the source directory tree under dst on first sight.
        if not os.path.isdir(dpath):
            verbosemessage('COPYNG TREE: from '+fpath+' to '+dpath)
            copydirtree(fpath, dpath)
        if not os.path.isdir(file1) and not excludefile:
            if not os.path.exists(dfile) or override:
                if filemove:
                    verbosemessage('MOVING: '+file1+' to '+dfile)
                    move(file1, dfile)
                    nfiles = nfiles + 1
                else:
                    verbosemessage('COPYING: '+file1+' to '+dfile)
                    copy2(file1, dfile)
                    nfiles = nfiles + 1
            else:
                verbosemessage('NOT OVERRIDING: '+dfile)
                pass
        else:
            if excludefile:
                verbosemessage('EXCLUDED: '+file1)
                pass
    # Final summary is always printed, regardless of verbosity.
    if nfiles == 0:
        printmessage('No files have been moved or copied.')
    else:
        if filemove:
            printmessage(str(nfiles)+' files have been moved.')
        else:
            printmessage(str(nfiles)+' files have been copied.')
if __name__ == "__main__":
main()<|fim▁end|>
|
import errno
import getopt
import argparse
|
<|file_name|>itchcork.js<|end_file_name|><|fim▁begin|>define(['./Suite','./SuiteView','./Spy', './Verify'],function (Suite,SuiteView,Spy,Verify) {
var itchCork = {
Suite: Suite,
suiteView: new SuiteView(),
Spy: Spy,
Verify: Verify
};
itchCork.Suite.prototype.linkView(itchCork.suiteView);
<|fim▁hole|><|fim▁end|>
|
window._bTestResults = {};
return itchCork;
});
|
<|file_name|>ClientConfig.java<|end_file_name|><|fim▁begin|>package de.danoeh.antennapod.core;
import android.content.Context;
import de.danoeh.antennapod.core.cast.CastManager;
import de.danoeh.antennapod.core.preferences.PlaybackPreferences;
import de.danoeh.antennapod.core.preferences.SleepTimerPreferences;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.storage.PodDBAdapter;
import de.danoeh.antennapod.core.util.NetworkUtils;<|fim▁hole|>/**
* Stores callbacks for core classes like Services, DB classes etc. and other configuration variables.
* Apps using the core module of AntennaPod should register implementations of all interfaces here.
*/
public class ClientConfig {
private ClientConfig(){}
/**
* Should be used when setting User-Agent header for HTTP-requests.
*/
public static String USER_AGENT;
public static ApplicationCallbacks applicationCallbacks;
public static DownloadServiceCallbacks downloadServiceCallbacks;
public static PlaybackServiceCallbacks playbackServiceCallbacks;
public static GpodnetCallbacks gpodnetCallbacks;
public static FlattrCallbacks flattrCallbacks;
public static DBTasksCallbacks dbTasksCallbacks;
public static CastCallbacks castCallbacks;
private static boolean initialized = false;
/**
 * One-time, thread-safe bootstrap of the core singletons (database,
 * preferences, update manager, networking, Cast, sleep timer).
 * Subsequent calls are no-ops.
 *
 * @param context context handed to every init() call
 */
public static synchronized void initialize(Context context) {
    if(initialized) {
        return;
    }
    // NOTE(review): the init order below looks deliberate (DB and user
    // preferences before the dependent subsystems) -- confirm before
    // reordering.
    PodDBAdapter.init(context);
    UserPreferences.init(context);
    UpdateManager.init(context);
    PlaybackPreferences.init(context);
    NetworkUtils.init(context);
    CastManager.init(context);
    SleepTimerPreferences.init(context);
    initialized = true;
}
}<|fim▁end|>
| |
<|file_name|>chat.rs<|end_file_name|><|fim▁begin|>/*
copyright: (c) 2013-2019 by Blockstack PBC, a public benefit corporation.
This file is part of Blockstack.
Blockstack is free software. You may redistribute or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License or
(at your option) any later version.
Blockstack is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY, including without the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Blockstack. If not, see <http://www.gnu.org/licenses/>.
*/
use std::mem;
use net::PeerAddress;
use net::Neighbor;
use net::NeighborKey;
use net::Error as net_error;
use net::db::PeerDB;
use net::asn::ASEntry4;
use net::*;
use net::codec::*;
use net::relay::*;
use net::StacksMessage;
use net::StacksP2P;
use net::GetBlocksInv;
use net::BLOCKS_INV_DATA_MAX_BITLEN;
use net::connection::ConnectionP2P;
use net::connection::ReplyHandleP2P;
use net::connection::ConnectionOptions;
use net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY;
use net::db::*;
use util::db::Error as db_error;
use util::db::DBConn;
use util::secp256k1::Secp256k1PublicKey;
use util::secp256k1::Secp256k1PrivateKey;
use burnchains::PublicKey;
use chainstate::burn::db::burndb;
use chainstate::burn::db::burndb::BurnDB;
use chainstate::stacks::db::StacksChainState;
use chainstate::stacks::StacksBlockHeader;
use chainstate::stacks::StacksPublicKey;
use burnchains::Burnchain;
use burnchains::BurnchainView;
use std::net::SocketAddr;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::collections::HashSet;
use std::io::Read;
use std::io::Write;
use std::convert::TryFrom;
use util::log;
use util::get_epoch_time_secs;
use util::hash::to_hex;
use rand;
use rand::Rng;
use rand::thread_rng;
// did we or did we not successfully send a message?
#[derive(Debug, Clone)]
pub struct NeighborHealthPoint {
    /// Whether the exchange with the peer succeeded.
    pub success: bool,
    /// When the sample was taken (unix epoch seconds).
    pub time: u64
}
impl Default for NeighborHealthPoint {
    /// A blank sample: no success recorded, at time zero.
    fn default() -> NeighborHealthPoint {
        NeighborHealthPoint { success: false, time: 0 }
    }
}
/// Number of health samples retained per neighbor.
pub const NUM_HEALTH_POINTS : usize = 32;
/// Samples older than this (seconds) no longer count toward the health score.
pub const HEALTH_POINT_LIFETIME : u64 = 12 * 3600; // 12 hours
/// Number of block/microblock/transaction push samples retained.
pub const NUM_BLOCK_POINTS : usize = 32;
/// Push samples older than this (seconds) are excluded from bandwidth totals.
pub const BLOCK_POINT_LIFETIME : u64 = 600;
pub const MAX_PEER_HEARTBEAT_INTERVAL : usize = 3600 * 6; // 6 hours
/// Statistics on relayer hints in Stacks messages. Used to deduce network choke points.
/// Tracked per relayer address inside NeighborStats::relayed_messages.
#[derive(Debug, Clone)]
pub struct RelayStats {
    pub num_messages: u64, // how many messages a relayer has pushed to this neighbor
    pub num_bytes: u64, // how many bytes a relayer has pushed to this neighbor
    pub last_seen: u64, // the last time (in seconds) since we've seen this relayer
}
impl RelayStats {
    /// An empty stats record: no messages, never seen.
    pub fn new() -> RelayStats {
        RelayStats {
            num_messages: 0,
            num_bytes: 0,
            last_seen: 0
        }
    }

    /// Combine two relayers' stats
    /// Counters from `other` are folded in only when `other` was seen more
    /// recently than this record; `last_seen` is then reset to "now".
    pub fn merge(&mut self, other: RelayStats) {
        if other.last_seen > self.last_seen {
            self.num_messages += other.num_messages;
            self.num_bytes += other.num_bytes;
            self.last_seen = get_epoch_time_secs();
        }
    }
}
#[derive(Debug, Clone)]
pub struct NeighborStats {
    /// Whether we initiated this conversation (outbound) or the peer did.
    pub outbound: bool,
    pub first_contact_time: u64,
    pub last_contact_time: u64,
    pub last_send_time: u64,
    pub last_recv_time: u64,
    pub last_handshake_time: u64,
    pub bytes_tx: u64,
    pub bytes_rx: u64,
    pub msgs_tx: u64,
    pub msgs_rx: u64,
    pub msgs_rx_unsolicited: u64,
    pub msgs_err: u64,
    /// Rolling window of success/failure samples, bounded by NUM_HEALTH_POINTS.
    pub healthpoints: VecDeque<NeighborHealthPoint>,
    /// Per-message-type receive counts.
    pub msg_rx_counts: HashMap<StacksMessageID, u64>,
    pub block_push_rx_counts: VecDeque<(u64, u64)>, // (timestamp in epoch seconds, num bytes) -- see add_block_push()
    pub microblocks_push_rx_counts: VecDeque<(u64, u64)>, // (timestamp in epoch seconds, num bytes)
    pub transaction_push_rx_counts: VecDeque<(u64, u64)>, // (timestamp in epoch seconds, num bytes)
    /// Stats per relayer that pushed data to us, keyed by relayer address.
    pub relayed_messages: HashMap<NeighborAddress, RelayStats>
}
impl NeighborStats {
    /// Fresh, all-zero stats for a conversation of the given direction.
    pub fn new(outbound: bool) -> NeighborStats {
        NeighborStats {
            outbound: outbound,
            first_contact_time: 0,
            last_contact_time: 0,
            last_send_time: 0,
            last_recv_time: 0,
            last_handshake_time: 0,
            bytes_tx: 0,
            bytes_rx: 0,
            msgs_tx: 0,
            msgs_rx: 0,
            msgs_rx_unsolicited: 0,
            msgs_err: 0,
            healthpoints: VecDeque::new(),
            msg_rx_counts: HashMap::new(),
            block_push_rx_counts: VecDeque::new(),
            microblocks_push_rx_counts: VecDeque::new(),
            transaction_push_rx_counts: VecDeque::new(),
            relayed_messages: HashMap::new(),
        }
    }

    /// Record the outcome of one exchange, evicting the oldest samples so at
    /// most NUM_HEALTH_POINTS are kept.
    pub fn add_healthpoint(&mut self, success: bool) -> () {
        let hp = NeighborHealthPoint {
            success: success,
            time: get_epoch_time_secs()
        };
        self.healthpoints.push_back(hp);
        while self.healthpoints.len() > NUM_HEALTH_POINTS {
            self.healthpoints.pop_front();
        }
    }

    /// Record a pushed block of `message_size` bytes (timestamped now),
    /// keeping at most NUM_BLOCK_POINTS samples.
    pub fn add_block_push(&mut self, message_size: u64) -> () {
        self.block_push_rx_counts.push_back((get_epoch_time_secs(), message_size));
        while self.block_push_rx_counts.len() > NUM_BLOCK_POINTS {
            self.block_push_rx_counts.pop_front();
        }
    }

    /// Record a pushed microblock stream of `message_size` bytes.
    pub fn add_microblocks_push(&mut self, message_size: u64) -> () {
        self.microblocks_push_rx_counts.push_back((get_epoch_time_secs(), message_size));
        while self.microblocks_push_rx_counts.len() > NUM_BLOCK_POINTS {
            self.microblocks_push_rx_counts.pop_front();
        }
    }

    /// Record a pushed transaction of `message_size` bytes.
    pub fn add_transaction_push(&mut self, message_size: u64) -> () {
        self.transaction_push_rx_counts.push_back((get_epoch_time_secs(), message_size));
        while self.transaction_push_rx_counts.len() > NUM_BLOCK_POINTS {
            self.transaction_push_rx_counts.pop_front();
        }
    }

    /// Credit `addr` with one more relayed message of `num_bytes` bytes,
    /// creating its stats record on first sight.
    pub fn add_relayer(&mut self, addr: NeighborAddress, num_bytes: u64) -> () {
        if let Some(stats) = self.relayed_messages.get_mut(&addr) {
            stats.num_messages += 1;
            stats.num_bytes += num_bytes;
            stats.last_seen = get_epoch_time_secs();
        }
        else {
            let info = RelayStats {
                num_messages: 1,
                num_bytes: num_bytes,
                last_seen: get_epoch_time_secs(),
            };
            self.relayed_messages.insert(addr, info);
        }
    }

    /// Drain and return the accumulated relayer stats, leaving an empty map.
    pub fn take_relayers(&mut self) -> HashMap<NeighborAddress, RelayStats> {
        let ret = mem::replace(&mut self.relayed_messages, HashMap::new());
        ret
    }

    /// Get a peer's perceived health -- the last $NUM_HEALTH_POINTS successful messages divided by
    /// the total.
    pub fn get_health_score(&self) -> f64 {
        // if we don't have enough data, assume 50%
        if self.healthpoints.len() < NUM_HEALTH_POINTS {
            return 0.5;
        }

        let mut successful = 0;
        let mut total = 0;
        let now = get_epoch_time_secs();
        for hp in self.healthpoints.iter() {
            // penalize stale data points -- only look at recent data
            if hp.success && now < hp.time + HEALTH_POINT_LIFETIME {
                successful += 1;
            }
            total += 1;
        }
        // total >= NUM_HEALTH_POINTS here, so the division is well-defined.
        (successful as f64) / (total as f64)
    }

    /// Average bytes/second over the sampled window, counting only samples
    /// newer than `lifetime` seconds. With fewer than two samples the rate is
    /// 0.0; when all samples share one timestamp the raw byte total is
    /// returned instead of a rate.
    fn get_bandwidth(rx_counts: &VecDeque<(u64, u64)>, lifetime: u64) -> f64 {
        if rx_counts.len() < 2 {
            return 0.0;
        }

        // Window boundaries come from the oldest and newest sample timestamps.
        let elapsed_time_start = rx_counts.front().unwrap().0;
        let elapsed_time_end = rx_counts.back().unwrap().0;
        let now = get_epoch_time_secs();

        let mut total_bytes = 0;
        for (time, size) in rx_counts.iter() {
            if now < time + lifetime {
                total_bytes += size;
            }
        }

        if elapsed_time_start == elapsed_time_end {
            total_bytes as f64
        }
        else {
            (total_bytes as f64) / ((elapsed_time_end - elapsed_time_start) as f64)
        }
    }

    /// Get a peer's total block-push bandwidth usage.
    pub fn get_block_push_bandwidth(&self) -> f64 {
        NeighborStats::get_bandwidth(&self.block_push_rx_counts, BLOCK_POINT_LIFETIME)
    }

    /// Get a peer's total microblock-push bandwidth usage.
    pub fn get_microblocks_push_bandwidth(&self) -> f64 {
        NeighborStats::get_bandwidth(&self.microblocks_push_rx_counts, BLOCK_POINT_LIFETIME)
    }

    /// Get a peer's total transaction-push bandwidth usage
    pub fn get_transaction_push_bandwidth(&self) -> f64 {
        NeighborStats::get_bandwidth(&self.transaction_push_rx_counts, BLOCK_POINT_LIFETIME)
    }

    /// Determine how many of a particular message this peer has received
    pub fn get_message_recv_count(&self, msg_id: StacksMessageID) -> u64 {
        *(self.msg_rx_counts.get(&msg_id).unwrap_or(&0))
    }
}
/// P2P ongoing conversation with another Stacks peer
pub struct ConversationP2P {
    /// When this conversation was created (unix epoch seconds).
    pub instantiated: u64,

    /// Our network ID and peer-protocol version.
    pub network_id: u32,
    pub version: u32,
    /// Underlying connection and its registration id.
    pub connection: ConnectionP2P,
    pub conn_id: usize,

    pub burnchain: Burnchain, // copy of our burnchain config
    pub heartbeat: u32, // how often do we send heartbeats?

    /// The remote peer's advertised identity, as learned over the wire.
    pub peer_network_id: u32,
    pub peer_version: u32,
    pub peer_services: u16,
    pub peer_addrbytes: PeerAddress, // from socketaddr
    pub peer_port: u16, // from socketaddr
    pub handshake_addrbytes: PeerAddress, // from handshake
    pub handshake_port: u16, // from handshake
    pub peer_heartbeat: u32, // how often do we need to ping the remote peer?
    pub peer_expire_block_height: u64, // when does the peer's key expire?

    pub data_url: UrlString, // where does this peer's data live? Set to a 0-length string if not known.

    // highest block height and consensus hash this peer has seen
    pub burnchain_tip_height: u64,
    pub burnchain_tip_consensus_hash: ConsensusHash,
    pub burnchain_stable_tip_height: u64,
    pub burnchain_stable_tip_consensus_hash: ConsensusHash,

    /// Health/traffic accounting for this peer.
    pub stats: NeighborStats,

    // outbound replies
    pub reply_handles: VecDeque<ReplyHandleP2P>
}
// Human-readable identity: connection id, direction, and neighbor key.
impl fmt::Display for ConversationP2P {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "convo:id={},outbound={},peer={:?}", self.conn_id, self.stats.outbound, &self.to_neighbor_key())
    }
}
// Debug output deliberately mirrors Display (same format string).
impl fmt::Debug for ConversationP2P {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "convo:id={},outbound={},peer={:?}", self.conn_id, self.stats.outbound, &self.to_neighbor_key())
    }
}
impl NeighborKey {
    /// Build a key from the address a peer reported in its handshake.
    pub fn from_handshake(peer_version: u32, network_id: u32, handshake_data: &HandshakeData) -> NeighborKey {
        NeighborKey {
            peer_version,
            network_id,
            addrbytes: handshake_data.addrbytes.clone(),
            port: handshake_data.port,
        }
    }

    /// Build a key from an actual socket address.
    pub fn from_socketaddr(peer_version: u32, network_id: u32, addr: &SocketAddr) -> NeighborKey {
        NeighborKey {
            peer_version,
            network_id,
            addrbytes: PeerAddress::from_socketaddr(addr),
            port: addr.port(),
        }
    }
}
impl Neighbor {
    /// Update fields in this neighbor from a given handshake.
    /// Also, re-calculate the peer's ASN and organization ID
    pub fn handshake_update(&mut self, conn: &DBConn, handshake_data: &HandshakeData) -> Result<(), net_error> {
        let pubk = handshake_data.node_public_key.to_public_key()?;
        let asn_opt = PeerDB::asn_lookup(conn, &handshake_data.addrbytes)
            .map_err(net_error::DBError)?;

        // 0 means "unknown ASN" throughout this impl.
        let asn = match asn_opt {
            Some(a) => a,
            None => 0
        };

        self.public_key = pubk;
        self.expire_block = handshake_data.expire_block_height;
        self.last_contact_time = get_epoch_time_secs();

        if asn != 0 {
            self.asn = asn;
            self.org = asn; // TODO; AS number is a place-holder for an organization ID (an organization can own multiple ASs)
        }

        Ok(())
    }

    /// Build (or refresh) a Neighbor from a handshake: reuse the stored peer
    /// record when we already know this address, otherwise start from an
    /// empty record, then apply handshake_update().
    pub fn from_handshake(conn: &DBConn, peer_version: u32, network_id: u32, handshake_data: &HandshakeData) -> Result<Neighbor, net_error> {
        let addr = NeighborKey::from_handshake(peer_version, network_id, handshake_data);
        let pubk = handshake_data.node_public_key.to_public_key()?;

        let peer_opt = PeerDB::get_peer(conn, network_id, &addr.addrbytes, addr.port)
            .map_err(net_error::DBError)?;

        let mut neighbor = match peer_opt {
            Some(neighbor) => {
                let mut ret = neighbor.clone();
                ret.addr = addr.clone();
                ret
            },
            None => {
                let ret = Neighbor::empty(&addr, &pubk, handshake_data.expire_block_height);
                ret
            }
        };

        #[cfg(test)]
        {
            // setting BLOCKSTACK_NEIGHBOR_TEST_${PORTNUMBER} will let us select an organization
            // for this peer
            use std::env;
            match env::var(format!("BLOCKSTACK_NEIGHBOR_TEST_{}", addr.port).to_string()) {
                Ok(asn_str) => {
                    neighbor.asn = asn_str.parse().unwrap();
                    neighbor.org = neighbor.asn;
                    test_debug!("Override {:?} to ASN/org {}", &neighbor.addr, neighbor.asn);
                },
                Err(_) => {}
            };
        }

        neighbor.handshake_update(conn, &handshake_data)?;
        Ok(neighbor)
    }

    /// Look up the stored Neighbor record for an ongoing conversation,
    /// back-filling its ASN from the DB if it is still unknown.
    /// Returns Ok(None) when the peer is not in the DB.
    pub fn from_conversation(conn: &DBConn, convo: &ConversationP2P) -> Result<Option<Neighbor>, net_error> {
        let addr = convo.to_neighbor_key();
        let peer_opt = PeerDB::get_peer(conn, addr.network_id, &addr.addrbytes, addr.port)
            .map_err(net_error::DBError)?;

        match peer_opt {
            None => {
                Ok(None)
            },
            Some(mut peer) => {
                if peer.asn == 0 {
                    let asn_opt = PeerDB::asn_lookup(conn, &addr.addrbytes)
                        .map_err(net_error::DBError)?;

                    match asn_opt {
                        Some(a) => {
                            if a != 0 {
                                peer.asn = a;
                            }
                        },
                        None => {}
                    };
                }
                Ok(Some(peer))
            }
        }
    }
}
<|fim▁hole|> instantiated: get_epoch_time_secs(),
network_id: network_id,
version: version,
connection: ConnectionP2P::new(StacksP2P::new(), conn_opts, None),
conn_id: conn_id,
heartbeat: conn_opts.heartbeat,
burnchain: burnchain.clone(),
peer_network_id: 0,
peer_version: 0,
peer_addrbytes: PeerAddress::from_socketaddr(peer_addr),
peer_port: peer_addr.port(),
handshake_addrbytes: PeerAddress([0u8; 16]),
handshake_port: 0,
peer_heartbeat: 0,
peer_services: 0,
peer_expire_block_height: 0,
data_url: UrlString::try_from("".to_string()).unwrap(),
burnchain_tip_height: 0,
burnchain_tip_consensus_hash: ConsensusHash([0x00; 20]),
burnchain_stable_tip_height: 0,
burnchain_stable_tip_consensus_hash: ConsensusHash([0x00; 20]),
stats: NeighborStats::new(outbound),
reply_handles: VecDeque::new(),
}
}
    /// Install (or clear) the remote peer's public key on the underlying connection.
    pub fn set_public_key(&mut self, pubkey_opt: Option<Secp256k1PublicKey>) -> () {
        self.connection.set_public_key(pubkey_opt);
    }
    /// Key identifying the remote peer by the address we actually see on the socket.
    pub fn to_neighbor_key(&self) -> NeighborKey {
        NeighborKey {
            peer_version: self.peer_version,
            network_id: self.peer_network_id,
            addrbytes: self.peer_addrbytes.clone(),
            port: self.peer_port
        }
    }
    /// Key identifying the remote peer by the address it reported about itself in its handshake.
    pub fn to_handshake_neighbor_key(&self) -> NeighborKey {
        NeighborKey {
            peer_version: self.peer_version,
            network_id: self.peer_network_id,
            addrbytes: self.handshake_addrbytes.clone(),
            port: self.handshake_port
        }
    }
    /// Address (socket-observed) plus public key hash of the remote peer.
    /// The hash is all-zeros if we have not yet learned the peer's public key.
    pub fn to_neighbor_address(&self) -> NeighborAddress {
        let pubkh =
            if let Some(ref pubk) = self.ref_public_key() {
                Hash160::from_data(&pubk.to_bytes())
            }
            else {
                Hash160([0u8; 20])
            };
        NeighborAddress {
            addrbytes: self.peer_addrbytes.clone(),
            port: self.peer_port,
            public_key_hash: pubkh
        }
    }
    /// Address (handshake-reported) plus public key hash of the remote peer.
    /// The hash is all-zeros if we have not yet learned the peer's public key.
    pub fn to_handshake_neighbor_address(&self) -> NeighborAddress {
        let pubkh =
            if let Some(ref pubk) = self.ref_public_key() {
                Hash160::from_data(&pubk.to_bytes())
            }
            else {
                Hash160([0u8; 20])
            };
        NeighborAddress {
            addrbytes: self.handshake_addrbytes.clone(),
            port: self.handshake_port,
            public_key_hash: pubkh
        }
    }
    /// Did we initiate this connection?
    pub fn is_outbound(&self) -> bool {
        self.stats.outbound
    }
    /// A conversation is authenticated once the connection has the peer's public key.
    pub fn is_authenticated(&self) -> bool {
        self.connection.has_public_key()
    }
    /// Owned copy of the peer's public key, if known.
    pub fn get_public_key(&self) -> Option<StacksPublicKey> {
        self.connection.get_public_key()
    }
    /// Borrowed view of the peer's public key, if known.
    pub fn ref_public_key(&self) -> Option<&StacksPublicKey> {
        self.connection.ref_public_key()
    }
    /// Burnchain tip height the peer last reported.
    pub fn get_burnchain_tip_height(&self) -> u64 {
        self.burnchain_tip_height
    }
    /// Stable burnchain tip height the peer last reported.
    pub fn get_stable_burnchain_tip_height(&self) -> u64 {
        self.burnchain_stable_tip_height
    }
    /// Consensus hash at the peer's reported burnchain tip.
    pub fn get_burnchain_tip_consensus_hash(&self) -> ConsensusHash {
        self.burnchain_tip_consensus_hash.clone()
    }
    /// Consensus hash at the peer's reported stable burnchain tip.
    pub fn get_stable_burnchain_tip_consensus_hash(&self) -> ConsensusHash {
        self.burnchain_stable_tip_consensus_hash.clone()
    }
/// Determine whether or not a given (height, consensus_hash) pair _disagrees_ with our
/// burnchain view. If it does, return true. If it doesn't (including if the given pair is
/// simply absent from the chain_view), then return False.
fn check_consensus_hash_disagreement(block_height: u64, their_consensus_hash: &ConsensusHash, chain_view: &BurnchainView) -> bool {
let ch = match chain_view.last_consensus_hashes.get(&block_height) {
Some(ref ch) => {
ch.clone()
},
None => {
// not present; can't prove disagreement (assume the remote peer is just stale)
return false;
}
};
*ch != *their_consensus_hash
}
    /// Validate an inbound message's preamble against our knowledge of the burn chain.
    /// Return Ok(true) if we can proceed
    /// Return Ok(false) if we can't proceed, but the remote peer is not in violation of the protocol
    /// Return Err(net_error::InvalidMessage) if the remote peer returns an invalid message in
    /// violation of the protocol
    ///
    /// Checks, in order: network ID, major protocol version, internal consistency of the
    /// stable/unstable burn heights, staleness bound, and stable consensus hash agreement.
    pub fn is_preamble_valid(&self, msg: &StacksMessage, chain_view: &BurnchainView) -> Result<bool, net_error> {
        if msg.preamble.network_id != self.network_id {
            // not on our network
            test_debug!("wrong network ID: {:x} != {:x}", msg.preamble.network_id, self.network_id);
            return Err(net_error::InvalidMessage);
        }
        if (msg.preamble.peer_version & 0xff000000) != (self.version & 0xff000000) {
            // major version mismatch
            test_debug!("wrong peer version: {:x} != {:x}", msg.preamble.peer_version, self.version);
            return Err(net_error::InvalidMessage);
        }
        // the unstable height must be exactly stable height + stable_confirmations;
        // checked_add() guards against a maliciously huge stable height overflowing.
        if msg.preamble.burn_stable_block_height.checked_add(self.burnchain.stable_confirmations as u64) != Some(msg.preamble.burn_block_height) {
            // invalid message
            test_debug!("wrong stable block height: {:?} != {}", msg.preamble.burn_stable_block_height.checked_add(self.burnchain.stable_confirmations as u64), msg.preamble.burn_block_height);
            return Err(net_error::InvalidMessage);
        }
        if msg.preamble.burn_stable_block_height > chain_view.burn_block_height + MAX_NEIGHBOR_BLOCK_DELAY {
            // this node is too far ahead of us, but otherwise still potentially valid
            test_debug!("remote peer is too far ahead of us: {} > {}", msg.preamble.burn_stable_block_height, chain_view.burn_block_height);
            return Ok(false);
        }
        // must agree on stable consensus hash
        let rules_disagree = ConversationP2P::check_consensus_hash_disagreement(msg.preamble.burn_stable_block_height, &msg.preamble.burn_stable_consensus_hash, chain_view);
        if rules_disagree {
            // remote peer disagrees on stable consensus hash -- follows different rules than us
            test_debug!("Consensus hash mismatch in preamble");
            return Err(net_error::InvalidMessage);
        }
        Ok(true)
    }
/// Get next message sequence number
fn next_seq(&mut self) -> u32 {
let mut rng = thread_rng();
rng.gen::<u32>()
}
    /// Generate a signed message for this conversation.
    /// The sequence number is chosen randomly via next_seq().
    pub fn sign_message(&mut self, chain_view: &BurnchainView, private_key: &Secp256k1PrivateKey, payload: StacksMessageType) -> Result<StacksMessage, net_error> {
        let mut msg = StacksMessage::from_chain_view(self.version, self.network_id, chain_view, payload);
        msg.sign(self.next_seq(), private_key)?;
        Ok(msg)
    }
    /// Generate a signed forwarded message for this conversation.
    /// Include ourselves as the latest relayer.
    pub fn sign_relay_message(&mut self, local_peer: &LocalPeer, chain_view: &BurnchainView, mut relay_hints: Vec<RelayData>, payload: StacksMessageType) -> Result<StacksMessage, net_error> {
        let mut msg = StacksMessage::from_chain_view(self.version, self.network_id, chain_view, payload);
        // append the hints we were given; sign_relay() then adds us as the last hop
        msg.relayers.append(&mut relay_hints);
        msg.sign_relay(&local_peer.private_key, self.next_seq(), &local_peer.to_neighbor_addr())?;
        Ok(msg)
    }
    /// Generate a signed reply for this conversation.
    /// Unlike sign_message(), the caller supplies the sequence number (it must echo the request's).
    pub fn sign_reply(&mut self, chain_view: &BurnchainView, private_key: &Secp256k1PrivateKey, payload: StacksMessageType, seq: u32) -> Result<StacksMessage, net_error> {
        let mut msg = StacksMessage::from_chain_view(self.version, self.network_id, chain_view, payload);
        msg.sign(seq, private_key)?;
        Ok(msg)
    }
/// sign and reply a message
fn sign_and_reply(&mut self, local_peer: &LocalPeer, burnchain_view: &BurnchainView, request_preamble: &Preamble, reply_message: StacksMessageType) -> Result<ReplyHandleP2P, net_error> {
let _msgtype = reply_message.get_message_name().to_owned();
let reply = self.sign_reply(burnchain_view, &local_peer.private_key, reply_message, request_preamble.seq)?;
let reply_handle = self.relay_signed_message(reply)
.map_err(|e| {
debug!("Unable to reply a {}: {:?}", _msgtype, &e);
e
})?;
self.stats.msgs_tx += 1;
Ok(reply_handle)
}
/// Sign and forward a message
pub fn sign_and_forward(&mut self, local_peer: &LocalPeer, burnchain_view: &BurnchainView, relay_hints: Vec<RelayData>, forward_message: StacksMessageType) -> Result<ReplyHandleP2P, net_error> {
let _msgtype = forward_message.get_message_name().to_owned();
let fwd = self.sign_relay_message(local_peer, burnchain_view, relay_hints, forward_message)?;
let fwd_handle = self.relay_signed_message(fwd)
.map_err(|e| {
debug!("Unable to forward a {}: {:?}", _msgtype, &e);
e
})?;
self.stats.msgs_tx += 1;
Ok(fwd_handle)
}
/// Reply a NACK
fn reply_nack(&mut self, local_peer: &LocalPeer, burnchain_view: &BurnchainView, preamble: &Preamble, nack_code: u32) -> Result<ReplyHandleP2P, net_error> {
let nack_payload = StacksMessageType::Nack(NackData::new(nack_code));
self.sign_and_reply(local_peer, burnchain_view, preamble, nack_payload)
}
    /// Queue up this message to this peer, and update our stats.
    /// This is a non-blocking operation. The caller needs to call .try_flush() or .flush() on the
    /// returned Write to finish sending.
    /// Increments stats.msgs_tx once per queued message.
    pub fn relay_signed_message(&mut self, msg: StacksMessage) -> Result<ReplyHandleP2P, net_error> {
        let _name = msg.get_message_name();
        let _seq = msg.request_id();
        let mut handle = self.connection.make_relay_handle(self.conn_id)?;
        // serialize directly into the handle; bytes drain to the socket on flush
        msg.consensus_serialize(&mut handle)?;
        self.stats.msgs_tx += 1;
        debug!("{:?}: relay-send({}) {} seq {}", &self, self.stats.msgs_tx, _name, _seq);
        Ok(handle)
    }
    /// Queue up this message to this peer, and update our stats. Expect a reply.
    /// This is a non-blocking operation. The caller needs to call .try_flush() or .flush() on the
    /// returned handle to finish sending.
    /// `ttl` bounds how long we will wait for the reply.
    pub fn send_signed_request(&mut self, msg: StacksMessage, ttl: u64) -> Result<ReplyHandleP2P, net_error> {
        let _name = msg.get_message_name();
        let _seq = msg.request_id();
        // a request handle registers the request id so the reply can be matched up
        let mut handle = self.connection.make_request_handle(msg.request_id(), ttl, self.conn_id)?;
        msg.consensus_serialize(&mut handle)?;
        self.stats.msgs_tx += 1;
        debug!("{:?}: request-send({}) {} seq {}", &self, self.stats.msgs_tx, _name, _seq);
        Ok(handle)
    }
    /// Validate a handshake request.
    /// Return Err(...) if the handshake request was invalid.
    ///
    /// Checks, in order:
    /// * if we have no key on file, the message must verify against the key it claims;
    /// * for authenticated outbound peers, the self-reported addr/port must match the socket;
    /// * the claimed public key must parse;
    /// * the key must not already be expired relative to our burn chain view;
    /// * the handshake must not come from ourselves.
    /// Panics if `message` is not a Handshake (caller must check).
    fn validate_handshake(&mut self, local_peer: &LocalPeer, chain_view: &BurnchainView, message: &mut StacksMessage) -> Result<(), net_error> {
        let handshake_data = match message.payload {
            StacksMessageType::Handshake(ref mut data) => data.clone(),
            _ => panic!("Message is not a handshake")
        };
        match self.connection.get_public_key() {
            None => {
                // if we don't yet have a public key for this node, verify the message.
                // if it's improperly signed, it's probably a poorly-timed re-key request (but either way the message should be rejected)
                message.verify_secp256k1(&handshake_data.node_public_key)
                    .map_err(|_e| {
                        debug!("{:?}: invalid handshake: not signed with given public key", &self);
                        net_error::InvalidMessage
                    })?;
            },
            Some(_) => {
                // for outbound connections, the self-reported address must match socket address if we already have a public key.
                // (not the case for inbound connections, since the peer socket address we see may
                // not be the same as the address the remote peer thinks it has).
                // The only exception to this is if the remote peer does not yet know its own
                // public IP address, in which case, its handshake addrbytes will be the
                // any-network bind address (0.0.0.0 or ::)
                if self.stats.outbound && (!handshake_data.addrbytes.is_anynet() && (self.peer_addrbytes != handshake_data.addrbytes || self.peer_port != handshake_data.port)) {
                    // wrong peer address
                    debug!("{:?}: invalid handshake -- wrong addr/port ({:?}:{:?})", &self, &handshake_data.addrbytes, handshake_data.port);
                    return Err(net_error::InvalidHandshake);
                }
            }
        };
        let their_public_key_res = handshake_data.node_public_key.to_public_key();
        match their_public_key_res {
            Ok(_) => {},
            Err(_e) => {
                // bad public key
                debug!("{:?}: invalid handshake -- invalid public key", &self);
                return Err(net_error::InvalidMessage);
            }
        };
        if handshake_data.expire_block_height <= chain_view.burn_block_height {
            // already stale
            debug!("{:?}: invalid handshake -- stale public key (expired at {})", &self, handshake_data.expire_block_height);
            return Err(net_error::InvalidHandshake);
        }
        // the handshake cannot come from us
        if handshake_data.node_public_key == StacksPublicKeyBuffer::from_public_key(&Secp256k1PublicKey::from_private(&local_peer.private_key)) {
            debug!("{:?}: invalid handshake -- got a handshake from myself", &self);
            return Err(net_error::InvalidHandshake);
        }
        Ok(())
    }
/// Update connection state from handshake data.
/// Returns true if we learned a new public key; false if not
pub fn update_from_handshake_data(&mut self, preamble: &Preamble, handshake_data: &HandshakeData) -> Result<bool, net_error> {
let pubk = handshake_data.node_public_key.to_public_key()?;
self.peer_version = preamble.peer_version;
self.peer_network_id = preamble.network_id;
self.peer_services = handshake_data.services;
self.peer_expire_block_height = handshake_data.expire_block_height;
self.handshake_addrbytes = handshake_data.addrbytes.clone();
self.handshake_port = handshake_data.port;
self.data_url = handshake_data.data_url.clone();
let mut updated = false;
let cur_pubk_opt = self.connection.get_public_key();
if let Some(cur_pubk) = cur_pubk_opt {
if pubk != cur_pubk {
test_debug!("{:?}: Upgrade key {:?} to {:?} expires {:?}", &self, &to_hex(&cur_pubk.to_bytes_compressed()), &to_hex(&pubk.to_bytes_compressed()), self.peer_expire_block_height);
updated = true;
}
}
self.connection.set_public_key(Some(pubk.clone()));
Ok(updated)
}
/// Handle an inbound NAT-punch request -- just tell the peer what we think their IP/port are.
/// No authentication from the peer is necessary.
fn handle_natpunch_request(&self, chain_view: &BurnchainView, nonce: u32) -> StacksMessage {
let natpunch_data = NatPunchData {
addrbytes: self.peer_addrbytes.clone(),
port: self.peer_port,
nonce: nonce
};
let msg = StacksMessage::from_chain_view(self.version, self.network_id, chain_view, StacksMessageType::NatPunchReply(natpunch_data));
msg
}
    /// Handle an inbound handshake request, and generate either a HandshakeAccept or a HandshakeReject
    /// payload to send back.
    /// A handshake will only be accepted if we do not yet know the public key of this remote peer,
    /// or if it is signed by the current public key.
    /// Returns a reply (either an accept or reject) if appropriate, plus a flag indicating whether
    /// the message was fully consumed (true only for rejects).
    /// Panics if this message is not a handshake (caller should check)
    fn handle_handshake(&mut self, local_peer: &LocalPeer, peerdb: &mut PeerDB, chain_view: &BurnchainView, message: &mut StacksMessage) -> Result<(Option<StacksMessage>, bool), net_error> {
        let res = self.validate_handshake(local_peer, chain_view, message);
        match res {
            Ok(_) => {},
            Err(net_error::InvalidHandshake) => {
                // recoverable: tell the peer why we rejected, and consume the message
                let reject = StacksMessage::from_chain_view(self.version, self.network_id, chain_view, StacksMessageType::HandshakeReject);
                debug!("{:?}: invalid handshake", &self);
                return Ok((Some(reject), true));
            },
            Err(e) => {
                return Err(e);
            }
        };
        let handshake_data = match message.payload {
            StacksMessageType::Handshake(ref mut data) => data.clone(),
            _ => panic!("Message is not a handshake")
        };
        // capture the pre-update key so we can tell "new" apart from "upgraded" below
        let old_pubkey_opt = self.connection.get_public_key();
        let updated = self.update_from_handshake_data(&message.preamble, &handshake_data)?;
        let _authentic_msg = if !updated { "same" } else if old_pubkey_opt.is_none() { "new" } else { "upgraded" };
        debug!("Handshake from {:?} {} public key {:?} expires at {:?}", &self, _authentic_msg,
               &to_hex(&handshake_data.node_public_key.to_public_key().unwrap().to_bytes_compressed()), handshake_data.expire_block_height);
        if updated {
            // save the new key
            let mut tx = peerdb.tx_begin().map_err(net_error::DBError)?;
            let mut neighbor = Neighbor::from_handshake(&mut tx, message.preamble.peer_version, message.preamble.network_id, &handshake_data)?;
            neighbor.save_update(&mut tx)?;
            tx.commit().map_err(|e| net_error::DBError(db_error::SqliteError(e)))?;
            debug!("{:?}: Re-key {:?} to {:?} expires {}", local_peer, &neighbor.addr, &to_hex(&neighbor.public_key.to_bytes_compressed()), neighbor.expire_block);
        }
        let accept_data = HandshakeAcceptData::new(local_peer, self.heartbeat);
        let accept = StacksMessage::from_chain_view(self.version, self.network_id, chain_view, StacksMessageType::HandshakeAccept(accept_data));
        // update stats
        self.stats.last_contact_time = get_epoch_time_secs();
        self.peer_heartbeat = self.heartbeat; // use our own heartbeat to determine how often we expect this peer to ping us, since that's what we've told the peer
        // always pass back handshakes, even though we "handled" them (since other processes --
        // in particular, the neighbor-walk logic -- need to receive them)
        Ok((Some(accept), false))
    }
/// Handle an inbound handshake-accept
/// Update conversation state based on a HandshakeAccept
/// Called from the p2p network thread.
fn handle_handshake_accept(&mut self, preamble: &Preamble, handshake_accept: &HandshakeAcceptData) -> Result<(), net_error> {
self.update_from_handshake_data(preamble, &handshake_accept.handshake)?;
self.peer_heartbeat =
if handshake_accept.heartbeat_interval > (MAX_PEER_HEARTBEAT_INTERVAL as u32) {
debug!("{:?}: heartbeat interval is too long; forcing default maximum", self);
MAX_PEER_HEARTBEAT_INTERVAL as u32
}
else {
handshake_accept.heartbeat_interval
};
self.stats.last_handshake_time = get_epoch_time_secs();
debug!("HandshakeAccept from {:?}: set public key to {:?} expiring at {:?} heartbeat {}s", &self,
&to_hex(&handshake_accept.handshake.node_public_key.to_public_key().unwrap().to_bytes_compressed()), handshake_accept.handshake.expire_block_height, self.peer_heartbeat);
Ok(())
}
/// Reply to a ping with a pong.
/// Called from the p2p network thread.
fn handle_ping(&mut self, chain_view: &BurnchainView, message: &mut StacksMessage) -> Result<Option<StacksMessage>, net_error> {
let ping_data = match message.payload {
StacksMessageType::Ping(ref data) => data,
_ => panic!("Message is not a ping")
};
let pong_data = PongData::from_ping(&ping_data);
Ok(Some(StacksMessage::from_chain_view(self.version, self.network_id, chain_view, StacksMessageType::Pong(pong_data))))
}
    /// Handle an inbound GetNeighbors request.
    /// Replies with a Neighbors message listing a random sample of fresh peers from the DB,
    /// and returns the handle the caller must flush to finish sending.
    fn handle_getneighbors(&mut self, peer_dbconn: &DBConn, local_peer: &LocalPeer, chain_view: &BurnchainView, preamble: &Preamble) -> Result<ReplyHandleP2P, net_error> {
        // get neighbors at random as long as they're fresh
        let mut neighbors = PeerDB::get_random_neighbors(peer_dbconn, self.network_id, MAX_NEIGHBORS_DATA_LEN, chain_view.burn_block_height, false)
            .map_err(net_error::DBError)?;
        if cfg!(test) && self.connection.options.disable_chat_neighbors {
            // never report neighbors if this is disabled by a test
            test_debug!("{:?}: Neighbor crawl is disabled; reporting 0 neighbors", &local_peer);
            neighbors.clear();
        }
        let neighbor_addrs : Vec<NeighborAddress> = neighbors
            .iter()
            .map(|n| NeighborAddress::from_neighbor(n))
            .collect();
        debug!("{:?}: handle GetNeighbors from {:?}. Reply with {} neighbors", &local_peer, &self, neighbor_addrs.len());
        let payload = StacksMessageType::Neighbors( NeighborsData { neighbors: neighbor_addrs } );
        let reply = self.sign_reply(chain_view, &local_peer.private_key, payload, preamble.seq)?;
        let reply_handle = self.relay_signed_message(reply)
            .map_err(|e| {
                debug!("Outbox to {:?} is full; cannot reply to GetNeighbors", &self);
                e
            })?;
        Ok(reply_handle)
    }
    /// Handle an inbound GetBlocksInv request.
    /// Returns a reply handle to the generated message (possibly a nack).
    /// An unknown consensus hash is answered with a NoSuchBurnchainBlock NACK rather than an error.
    fn handle_getblocksinv(&mut self, local_peer: &LocalPeer, burndb: &BurnDB, chainstate: &mut StacksChainState, burnchain_view: &BurnchainView, preamble: &Preamble, get_blocks_inv: &GetBlocksInv) -> Result<ReplyHandleP2P, net_error> {
        let block_hashes = {
            // clamp the requested count to the protocol maximum
            let num_headers =
                if (get_blocks_inv.num_blocks as u32) > BLOCKS_INV_DATA_MAX_BITLEN {
                    BLOCKS_INV_DATA_MAX_BITLEN as u64
                }
                else {
                    get_blocks_inv.num_blocks as u64
                };
            match BurnDB::get_stacks_header_hashes(&burndb.index_conn(), num_headers, &get_blocks_inv.consensus_hash, Some(chainstate.get_block_header_cache())) {
                Ok(blocks_hashes) => Ok(blocks_hashes),
                Err(e) => match e {
                    db_error::NotFoundError => {
                        // make this into a NACK
                        return self.reply_nack(local_peer, burnchain_view, preamble, NackErrorCodes::NoSuchBurnchainBlock);
                    },
                    _ => {
                        Err(net_error::DBError(e))
                    }
                }
            }
        }?;
        // update cache
        BurnDB::merge_block_header_cache(chainstate.borrow_block_header_cache(), &block_hashes);
        let blocks_inv_data : BlocksInvData = chainstate.get_blocks_inventory(&block_hashes).map_err(|e| net_error::from(e))?;
        debug!("{:?}: Handle GetBlocksInv from {:?}. Reply {:?} to request {:?}", &local_peer, &self, &blocks_inv_data, get_blocks_inv);
        let blocks_inv_payload = StacksMessageType::BlocksInv(blocks_inv_data);
        self.sign_and_reply(local_peer, burnchain_view, preamble, blocks_inv_payload)
    }
/// Verify that there are no cycles in our relayers list.
/// Identify relayers by public key hash
fn check_relayer_cycles(relayers: &Vec<RelayData>) -> bool {
let mut addrs = HashSet::new();
for r in relayers.iter() {
if addrs.contains(&r.peer.public_key_hash) {
return false;
}
addrs.insert(r.peer.public_key_hash.clone());
}
true
}
/// Verify that we aren't in this relayers list
fn check_relayers_remote(local_peer: &LocalPeer, relayers: &Vec<RelayData>) -> bool {
let addr = local_peer.to_neighbor_addr();
for r in relayers.iter() {
if r.peer == addr {
return false;
}
}
return true;
}
/// Check that a message was properly relayed.
/// * there are no relay cycles
/// * we didn't send this
/// Update relayer statistics for this conversation
fn process_relayers(&mut self, local_peer: &LocalPeer, preamble: &Preamble, mut relayers: Vec<RelayData>) -> bool {
if !ConversationP2P::check_relayer_cycles(&relayers) {
debug!("Message from {:?} contains a cycle", self.to_neighbor_key());
return false;
}
if !ConversationP2P::check_relayers_remote(local_peer, &relayers) {
debug!("Message originates from us ({})", local_peer.to_neighbor_addr());
return false;
}
for relayer in relayers.drain(..) {
self.stats.add_relayer(relayer.peer, (preamble.payload_len - 1) as u64);
}
return true;
}
    /// Validate pushed blocks.
    /// Make sure the peer doesn't send us too much at once, though.
    /// Returns Ok(Some(handle)) if the peer was throttled with a NACK, Ok(None) if the push is
    /// within budget, and Err(InvalidMessage) on a bad relayer list.
    fn validate_blocks_push(&mut self, local_peer: &LocalPeer, chain_view: &BurnchainView, preamble: &Preamble, relayers: Vec<RelayData>) -> Result<Option<ReplyHandleP2P>, net_error> {
        // NOTE(review): payload_len comes from the remote peer's preamble; this assert panics
        // on a short payload -- confirm an earlier deserialization step guarantees the bound.
        assert!(preamble.payload_len > 5); // don't count 1-byte type prefix + 4 byte vector length
        if !self.process_relayers(local_peer, preamble, relayers) {
            self.stats.msgs_err += 1;
            return Err(net_error::InvalidMessage);
        }
        self.stats.add_block_push((preamble.payload_len as u64) - 5);
        if self.connection.options.max_block_push_bandwidth > 0 && self.stats.get_block_push_bandwidth() > (self.connection.options.max_block_push_bandwidth as f64) {
            debug!("Neighbor {:?} exceeded max block-push bandwidth of {} bytes/sec (currently at {})", &self.to_neighbor_key(), self.connection.options.max_block_push_bandwidth, self.stats.get_block_push_bandwidth());
            return self.reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled)
                .and_then(|handle| Ok(Some(handle)));
        }
        Ok(None)
    }
    /// Validate pushed microblocks.
    /// Not much we can do to see if they're semantically correct, but we can at least throttle a
    /// peer that sends us too many at once.
    /// Same return contract as validate_blocks_push().
    fn validate_microblocks_push(&mut self, local_peer: &LocalPeer, chain_view: &BurnchainView, preamble: &Preamble, relayers: Vec<RelayData>) -> Result<Option<ReplyHandleP2P>, net_error> {
        // NOTE(review): see validate_blocks_push() about this assert on remote-supplied data.
        assert!(preamble.payload_len > 5); // don't count 1-byte type prefix + 4 byte vector length
        if !self.process_relayers(local_peer, preamble, relayers) {
            self.stats.msgs_err += 1;
            return Err(net_error::InvalidMessage);
        }
        self.stats.add_microblocks_push((preamble.payload_len as u64) - 5);
        if self.connection.options.max_microblocks_push_bandwidth > 0 && self.stats.get_microblocks_push_bandwidth() > (self.connection.options.max_microblocks_push_bandwidth as f64) {
            debug!("Neighbor {:?} exceeded max microblocks-push bandwidth of {} bytes/sec (currently at {})", &self.to_neighbor_key(), self.connection.options.max_microblocks_push_bandwidth, self.stats.get_microblocks_push_bandwidth());
            return self.reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled)
                .and_then(|handle| Ok(Some(handle)));
        }
        Ok(None)
    }
    /// Validate a pushed transaction.
    /// Update bandwidth accounting, but forward the transaction along.
    /// Same return contract as validate_blocks_push().
    fn validate_transaction_push(&mut self, local_peer: &LocalPeer, chain_view: &BurnchainView, preamble: &Preamble, relayers: Vec<RelayData>) -> Result<Option<ReplyHandleP2P>, net_error> {
        // NOTE(review): see validate_blocks_push() about this assert on remote-supplied data.
        assert!(preamble.payload_len > 1); // don't count 1-byte type prefix
        if !self.process_relayers(local_peer, preamble, relayers) {
            self.stats.msgs_err += 1;
            return Err(net_error::InvalidMessage);
        }
        self.stats.add_transaction_push((preamble.payload_len as u64) - 1);
        if self.connection.options.max_transaction_push_bandwidth > 0 && self.stats.get_transaction_push_bandwidth() > (self.connection.options.max_transaction_push_bandwidth as f64) {
            debug!("Neighbor {:?} exceeded max transaction-push bandwidth of {} bytes/sec (currently at {})", &self.to_neighbor_key(), self.connection.options.max_transaction_push_bandwidth, self.stats.get_transaction_push_bandwidth());
            return self.reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled)
                .and_then(|handle| Ok(Some(handle)));
        }
        Ok(None)
    }
/// Handle an inbound authenticated p2p data-plane message.
/// Return the message if not handled
fn handle_data_message(&mut self, local_peer: &LocalPeer, peerdb: &mut PeerDB, burndb: &BurnDB, chainstate: &mut StacksChainState, chain_view: &BurnchainView, msg: StacksMessage) -> Result<Option<StacksMessage>, net_error> {
let res = match msg.payload {
StacksMessageType::GetNeighbors => self.handle_getneighbors(peerdb.conn(), local_peer, chain_view, &msg.preamble),
StacksMessageType::GetBlocksInv(ref get_blocks_inv) => self.handle_getblocksinv(local_peer, burndb, chainstate, chain_view, &msg.preamble, get_blocks_inv),
StacksMessageType::Blocks(_) => {
// not handled here, but do some accounting -- we can't receive blocks too often,
// so close this conversation if we do.
match self.validate_blocks_push(local_peer, chain_view, &msg.preamble, msg.relayers.clone())? {
Some(handle) => Ok(handle),
None => {
// will forward upstream
return Ok(Some(msg))
}
}
},
StacksMessageType::Microblocks(_) => {
// not handled here, but do some accounting -- we can't receive too many
// unconfirmed microblocks per second
match self.validate_microblocks_push(local_peer, chain_view, &msg.preamble, msg.relayers.clone())? {
Some(handle) => Ok(handle),
None => {
// will forward upstream
return Ok(Some(msg))
}
}
},
StacksMessageType::Transaction(_) => {
// not handled here, but do some accounting -- we can't receive too many
// unconfirmed transactions per second
match self.validate_transaction_push(local_peer, chain_view, &msg.preamble, msg.relayers.clone())? {
Some(handle) => Ok(handle),
None => {
// will forward upstream
return Ok(Some(msg))
}
}
},
_ => {
// all else will forward upstream
return Ok(Some(msg));
}
};
match res {
Ok(handle) => {
self.reply_handles.push_back(handle);
Ok(None)
}
Err(e) => {
debug!("Failed to handle messsage: {:?}", &e);
Ok(Some(msg))
}
}
}
/// Load data into our connection
pub fn recv<R: Read>(&mut self, r: &mut R) -> Result<usize, net_error> {
let mut total_recved = 0;
loop {
let res = self.connection.recv_data(r);
match res {
Ok(num_recved) => {
total_recved += num_recved;
if num_recved > 0 {
self.stats.last_recv_time = get_epoch_time_secs();
self.stats.bytes_rx += num_recved as u64;
}
else {
break;
}
},
Err(e) => {
info!("{:?}: failed to recv on P2P conversation: {:?}", self, &e);
return Err(e);
}
}
}
debug!("{:?}: received {} bytes", self, total_recved);
Ok(total_recved)
}
/// Write data out of our conversation
pub fn send<W: Write>(&mut self, w: &mut W) -> Result<usize, net_error> {
let mut total_sent = 0;
loop {
// queue next byte slice
self.try_flush()?;
let res = self.connection.send_data(w);
match res {
Ok(num_sent) => {
total_sent += num_sent;
if num_sent > 0 {
self.stats.last_send_time = get_epoch_time_secs();
self.stats.bytes_tx += num_sent as u64;
}
else {
break;
}
},
Err(e) => {
info!("{:?}: failed to send on P2P conversation: {:?}", self, &e);
return Err(e);
}
}
}
debug!("{:?}: sent {} bytes", self, total_sent);
Ok(total_sent)
}
    /// Make progress on in-flight messages.
    /// Works on at most one queued reply per call, in FIFO order; a reply is dropped once it is
    /// fully drained, or if its stream is broken (the error is logged and swallowed -- the peer
    /// will reconnect if it cares).
    pub fn try_flush(&mut self) -> Result<(), net_error> {
        // send out responses in the order they were requested
        let mut drained = false;
        let mut broken = false;
        match self.reply_handles.front_mut() {
            Some(ref mut reply) => {
                // try moving some data to the connection
                match reply.try_flush() {
                    Ok(res) => {
                        drained = res;
                    },
                    Err(e) => {
                        // dead
                        warn!("Broken P2P connection: {:?}", &e);
                        broken = true;
                    }
                }
            },
            None => {}
        }
        if broken || drained {
            // done with this stream
            self.reply_handles.pop_front();
        }
        Ok(())
    }
    /// How many pending outgoing messages are there
    pub fn num_pending_outbound(&self) -> usize {
        self.reply_handles.len()
    }
    /// Validate an inbound p2p message.
    /// Return Ok(true) if valid, Ok(false) if invalid (message is skipped), and Err if we should
    /// disconnect from the peer entirely.  Updates error stats and health points on any failure.
    fn validate_inbound_message(&mut self, msg: &StacksMessage, burnchain_view: &BurnchainView) -> Result<bool, net_error> {
        // validate message preamble
        match self.is_preamble_valid(&msg, burnchain_view) {
            Ok(res) => {
                if !res {
                    info!("{:?}: Received message with stale preamble; ignoring", &self);
                    self.stats.msgs_err += 1;
                    self.stats.add_healthpoint(false);
                    return Ok(false);
                }
            },
            Err(e) => {
                match e {
                    net_error::InvalidMessage => {
                        // Disconnect from this peer.  If it thinks nothing's wrong, it'll
                        // reconnect on its own.
                        // However, only count this message as error.  Drop all other queued
                        // messages.
                        info!("{:?}: Received invalid preamble; dropping connection", &self);
                        self.stats.msgs_err += 1;
                        self.stats.add_healthpoint(false);
                        return Err(e);
                    },
                    _ => {
                        // skip this message
                        info!("{:?}: Failed to process message: {:?}", &self, &e);
                        self.stats.msgs_err += 1;
                        self.stats.add_healthpoint(false);
                        return Ok(false);
                    }
                }
            }
        }
        return Ok(true);
    }
    /// Handle an inbound authenticated p2p control-plane message.
    /// Return true if we should consume it (i.e. it's not something to forward along), as well as the message we'll send as a reply (if any).
    /// Data-plane payloads fall through the catch-all arm and are neither consumed nor replied to.
    fn handle_authenticated_control_message(&mut self, local_peer: &LocalPeer, peerdb: &mut PeerDB, burnchain_view: &BurnchainView, msg: &mut StacksMessage) -> Result<(Option<StacksMessage>, bool), net_error> {
        let mut consume = false;
        // already have public key; match payload
        let reply_opt = match msg.payload {
            StacksMessageType::Handshake(_) => {
                debug!("{:?}: Got Handshake", &self);
                // handshakes are only consumed when rejected; accepts flow upstream too
                let (handshake_opt, handled) = self.handle_handshake(local_peer, peerdb, burnchain_view, msg)?;
                consume = handled;
                Ok(handshake_opt)
            },
            StacksMessageType::HandshakeAccept(ref data) => {
                test_debug!("{:?}: Got HandshakeAccept", &self);
                self.handle_handshake_accept(&msg.preamble, data).and_then(|_| Ok(None))
            },
            StacksMessageType::Ping(_) => {
                test_debug!("{:?}: Got Ping", &self);
                // consume here if unsolicited
                consume = true;
                self.handle_ping(burnchain_view, msg)
            },
            StacksMessageType::Pong(_) => {
                test_debug!("{:?}: Got Pong", &self);
                Ok(None)
            },
            StacksMessageType::NatPunchRequest(ref nonce) => {
                if cfg!(test) && self.connection.options.disable_natpunch {
                    return Err(net_error::InvalidMessage);
                }
                test_debug!("{:?}: Got NatPunchRequest({})", &self, nonce);
                consume = true;
                let msg = self.handle_natpunch_request(burnchain_view, *nonce);
                Ok(Some(msg))
            },
            StacksMessageType::NatPunchReply(ref _m) => {
                if cfg!(test) && self.connection.options.disable_natpunch {
                    return Err(net_error::InvalidMessage);
                }
                test_debug!("{:?}: Got NatPunchReply({})", &self, _m.nonce);
                Ok(None)
            },
            _ => {
                test_debug!("{:?}: Got a data-plane message (type {})", &self, msg.payload.get_message_name());
                Ok(None) // nothing to reply to at this time
            }
        }?;
        Ok((reply_opt, consume))
    }
/// Handle an inbound unauthenticated p2p control-plane message.
/// Returns `(reply, consume)`: the reply we generate to deal with it (if we do
/// deal with it), and whether to consume the message instead of forwarding it.
fn handle_unauthenticated_control_message(&mut self, local_peer: &LocalPeer, peerdb: &mut PeerDB, burnchain_view: &BurnchainView, msg: &mut StacksMessage) -> Result<(Option<StacksMessage>, bool), net_error> {
    // Until a handshake completes, the only acceptable messages are
    // handshakes, handshake accept/rejects, nacks, and NAT holepunches.
    // Anything else gets NACK'ed -- the peer must handshake first.
    let solicited = self.connection.is_solicited(&msg);
    match msg.payload {
        StacksMessageType::Handshake(_) => {
            test_debug!("{:?}: Got unauthenticated Handshake", &self);
            let (reply_opt, handled) = self.handle_handshake(local_peer, peerdb, burnchain_view, msg)?;
            Ok((reply_opt, handled))
        },
        StacksMessageType::HandshakeAccept(ref data) => {
            if solicited {
                test_debug!("{:?}: Got unauthenticated HandshakeAccept", &self);
                self.handle_handshake_accept(&msg.preamble, data)?;
                Ok((None, false))
            }
            else {
                test_debug!("{:?}: Unsolicited unauthenticated HandshakeAccept", &self);
                // don't update stats or state, and don't pass back
                Ok((None, true))
            }
        },
        StacksMessageType::HandshakeReject => {
            test_debug!("{:?}: Got unauthenticated HandshakeReject", &self);
            // don't NACK this back just because we were rejected.
            // But, it's okay to forward this back (i.e. don't consume).
            Ok((None, false))
        },
        StacksMessageType::Nack(_) => {
            test_debug!("{:?}: Got unauthenticated Nack", &self);
            // don't NACK back.
            // But, it's okay to forward this back (i.e. don't consume).
            Ok((None, false))
        },
        StacksMessageType::NatPunchRequest(ref nonce) => {
            if cfg!(test) && self.connection.options.disable_natpunch {
                return Err(net_error::InvalidMessage);
            }
            test_debug!("{:?}: Got unauthenticated NatPunchRequest({})", &self, *nonce);
            let natpunch_reply = self.handle_natpunch_request(burnchain_view, *nonce);
            Ok((Some(natpunch_reply), true))
        },
        StacksMessageType::NatPunchReply(ref _m) => {
            if cfg!(test) && self.connection.options.disable_natpunch {
                return Err(net_error::InvalidMessage);
            }
            test_debug!("{:?}: Got unauthenticated NatPunchReply({})", &self, _m.nonce);
            // it's okay to forward this back (i.e. don't consume)
            Ok((None, false))
        },
        _ => {
            test_debug!("{:?}: Got unauthenticated message (type {}), will NACK", &self, msg.payload.get_message_name());
            // unauthenticated, so don't forward it (but do consume it, and do nack it)
            let nack_payload = StacksMessageType::Nack(NackData::new(NackErrorCodes::HandshakeRequired));
            let nack = StacksMessage::from_chain_view(self.version, self.network_id, burnchain_view, nack_payload);
            Ok((Some(nack), true))
        }
    }
}
/// Carry on a conversation with the remote peer.
/// Called from the p2p network thread, so no need for a network handle.
/// Attempts to fulfill requests in other threads as a result of processing a message.
/// Returns the list of unfulfilled Stacks messages we received -- messages not destined for
/// any other thread in this program (i.e. "unsolicited messages").
pub fn chat(&mut self, local_peer: &LocalPeer, peerdb: &mut PeerDB, burndb: &BurnDB, chainstate: &mut StacksChainState, burnchain_view: &BurnchainView) -> Result<Vec<StacksMessage>, net_error> {
    // Snapshot the inbox length up front so one pass drains exactly the
    // messages that were pending when chat() was called.
    let num_inbound = self.connection.inbox_len();
    test_debug!("{:?}: {} messages pending", &self, num_inbound);

    let mut unsolicited = vec![];
    for _ in 0..num_inbound {
        let update_stats; // whether or not this message can count towards this peer's liveness stats
        let mut msg = match self.connection.next_inbox_message() {
            None => {
                continue;
            },
            Some(m) => m
        };

        // drop (skip) messages that fail preamble/chain-view validation
        if !self.validate_inbound_message(&msg, burnchain_view)? {
            continue;
        }

        let (reply_opt, consumed) =
            if self.connection.has_public_key() {
                // we already have this remote peer's public key, so the message signature will
                // have been verified by the underlying ConnectionP2P.
                update_stats = true;
                self.handle_authenticated_control_message(local_peer, peerdb, burnchain_view, &mut msg)?
            }
            else {
                // the underlying ConnectionP2P does not yet have a public key installed (i.e.
                // we don't know it yet), so treat this message with a little bit more
                // suspicion.
                // Update stats only if we were asking for this message.
                update_stats = self.connection.is_solicited(&msg);
                self.handle_unauthenticated_control_message(local_peer, peerdb, burnchain_view, &mut msg)?
            };

        match reply_opt {
            None => {}
            Some(mut reply) => {
                // send back this message to the remote peer.
                // Sign the reply with the inbound message's sequence number so the
                // remote peer can pair it with its pending request.
                test_debug!("{:?}: Send control-plane reply type {}", &self, reply.payload.get_message_name());
                reply.sign(msg.preamble.seq, &local_peer.private_key)?;
                let reply_handle = self.relay_signed_message(reply)?;
                self.reply_handles.push_back(reply_handle);
            }
        }

        let now = get_epoch_time_secs();
        // capture these before `msg` is moved into fulfill_request() below
        let _msgtype = msg.payload.get_message_name().to_owned();
        let _seq = msg.request_id();

        if update_stats {
            // successfully got a message we asked for-- update stats
            if self.stats.first_contact_time == 0 {
                self.stats.first_contact_time = now;
            }

            // per-message-type receive counter
            let msg_id = msg.payload.get_message_id();
            let count = match self.stats.msg_rx_counts.get(&msg_id) {
                None => 1,
                Some(c) => c + 1
            };

            self.stats.msg_rx_counts.insert(msg_id, count);

            self.stats.msgs_rx += 1;
            self.stats.last_recv_time = now;
            self.stats.last_contact_time = get_epoch_time_secs();
            self.stats.add_healthpoint(true);

            // update chain view from preamble -- track the remote peer's
            // burnchain tip and stable tip, but only if they advanced
            if msg.preamble.burn_block_height > self.burnchain_tip_height {
                self.burnchain_tip_height = msg.preamble.burn_block_height;
                self.burnchain_tip_consensus_hash = msg.preamble.burn_consensus_hash.clone();
            }

            if msg.preamble.burn_stable_block_height > self.burnchain_stable_tip_height {
                self.burnchain_stable_tip_height = msg.preamble.burn_stable_block_height;
                self.burnchain_stable_tip_consensus_hash = msg.preamble.burn_stable_consensus_hash.clone();
            }

            debug!("{:?}: remote chain view is ({},{})-({},{})", self, self.burnchain_stable_tip_height, &self.burnchain_stable_tip_consensus_hash, self.burnchain_tip_height, &self.burnchain_tip_consensus_hash);
        }
        else {
            // got an unhandled message we didn't ask for
            self.stats.msgs_rx_unsolicited += 1;
        }

        debug!("{:?}: Received message {}", &self, _msgtype);

        // Is there someone else waiting for this message? If so, pass it along.
        // fulfill_request() consumes `msg`; it hands the message back (Some)
        // only when no in-program requester was waiting on it.
        let fulfill_opt = self.connection.fulfill_request(msg);
        match fulfill_opt {
            None => {
                debug!("{:?}: Fulfilled pending message request (type {} seq {})", &self, _msgtype, _seq);
            },
            Some(msg) => {
                if consumed {
                    // already handled
                    debug!("{:?}: Consumed message (type {} seq {})", &self, _msgtype, _seq);
                }
                else {
                    // not consumed by the control plane -- try the data plane,
                    // and pass anything still unhandled up to the caller
                    test_debug!("{:?}: Try handling message (type {} seq {})", &self, _msgtype, _seq);
                    let msg_opt = self.handle_data_message(local_peer, peerdb, burndb, chainstate, burnchain_view, msg)?;
                    match msg_opt {
                        Some(msg) => {
                            debug!("{:?}: Did not handle message (type {} seq {}); passing upstream", &self, _msgtype, _seq);
                            unsolicited.push(msg);
                        },
                        None => {
                            debug!("{:?}: Handled message {} seq {}", &self, _msgtype, _seq);
                        }
                    }
                }
            }
        }
    }

    Ok(unsolicited)
}
/// Remove all timed-out messages from the underlying connection, and ding the
/// remote peer as unhealthy once per timed-out message.
///
/// (Fix: dropped the redundant `-> ()` return annotation; Clippy `unused_unit`.)
pub fn clear_timeouts(&mut self) {
    let num_drained = self.connection.drain_timeouts();
    for _ in 0..num_drained {
        self.stats.add_healthpoint(false);
    }
}
/// Get an immutable ref to this conversation's accumulated neighbor stats
/// (message counts, contact times, health points).
pub fn get_stats(&self) -> &NeighborStats {
    &self.stats
}
/// Get a mutable ref to this conversation's accumulated neighbor stats,
/// allowing the caller to update them directly.
pub fn get_stats_mut(&mut self) -> &mut NeighborStats {
    &mut self.stats
}
}
#[cfg(test)]
mod test {
use super::*;
use net::*;
use net::connection::*;
use net::db::*;
use net::p2p::*;
use util::secp256k1::*;
use util::uint::*;
use util::pipe::*;
use burnchains::*;
use burnchains::burnchain::*;
use chainstate::*;
use chainstate::burn::*;
use chainstate::burn::db::burndb::*;
use burnchains::bitcoin::address::BitcoinAddress;
use burnchains::bitcoin::keys::BitcoinPublicKey;
use std::net::SocketAddr;
use std::net::SocketAddrV4;
use std::io::prelude::*;
use std::io::Read;
use std::io::Write;
use std::fs;
use net::test::*;
use core::{PEER_VERSION, NETWORK_P2P_PORT};
/// Create fresh peer, burn, and chainstate DBs under /tmp for a single test,
/// wiping any directory left over from a previous run of the same test name.
fn make_test_chain_dbs(testname: &str, burnchain: &Burnchain, network_id: u32, key_expires: u64, data_url: UrlString, asn4_entries: &Vec<ASEntry4>, initial_neighbors: &Vec<Neighbor>) -> (PeerDB, BurnDB, StacksChainState) {
    let test_path = format!("/tmp/blockstack-test-databases-{}", testname);

    // start from a clean slate: remove stale state from a prior run, if any
    if fs::metadata(&test_path).is_ok() {
        fs::remove_dir_all(&test_path).unwrap();
    }
    fs::create_dir_all(&test_path).unwrap();

    let burndb_path = format!("{}/burn", &test_path);
    let peerdb_path = format!("{}/peers.db", &test_path);
    let chainstate_path = format!("{}/chainstate", &test_path);

    let peerdb = PeerDB::connect(&peerdb_path, true, network_id, burnchain.network_id, None, key_expires, PeerAddress::from_ipv4(127, 0, 0, 1), NETWORK_P2P_PORT, data_url.clone(), &asn4_entries, Some(&initial_neighbors)).unwrap();
    let burndb = BurnDB::connect(&burndb_path, burnchain.first_block_height, &burnchain.first_block_hash, get_epoch_time_secs(), true).unwrap();
    let chainstate = StacksChainState::open(false, network_id, &chainstate_path).unwrap();

    (peerdb, burndb, chainstate)
}
/// Shuttle bytes between two conversations over an in-memory pipe until both
/// sides have fully flushed and nothing remains to send or receive.
fn convo_send_recv(sender: &mut ConversationP2P, mut sender_handles: Vec<&mut ReplyHandleP2P>, receiver: &mut ConversationP2P) -> () {
    let (mut pipe_read, mut pipe_write) = Pipe::new();
    pipe_read.set_nonblocking(true);

    loop {
        // flush every outstanding reply handle on the sender side; the round
        // only completes once all of them report flushed
        let mut res = true;
        for handle in sender_handles.iter_mut() {
            let flushed = handle.try_flush().unwrap();
            res = flushed && res;
        }

        sender.try_flush().unwrap();
        receiver.try_flush().unwrap();
        pipe_write.try_flush().unwrap();

        let all_relays_flushed = receiver.num_pending_outbound() == 0 && sender.num_pending_outbound() == 0;

        // pump one round of bytes through the pipe
        let nw = sender.send(&mut pipe_write).unwrap();
        let nr = receiver.recv(&mut pipe_read).unwrap();

        test_debug!("res = {}, all_relays_flushed = {}, nr = {}, nw = {}", res, all_relays_flushed, nr, nw);

        // done once everything is flushed and no bytes moved this round
        if res && all_relays_flushed && nr == 0 && nw == 0 {
            break;
        }
    }

    eprintln!("pipe_read = {:?}", pipe_read);
    eprintln!("pipe_write = {:?}", pipe_write);
}
/// Seed the peer DB with the local socket address, and populate the burn DB
/// with one synthetic snapshot per block from the first snapshot up to the
/// chain view's burn tip.
fn db_setup(peerdb: &mut PeerDB, burndb: &mut BurnDB, socketaddr: &SocketAddr, chain_view: &BurnchainView) -> () {
    {
        // record our own address in the peer DB
        let mut tx = peerdb.tx_begin().unwrap();
        PeerDB::set_local_ipaddr(&mut tx, &PeerAddress::from_socketaddr(socketaddr), socketaddr.port()).unwrap();
        tx.commit().unwrap();
    }
    let mut tx = burndb.tx_begin().unwrap();
    let mut prev_snapshot = BurnDB::get_first_block_snapshot(&tx).unwrap();
    for i in prev_snapshot.block_height..chain_view.burn_block_height+1 {
        let mut next_snapshot = prev_snapshot.clone();

        next_snapshot.block_height += 1;
        if i == chain_view.burn_block_height {
            next_snapshot.consensus_hash = chain_view.burn_consensus_hash.clone();
        }
        else if i == chain_view.burn_stable_block_height {
            next_snapshot.consensus_hash = chain_view.burn_stable_consensus_hash.clone();
        }

        // derive deterministic per-height filler bytes for the snapshot fields
        let big_i = Uint256::from_u64(i as u64);
        let mut big_i_bytes_32 = [0u8; 32];
        let mut big_i_bytes_20 = [0u8; 20];
        big_i_bytes_32.copy_from_slice(&big_i.to_u8_slice());
        big_i_bytes_20.copy_from_slice(&big_i.to_u8_slice()[0..20]);

        // NOTE(review): this unconditionally overwrites the chain-view
        // consensus hashes assigned in the if/else above, making those two
        // branches dead code -- confirm which assignment is intended.
        next_snapshot.consensus_hash = ConsensusHash(big_i_bytes_20);

        next_snapshot.parent_burn_header_hash = next_snapshot.burn_header_hash.clone();
        next_snapshot.burn_header_hash = BurnchainHeaderHash(big_i_bytes_32.clone());
        next_snapshot.ops_hash = OpsHash::from_bytes(&big_i_bytes_32).unwrap();
        next_snapshot.winning_stacks_block_hash = BlockHeaderHash(big_i_bytes_32.clone());
        next_snapshot.winning_block_txid = Txid(big_i_bytes_32.clone());
        next_snapshot.total_burn += 1;
        next_snapshot.sortition = true;
        next_snapshot.sortition_hash = next_snapshot.sortition_hash.mix_burn_header(&BurnchainHeaderHash(big_i_bytes_32.clone()));
        next_snapshot.num_sortitions += 1;

        // append to the chain tip and record the resulting index root
        let next_index_root = BurnDB::append_chain_tip_snapshot(&mut tx, &prev_snapshot, &next_snapshot, &vec![], &vec![]).unwrap();
        next_snapshot.index_root = next_index_root;

        test_debug!("i = {}, chain_view.burn_block_height = {}, ch = {}", i, chain_view.burn_block_height, next_snapshot.consensus_hash);

        prev_snapshot = next_snapshot;
    }
    tx.commit().unwrap();
}
// Two peers handshake successfully: peer 2 forwards the handshake upstream,
// peer 1 receives a HandshakeAccept, and both sides learn each other's public
// key, data URL, and (for peer 1) heartbeat interval.
#[test]
#[ignore]
fn convo_handshake_accept() {
    let conn_opts = ConnectionOptions::default();

    let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
    let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081);

    let first_burn_hash = BurnchainHeaderHash::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap();

    // minimal burnchain config; only fields read during handshaking matter here
    let burnchain = Burnchain {
        peer_version: PEER_VERSION,
        network_id: 0,
        chain_name: "bitcoin".to_string(),
        network_name: "testnet".to_string(),
        working_dir: "/nope".to_string(),
        consensus_hash_lifetime: 24,
        stable_confirmations: 7,
        first_block_height: 12300,
        first_block_hash: first_burn_hash.clone(),
    };

    let mut chain_view = BurnchainView {
        burn_block_height: 12348,
        burn_consensus_hash: ConsensusHash::from_hex("1111111111111111111111111111111111111111").unwrap(),
        burn_stable_block_height: 12341,
        burn_stable_consensus_hash: ConsensusHash::from_hex("2222222222222222222222222222222222222222").unwrap(),
        last_consensus_hashes: HashMap::new()
    };
    chain_view.make_test_data();

    let (mut peerdb_1, mut burndb_1, mut chainstate_1) = make_test_chain_dbs("convo_handshake_accept_1", &burnchain, 0x9abcdef0, 12350, "http://peer1.com".into(), &vec![], &vec![]);
    let (mut peerdb_2, mut burndb_2, mut chainstate_2) = make_test_chain_dbs("convo_handshake_accept_2", &burnchain, 0x9abcdef0, 12351, "http://peer2.com".into(), &vec![], &vec![]);

    db_setup(&mut peerdb_1, &mut burndb_1, &socketaddr_1, &chain_view);
    db_setup(&mut peerdb_2, &mut burndb_2, &socketaddr_2, &chain_view);

    let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap();
    let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap();

    let mut convo_1 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_2, &conn_opts, true, 0);
    let mut convo_2 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_1, &conn_opts, true, 0);

    // no peer public keys known yet
    assert!(convo_1.connection.get_public_key().is_none());
    assert!(convo_2.connection.get_public_key().is_none());

    // convo_1 sends a handshake to convo_2
    let handshake_data_1 = HandshakeData::from_local_peer(&local_peer_1);
    let handshake_1 = convo_1.sign_message(&chain_view, &local_peer_1.private_key, StacksMessageType::Handshake(handshake_data_1.clone())).unwrap();
    let mut rh_1 = convo_1.send_signed_request(handshake_1, 1000000).unwrap();

    // convo_2 receives it and processes it, and since no one is waiting for it, will forward
    // it along to the chat caller (us)
    test_debug!("send handshake");
    convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2);
    let unhandled_2 = convo_2.chat(&local_peer_2, &mut peerdb_2, &burndb_2, &mut chainstate_2, &chain_view).unwrap();

    // convo_1 has a handshakeaccept
    test_debug!("send handshake-accept");
    convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1);
    let unhandled_1 = convo_1.chat(&local_peer_1, &mut peerdb_1, &burndb_1, &mut chainstate_1, &chain_view).unwrap();

    let reply_1 = rh_1.recv(0).unwrap();

    assert_eq!(unhandled_1.len(), 0);
    assert_eq!(unhandled_2.len(), 1);

    // convo 2 returns the handshake from convo 1
    match unhandled_2[0].payload {
        StacksMessageType::Handshake(ref data) => {
            assert_eq!(handshake_data_1, *data);
        },
        _ => {
            assert!(false);
        }
    };

    // received a valid HandshakeAccept from peer 2: it must echo peer 2's
    // address, services, public key, key expiry, data URL, and heartbeat
    match reply_1.payload {
        StacksMessageType::HandshakeAccept(ref data) => {
            assert_eq!(data.handshake.addrbytes, local_peer_2.addrbytes);
            assert_eq!(data.handshake.port, local_peer_2.port);
            assert_eq!(data.handshake.services, local_peer_2.services);
            assert_eq!(data.handshake.node_public_key, StacksPublicKeyBuffer::from_public_key(&Secp256k1PublicKey::from_private(&local_peer_2.private_key)));
            assert_eq!(data.handshake.expire_block_height, local_peer_2.private_key_expire);
            assert_eq!(data.handshake.data_url, "http://peer2.com".into());
            assert_eq!(data.heartbeat_interval, conn_opts.heartbeat);
        },
        _ => {
            assert!(false);
        }
    };

    // convo_2 got updated with convo_1's peer info, but no heartbeat info
    assert_eq!(convo_2.peer_heartbeat, 3600);
    assert_eq!(convo_2.connection.get_public_key().unwrap(), Secp256k1PublicKey::from_private(&local_peer_1.private_key));
    assert_eq!(convo_2.data_url, "http://peer1.com".into());

    // convo_1 got updated with convo_2's peer info, as well as heartbeat
    assert_eq!(convo_1.peer_heartbeat, conn_opts.heartbeat);
    assert_eq!(convo_1.connection.get_public_key().unwrap(), Secp256k1PublicKey::from_private(&local_peer_2.private_key));
    assert_eq!(convo_1.data_url, "http://peer2.com".into());
}
// A handshake whose key-expiry height is already in the past (12340 < stable
// height 12341) is automatically rejected: peer 1 gets HandshakeReject, and
// neither side records the other's public key.
#[test]
fn convo_handshake_reject() {
    let conn_opts = ConnectionOptions::default();
    let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
    let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081);

    let first_burn_hash = BurnchainHeaderHash::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap();

    // minimal burnchain config; only fields read during handshaking matter here
    let burnchain = Burnchain {
        peer_version: PEER_VERSION,
        network_id: 0,
        chain_name: "bitcoin".to_string(),
        network_name: "testnet".to_string(),
        working_dir: "/nope".to_string(),
        consensus_hash_lifetime: 24,
        stable_confirmations: 7,
        first_block_height: 12300,
        first_block_hash: first_burn_hash.clone(),
    };

    let mut chain_view = BurnchainView {
        burn_block_height: 12348,
        burn_consensus_hash: ConsensusHash::from_hex("1111111111111111111111111111111111111111").unwrap(),
        burn_stable_block_height: 12341,
        burn_stable_consensus_hash: ConsensusHash::from_hex("2222222222222222222222222222222222222222").unwrap(),
        last_consensus_hashes: HashMap::new()
    };
    chain_view.make_test_data();

    let (mut peerdb_1, mut burndb_1, mut chainstate_1) = make_test_chain_dbs("convo_handshake_reject_1", &burnchain, 0x9abcdef0, 12350, "http://peer1.com".into(), &vec![], &vec![]);
    let (mut peerdb_2, mut burndb_2, mut chainstate_2) = make_test_chain_dbs("convo_handshake_reject_2", &burnchain, 0x9abcdef0, 12351, "http://peer2.com".into(), &vec![], &vec![]);

    db_setup(&mut peerdb_1, &mut burndb_1, &socketaddr_1, &chain_view);
    db_setup(&mut peerdb_2, &mut burndb_2, &socketaddr_2, &chain_view);

    let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap();
    let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap();

    let mut convo_1 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_2, &conn_opts, true, 0);
    let mut convo_2 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_1, &conn_opts, true, 0);

    // no peer public keys known yet
    assert!(convo_1.connection.get_public_key().is_none());
    assert!(convo_2.connection.get_public_key().is_none());

    // convo_1 sends a _stale_ handshake to convo_2 (wrong public key)
    let mut handshake_data_1 = HandshakeData::from_local_peer(&local_peer_1);
    handshake_data_1.expire_block_height = 12340;
    let handshake_1 = convo_1.sign_message(&chain_view, &local_peer_1.private_key, StacksMessageType::Handshake(handshake_data_1.clone())).unwrap();

    let mut rh_1 = convo_1.send_signed_request(handshake_1, 1000000).unwrap();

    // convo_2 receives it and automatically rejects it.
    convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2);
    let unhandled_2 = convo_2.chat(&local_peer_2, &mut peerdb_2, &burndb_2, &mut chainstate_2, &chain_view).unwrap();

    // convo_1 has a handshakreject
    convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1);
    let unhandled_1 = convo_1.chat(&local_peer_1, &mut peerdb_1, &burndb_1, &mut chainstate_1, &chain_view).unwrap();

    let reply_1 = rh_1.recv(0).unwrap();

    // nothing forwarded upstream on either side
    assert_eq!(unhandled_1.len(), 0);
    assert_eq!(unhandled_2.len(), 0);

    // received a valid HandshakeReject from peer 2
    match reply_1.payload {
        StacksMessageType::HandshakeReject => {},
        _ => {
            assert!(false);
        }
    };

    // neither peer updated their info on one another
    assert!(convo_1.connection.get_public_key().is_none());
    assert!(convo_2.connection.get_public_key().is_none());
}
// A handshake whose payload was mutated after signing fails signature
// verification: peer 2's chat() errors with InvalidMessage, peer 1's pending
// reply handle aborts with ConnectionBroken, and no keys are exchanged.
//
// (Fix: removed a duplicate, never-read rebinding of `first_burn_hash` that
// shadowed the identical value created above -- dead code.)
#[test]
fn convo_handshake_badsignature() {
    let conn_opts = ConnectionOptions::default();
    let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
    let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081);

    let first_burn_hash = BurnchainHeaderHash::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap();

    // minimal burnchain config; only fields read during handshaking matter here
    let burnchain = Burnchain {
        peer_version: PEER_VERSION,
        network_id: 0,
        chain_name: "bitcoin".to_string(),
        network_name: "testnet".to_string(),
        working_dir: "/nope".to_string(),
        consensus_hash_lifetime: 24,
        stable_confirmations: 7,
        first_block_height: 12300,
        first_block_hash: first_burn_hash.clone(),
    };

    let mut chain_view = BurnchainView {
        burn_block_height: 12348,
        burn_consensus_hash: ConsensusHash::from_hex("1111111111111111111111111111111111111111").unwrap(),
        burn_stable_block_height: 12341,
        burn_stable_consensus_hash: ConsensusHash::from_hex("2222222222222222222222222222222222222222").unwrap(),
        last_consensus_hashes: HashMap::new()
    };
    chain_view.make_test_data();

    let (mut peerdb_1, mut burndb_1, mut chainstate_1) = make_test_chain_dbs("convo_handshake_badsignature_1", &burnchain, 0x9abcdef0, 12350, "http://peer1.com".into(), &vec![], &vec![]);
    let (mut peerdb_2, mut burndb_2, mut chainstate_2) = make_test_chain_dbs("convo_handshake_badsignature_2", &burnchain, 0x9abcdef0, 12351, "http://peer2.com".into(), &vec![], &vec![]);

    db_setup(&mut peerdb_1, &mut burndb_1, &socketaddr_1, &chain_view);
    db_setup(&mut peerdb_2, &mut burndb_2, &socketaddr_2, &chain_view);

    let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap();
    let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap();

    let mut convo_1 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_2, &conn_opts, true, 0);
    let mut convo_2 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_1, &conn_opts, true, 0);

    // no peer public keys known yet
    assert!(convo_1.connection.get_public_key().is_none());
    assert!(convo_2.connection.get_public_key().is_none());

    // convo_1 sends an _invalid_ handshake to convo_2 (bad signature):
    // mutate the payload after signing so the signature no longer matches
    let handshake_data_1 = HandshakeData::from_local_peer(&local_peer_1);
    let mut handshake_1 = convo_1.sign_message(&chain_view, &local_peer_1.private_key, StacksMessageType::Handshake(handshake_data_1.clone())).unwrap();
    match handshake_1.payload {
        StacksMessageType::Handshake(ref mut data) => {
            data.expire_block_height += 1;
        },
        _ => panic!()
    };

    let mut rh_1 = convo_1.send_signed_request(handshake_1, 1000000).unwrap();

    // convo_2 receives it and processes it, and barfs
    convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2);
    let unhandled_2_err = convo_2.chat(&local_peer_2, &mut peerdb_2, &burndb_2, &mut chainstate_2, &chain_view);

    // convo_1 gets a nack and consumes it
    convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1);
    let unhandled_1 = convo_1.chat(&local_peer_1, &mut peerdb_1, &burndb_1, &mut chainstate_1, &chain_view).unwrap();

    // the waiting reply aborts on disconnect
    let reply_1_err = rh_1.recv(0);

    assert_eq!(unhandled_2_err.unwrap_err(), net_error::InvalidMessage);
    assert_eq!(reply_1_err, Err(net_error::ConnectionBroken));
    assert_eq!(unhandled_1.len(), 0);

    // neither peer updated their info on one another
    assert!(convo_1.connection.get_public_key().is_none());
    assert!(convo_2.connection.get_public_key().is_none());
}
// A peer that receives a handshake carrying its own identity (peer 2's data,
// signed with peer 2's key) rejects it: peer 1 gets HandshakeReject, and no
// keys are exchanged.
//
// (Fix: removed a duplicate, never-read rebinding of `first_burn_hash` that
// shadowed the identical value created above -- dead code.)
#[test]
fn convo_handshake_self() {
    let conn_opts = ConnectionOptions::default();
    let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
    let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081);

    let first_burn_hash = BurnchainHeaderHash::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap();

    // minimal burnchain config; only fields read during handshaking matter here
    let burnchain = Burnchain {
        peer_version: PEER_VERSION,
        network_id: 0,
        chain_name: "bitcoin".to_string(),
        network_name: "testnet".to_string(),
        working_dir: "/nope".to_string(),
        consensus_hash_lifetime: 24,
        stable_confirmations: 7,
        first_block_height: 12300,
        first_block_hash: first_burn_hash.clone(),
    };

    let mut chain_view = BurnchainView {
        burn_block_height: 12348,
        burn_consensus_hash: ConsensusHash::from_hex("1111111111111111111111111111111111111111").unwrap(),
        burn_stable_block_height: 12341,
        burn_stable_consensus_hash: ConsensusHash::from_hex("2222222222222222222222222222222222222222").unwrap(),
        last_consensus_hashes: HashMap::new()
    };
    chain_view.make_test_data();

    let (mut peerdb_1, mut burndb_1, mut chainstate_1) = make_test_chain_dbs("convo_handshake_self_1", &burnchain, 0x9abcdef0, 12350, "http://peer1.com".into(), &vec![], &vec![]);
    let (mut peerdb_2, mut burndb_2, mut chainstate_2) = make_test_chain_dbs("convo_handshake_self_2", &burnchain, 0x9abcdef0, 12351, "http://peer2.com".into(), &vec![], &vec![]);

    db_setup(&mut peerdb_1, &mut burndb_1, &socketaddr_1, &chain_view);
    db_setup(&mut peerdb_2, &mut burndb_2, &socketaddr_2, &chain_view);

    let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap();
    let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap();

    let mut convo_1 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_2, &conn_opts, true, 0);
    let mut convo_2 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_1, &conn_opts, true, 0);

    // no peer public keys known yet
    assert!(convo_1.connection.get_public_key().is_none());
    assert!(convo_2.connection.get_public_key().is_none());

    // convo_1 sends a handshake to itself (not allowed): the handshake carries
    // peer 2's own identity and key
    let handshake_data_1 = HandshakeData::from_local_peer(&local_peer_2);
    let handshake_1 = convo_1.sign_message(&chain_view, &local_peer_2.private_key, StacksMessageType::Handshake(handshake_data_1.clone())).unwrap();
    let mut rh_1 = convo_1.send_signed_request(handshake_1, 1000000).unwrap();

    // convo_2 receives it and processes it automatically (consuming it), and give back a handshake reject.
    // NOTE(review): convo_2 runs against peer 1's DBs here (and convo_1 against
    // peer 2's below) -- confirm this cross-wiring is intentional; the DBs are
    // set up symmetrically, so the outcome does not appear to depend on it.
    convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2);
    let unhandled_2 = convo_2.chat(&local_peer_2, &mut peerdb_1, &burndb_1, &mut chainstate_1, &chain_view).unwrap();

    // convo_1 gets a handshake reject and consumes it
    convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1);
    let unhandled_1 = convo_1.chat(&local_peer_1, &mut peerdb_2, &burndb_2, &mut chainstate_2, &chain_view).unwrap();

    // get back handshake reject
    let reply_1 = rh_1.recv(0).unwrap();

    assert_eq!(unhandled_1.len(), 0);
    assert_eq!(unhandled_2.len(), 0);

    // received a valid HandshakeReject from peer 2
    match reply_1.payload {
        StacksMessageType::HandshakeReject => {},
        _ => {
            assert!(false);
        }
    };

    // neither peer updated their info on one another
    assert!(convo_1.connection.get_public_key().is_none());
    assert!(convo_2.connection.get_public_key().is_none());
}
// After a handshake, a Ping is consumed by the control plane (not forwarded
// upstream) and answered with a Pong carrying the same nonce.
//
// (Fix: removed a duplicate, never-read rebinding of `first_burn_hash` that
// shadowed the identical value created above -- dead code.)
#[test]
fn convo_ping() {
    let conn_opts = ConnectionOptions::default();
    let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
    let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081);

    let first_burn_hash = BurnchainHeaderHash::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap();

    // minimal burnchain config; only fields read during handshaking matter here
    let burnchain = Burnchain {
        peer_version: PEER_VERSION,
        network_id: 0,
        chain_name: "bitcoin".to_string(),
        network_name: "testnet".to_string(),
        working_dir: "/nope".to_string(),
        consensus_hash_lifetime: 24,
        stable_confirmations: 7,
        first_block_height: 12300,
        first_block_hash: first_burn_hash.clone(),
    };

    let mut chain_view = BurnchainView {
        burn_block_height: 12348,
        burn_consensus_hash: ConsensusHash::from_hex("1111111111111111111111111111111111111111").unwrap(),
        burn_stable_block_height: 12341,
        burn_stable_consensus_hash: ConsensusHash::from_hex("2222222222222222222222222222222222222222").unwrap(),
        last_consensus_hashes: HashMap::new()
    };
    chain_view.make_test_data();

    let (mut peerdb_1, mut burndb_1, mut chainstate_1) = make_test_chain_dbs("convo_ping_1", &burnchain, 0x9abcdef0, 12350, "http://peer1.com".into(), &vec![], &vec![]);
    let (mut peerdb_2, mut burndb_2, mut chainstate_2) = make_test_chain_dbs("convo_ping_2", &burnchain, 0x9abcdef0, 12351, "http://peer2.com".into(), &vec![], &vec![]);

    db_setup(&mut peerdb_1, &mut burndb_1, &socketaddr_1, &chain_view);
    db_setup(&mut peerdb_2, &mut burndb_2, &socketaddr_2, &chain_view);

    let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap();
    let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap();

    let mut convo_1 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_2, &conn_opts, true, 0);
    let mut convo_2 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_1, &conn_opts, true, 0);

    // convo_1 sends a handshake to convo_2
    let handshake_data_1 = HandshakeData::from_local_peer(&local_peer_1);
    let handshake_1 = convo_1.sign_message(&chain_view, &local_peer_1.private_key, StacksMessageType::Handshake(handshake_data_1.clone())).unwrap();
    let mut rh_handshake_1 = convo_1.send_signed_request(handshake_1.clone(), 1000000).unwrap();

    // convo_1 sends a ping to convo_2
    let ping_data_1 = PingData::new();
    let ping_1 = convo_1.sign_message(&chain_view, &local_peer_1.private_key, StacksMessageType::Ping(ping_data_1.clone())).unwrap();
    let mut rh_ping_1 = convo_1.send_signed_request(ping_1.clone(), 1000000).unwrap();

    // convo_2 receives the handshake and ping and processes both, and since no one is waiting for the handshake, will forward
    // it along to the chat caller (us)
    test_debug!("send handshake {:?}", &handshake_1);
    test_debug!("send ping {:?}", &ping_1);
    convo_send_recv(&mut convo_1, vec![&mut rh_handshake_1, &mut rh_ping_1], &mut convo_2);
    let unhandled_2 = convo_2.chat(&local_peer_2, &mut peerdb_2, &burndb_2, &mut chainstate_2, &chain_view).unwrap();

    // convo_1 has a handshakeaccept
    test_debug!("reply handshake-accept");
    test_debug!("send pong");
    convo_send_recv(&mut convo_2, vec![&mut rh_handshake_1, &mut rh_ping_1], &mut convo_1);
    let unhandled_1 = convo_1.chat(&local_peer_1, &mut peerdb_1, &burndb_1, &mut chainstate_1, &chain_view).unwrap();

    let reply_handshake_1 = rh_handshake_1.recv(0).unwrap();
    let reply_ping_1 = rh_ping_1.recv(0).unwrap();

    assert_eq!(unhandled_1.len(), 0);
    assert_eq!(unhandled_2.len(), 1); // only the handshake is given back. the ping is consumed

    // convo 2 returns the handshake from convo 1
    match unhandled_2[0].payload {
        StacksMessageType::Handshake(ref data) => {
            assert_eq!(handshake_data_1, *data);
        },
        _ => {
            assert!(false);
        }
    };

    // convo 2 replied to convo 1 with a matching pong
    match reply_ping_1.payload {
        StacksMessageType::Pong(ref data) => {
            assert_eq!(data.nonce, ping_data_1.nonce);
        },
        _ => {
            assert!(false);
        }
    }
}
#[test]
fn convo_handshake_ping_loop() {
    // Repeatedly exchange handshake + ping between two conversations, and
    // after each round store a fresh private key for peer 1 in its peer DB.
    let conn_opts = ConnectionOptions::default();
    let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
    let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081);

    let first_burn_hash = BurnchainHeaderHash::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap();

    let burnchain = Burnchain {
        peer_version: PEER_VERSION,
        network_id: 0,
        chain_name: "bitcoin".to_string(),
        network_name: "testnet".to_string(),
        working_dir: "/nope".to_string(),
        consensus_hash_lifetime: 24,
        stable_confirmations: 7,
        first_block_height: 12300,
        first_block_hash: first_burn_hash.clone(),
    };

    let mut chain_view = BurnchainView {
        burn_block_height: 12348,
        burn_consensus_hash: ConsensusHash::from_hex("1111111111111111111111111111111111111111").unwrap(),
        burn_stable_block_height: 12341,
        burn_stable_consensus_hash: ConsensusHash::from_hex("2222222222222222222222222222222222222222").unwrap(),
        last_consensus_hashes: HashMap::new()
    };
    chain_view.make_test_data();

    // (removed an unused, identical re-binding of `first_burn_hash` here)

    let (mut peerdb_1, mut burndb_1, mut chainstate_1) = make_test_chain_dbs("convo_handshake_ping_loop_1", &burnchain, 0x9abcdef0, 12350, "http://peer1.com".into(), &vec![], &vec![]);
    let (mut peerdb_2, mut burndb_2, mut chainstate_2) = make_test_chain_dbs("convo_handshake_ping_loop_2", &burnchain, 0x9abcdef0, 12351, "http://peer2.com".into(), &vec![], &vec![]);

    db_setup(&mut peerdb_1, &mut burndb_1, &socketaddr_1, &chain_view);
    db_setup(&mut peerdb_2, &mut burndb_2, &socketaddr_2, &chain_view);

    let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap();
    let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap();

    let mut convo_1 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_2, &conn_opts, true, 0);
    let mut convo_2 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_1, &conn_opts, true, 1);

    for i in 0..5 {
        // do handshake/ping over and over, with different keys.
        // tests re-keying.

        // convo_1 sends a handshake to convo_2
        let handshake_data_1 = HandshakeData::from_local_peer(&local_peer_1);
        let handshake_1 = convo_1.sign_message(&chain_view, &local_peer_1.private_key, StacksMessageType::Handshake(handshake_data_1.clone())).unwrap();
        let mut rh_handshake_1 = convo_1.send_signed_request(handshake_1, 1000000).unwrap();

        // convo_1 sends a ping to convo_2
        let ping_data_1 = PingData::new();
        let ping_1 = convo_1.sign_message(&chain_view, &local_peer_1.private_key, StacksMessageType::Ping(ping_data_1.clone())).unwrap();
        let mut rh_ping_1 = convo_1.send_signed_request(ping_1, 1000000).unwrap();

        // convo_2 receives the handshake and ping and processes both, and since no one is waiting for the handshake, will forward
        // it along to the chat caller (us)
        convo_send_recv(&mut convo_1, vec![&mut rh_handshake_1, &mut rh_ping_1], &mut convo_2);
        let unhandled_2 = convo_2.chat(&local_peer_2, &mut peerdb_2, &burndb_2, &mut chainstate_2, &chain_view).unwrap();

        // convo_1 has a handshakeaccept
        convo_send_recv(&mut convo_2, vec![&mut rh_handshake_1, &mut rh_ping_1], &mut convo_1);
        let unhandled_1 = convo_1.chat(&local_peer_1, &mut peerdb_1, &burndb_1, &mut chainstate_1, &chain_view).unwrap();

        let reply_handshake_1 = rh_handshake_1.recv(0).unwrap();
        let reply_ping_1 = rh_ping_1.recv(0).unwrap();

        assert_eq!(unhandled_1.len(), 0);
        assert_eq!(unhandled_2.len(), 1);   // only the handshake is given back. the ping is consumed

        // convo 2 returns the handshake from convo 1
        match unhandled_2[0].payload {
            StacksMessageType::Handshake(ref data) => {
                assert_eq!(handshake_data_1, *data);
            },
            _ => {
                assert!(false);
            }
        };

        // convo 2 replied to convo 1 with a matching pong
        match reply_ping_1.payload {
            StacksMessageType::Pong(ref data) => {
                assert_eq!(data.nonce, ping_data_1.nonce);
            },
            _ => {
                assert!(false);
            }
        }

        // received a valid HandshakeAccept from peer 2
        match reply_handshake_1.payload {
            StacksMessageType::HandshakeAccept(ref data) => {
                assert_eq!(data.handshake.addrbytes, local_peer_2.addrbytes);
                assert_eq!(data.handshake.port, local_peer_2.port);
                assert_eq!(data.handshake.services, local_peer_2.services);
                assert_eq!(data.handshake.node_public_key, StacksPublicKeyBuffer::from_public_key(&Secp256k1PublicKey::from_private(&local_peer_2.private_key)));
                assert_eq!(data.handshake.expire_block_height, local_peer_2.private_key_expire);
                assert_eq!(data.heartbeat_interval, conn_opts.heartbeat);
            },
            _ => {
                assert!(false);
            }
        };

        // convo_2 got updated with convo_1's peer info, and default heartbeat filled in
        assert_eq!(convo_2.peer_heartbeat, 3600);
        assert_eq!(convo_2.connection.get_public_key().unwrap().to_bytes_compressed(), Secp256k1PublicKey::from_private(&local_peer_1.private_key).to_bytes_compressed());

        // convo_1 got updated with convo_2's peer info, as well as heartbeat
        assert_eq!(convo_1.peer_heartbeat, conn_opts.heartbeat);
        assert_eq!(convo_1.connection.get_public_key().unwrap().to_bytes_compressed(), Secp256k1PublicKey::from_private(&local_peer_2.private_key).to_bytes_compressed());

        // regenerate keys and expiries in peer 1
        // NOTE(review): `local_peer_1` is fetched once before the loop and never
        // reloaded, so subsequent iterations still sign with the original key even
        // after this re-key -- confirm whether the re-keying path is actually
        // exercised here.
        let new_privkey = Secp256k1PrivateKey::new();
        {
            let mut tx = peerdb_1.tx_begin().unwrap();
            PeerDB::set_local_private_key(&mut tx, &new_privkey, (12350 + i) as u64).unwrap();
            tx.commit().unwrap();
        }
    }
}
#[test]
fn convo_nack_unsolicited() {
    // A request from a peer that has not yet handshaken must be answered with
    // a NACK (HandshakeRequired), and neither conversation's peer state may be
    // updated as a side effect.
    let conn_opts = ConnectionOptions::default();
    let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
    let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081);

    let first_burn_hash = BurnchainHeaderHash::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap();

    let burnchain = Burnchain {
        peer_version: PEER_VERSION,
        network_id: 0,
        chain_name: "bitcoin".to_string(),
        network_name: "testnet".to_string(),
        working_dir: "/nope".to_string(),
        consensus_hash_lifetime: 24,
        stable_confirmations: 7,
        first_block_height: 12300,
        first_block_hash: first_burn_hash.clone(),
    };

    let mut chain_view = BurnchainView {
        burn_block_height: 12348,
        burn_consensus_hash: ConsensusHash::from_hex("1111111111111111111111111111111111111111").unwrap(),
        burn_stable_block_height: 12341,
        burn_stable_consensus_hash: ConsensusHash::from_hex("2222222222222222222222222222222222222222").unwrap(),
        last_consensus_hashes: HashMap::new()
    };
    chain_view.make_test_data();

    // (removed an unused, identical re-binding of `first_burn_hash` here)

    let (mut peerdb_1, mut burndb_1, mut chainstate_1) = make_test_chain_dbs("convo_nack_unsolicited_1", &burnchain, 0x9abcdef0, 12350, "http://peer1.com".into(), &vec![], &vec![]);
    let (mut peerdb_2, mut burndb_2, mut chainstate_2) = make_test_chain_dbs("convo_nack_unsolicited_2", &burnchain, 0x9abcdef0, 12351, "http://peer2.com".into(), &vec![], &vec![]);

    db_setup(&mut peerdb_1, &mut burndb_1, &socketaddr_1, &chain_view);
    db_setup(&mut peerdb_2, &mut burndb_2, &socketaddr_2, &chain_view);

    let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap();
    let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap();

    let mut convo_1 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_2, &conn_opts, true, 0);
    let mut convo_2 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_1, &conn_opts, true, 0);

    // no peer public keys known yet
    assert!(convo_1.connection.get_public_key().is_none());
    assert!(convo_2.connection.get_public_key().is_none());

    // convo_1 sends a ping to convo_2 *without* having handshaken first
    let ping_data_1 = PingData::new();
    let ping_1 = convo_1.sign_message(&chain_view, &local_peer_1.private_key, StacksMessageType::Ping(ping_data_1.clone())).unwrap();
    let mut rh_ping_1 = convo_1.send_signed_request(ping_1, 1000000).unwrap();

    // convo_2 will reply with a nack since peer_1 hasn't authenticated yet
    convo_send_recv(&mut convo_1, vec![&mut rh_ping_1], &mut convo_2);
    let unhandled_2 = convo_2.chat(&local_peer_2, &mut peerdb_2, &burndb_2, &mut chainstate_2, &chain_view).unwrap();

    // convo_1 has a nack
    convo_send_recv(&mut convo_2, vec![&mut rh_ping_1], &mut convo_1);
    let unhandled_1 = convo_1.chat(&local_peer_1, &mut peerdb_1, &burndb_1, &mut chainstate_1, &chain_view).unwrap();

    let reply_1 = rh_ping_1.recv(0).unwrap();

    // the ping is consumed (nacked), so nothing is forwarded to either caller
    assert_eq!(unhandled_1.len(), 0);
    assert_eq!(unhandled_2.len(), 0);

    // convo_1 got a NACK demanding a handshake
    match reply_1.payload {
        StacksMessageType::Nack(ref data) => {
            assert_eq!(data.error_code, NackErrorCodes::HandshakeRequired);
        },
        _ => {
            assert!(false);
        }
    };

    // convo_2 did NOT get updated with convo_1's peer info
    assert_eq!(convo_2.peer_heartbeat, 0);
    assert!(convo_2.connection.get_public_key().is_none());

    // convo_1 did NOT get updated
    // (fixed: this previously re-checked convo_2's public key instead of convo_1's)
    assert_eq!(convo_1.peer_heartbeat, 0);
    assert!(convo_1.connection.get_public_key().is_none());
}
#[test]
#[ignore]
fn convo_handshake_getblocksinv() {
    // After a successful handshake, a GetBlocksInv for the canonical tip must be
    // answered with a BlocksInv, and a GetBlocksInv for an unknown consensus hash
    // must be answered with a NACK (NoSuchBurnchainBlock).
    let conn_opts = ConnectionOptions::default();
    let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
    let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081);

    let first_burn_hash = BurnchainHeaderHash::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap();

    let burnchain = Burnchain {
        peer_version: PEER_VERSION,
        network_id: 0,
        chain_name: "bitcoin".to_string(),
        network_name: "testnet".to_string(),
        working_dir: "/nope".to_string(),
        consensus_hash_lifetime: 24,
        stable_confirmations: 7,
        first_block_height: 12300,
        first_block_hash: first_burn_hash.clone(),
    };

    let mut chain_view = BurnchainView {
        burn_block_height: 12348,
        burn_consensus_hash: ConsensusHash::from_hex("1111111111111111111111111111111111111111").unwrap(),
        burn_stable_block_height: 12341,
        burn_stable_consensus_hash: ConsensusHash::from_hex("2222222222222222222222222222222222222222").unwrap(),
        last_consensus_hashes: HashMap::new()
    };
    chain_view.make_test_data();

    // Fixed: these DB names were "convo_handshake_accept_1/2", which collided
    // with the test DBs of the handshake-accept test; use this test's own name.
    let (mut peerdb_1, mut burndb_1, mut chainstate_1) = make_test_chain_dbs("convo_handshake_getblocksinv_1", &burnchain, 0x9abcdef0, 12350, "http://peer1.com".into(), &vec![], &vec![]);
    let (mut peerdb_2, mut burndb_2, mut chainstate_2) = make_test_chain_dbs("convo_handshake_getblocksinv_2", &burnchain, 0x9abcdef0, 12351, "http://peer2.com".into(), &vec![], &vec![]);

    db_setup(&mut peerdb_1, &mut burndb_1, &socketaddr_1, &chain_view);
    db_setup(&mut peerdb_2, &mut burndb_2, &socketaddr_2, &chain_view);

    let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap();
    let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap();

    let mut convo_1 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_2, &conn_opts, true, 0);
    let mut convo_2 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_1, &conn_opts, true, 0);

    // no peer public keys known yet
    assert!(convo_1.connection.get_public_key().is_none());
    assert!(convo_2.connection.get_public_key().is_none());

    // convo_1 sends a handshake to convo_2
    let handshake_data_1 = HandshakeData::from_local_peer(&local_peer_1);
    let handshake_1 = convo_1.sign_message(&chain_view, &local_peer_1.private_key, StacksMessageType::Handshake(handshake_data_1.clone())).unwrap();
    let mut rh_1 = convo_1.send_signed_request(handshake_1, 1000000).unwrap();

    // convo_2 receives it and processes it, and since no one is waiting for it, will forward
    // it along to the chat caller (us)
    test_debug!("send handshake");
    convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2);
    let unhandled_2 = convo_2.chat(&local_peer_2, &mut peerdb_2, &burndb_2, &mut chainstate_2, &chain_view).unwrap();

    // convo_1 has a handshakeaccept
    test_debug!("send handshake-accept");
    convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1);
    let unhandled_1 = convo_1.chat(&local_peer_1, &mut peerdb_1, &burndb_1, &mut chainstate_1, &chain_view).unwrap();

    let reply_1 = rh_1.recv(0).unwrap();

    assert_eq!(unhandled_1.len(), 0);
    assert_eq!(unhandled_2.len(), 1);

    // convo 2 returns the handshake from convo 1
    match unhandled_2[0].payload {
        StacksMessageType::Handshake(ref data) => {
            assert_eq!(handshake_data_1, *data);
        },
        _ => {
            assert!(false);
        }
    };

    // received a valid HandshakeAccept from peer 2
    match reply_1.payload {
        StacksMessageType::HandshakeAccept(ref data) => {
            assert_eq!(data.handshake.addrbytes, local_peer_2.addrbytes);
            assert_eq!(data.handshake.port, local_peer_2.port);
            assert_eq!(data.handshake.services, local_peer_2.services);
            assert_eq!(data.handshake.node_public_key, StacksPublicKeyBuffer::from_public_key(&Secp256k1PublicKey::from_private(&local_peer_2.private_key)));
            assert_eq!(data.handshake.expire_block_height, local_peer_2.private_key_expire);
            assert_eq!(data.handshake.data_url, "http://peer2.com".into());
            assert_eq!(data.heartbeat_interval, conn_opts.heartbeat);
        },
        _ => {
            assert!(false);
        }
    };

    // convo_1 sends a getblocksinv to convo_2 for all the blocks
    let convo_1_chaintip = BurnDB::get_canonical_burn_chain_tip(burndb_1.conn()).unwrap();
    let getblocksdata_1 = GetBlocksInv { consensus_hash: convo_1_chaintip.consensus_hash, num_blocks: BLOCKS_INV_DATA_MAX_BITLEN as u16 };
    let getblocksdata_1_msg = convo_1.sign_message(&chain_view, &local_peer_1.private_key, StacksMessageType::GetBlocksInv(getblocksdata_1.clone())).unwrap();
    let mut rh_1 = convo_1.send_signed_request(getblocksdata_1_msg, 10000000).unwrap();

    // convo_2 receives it, and handles it
    test_debug!("send getblocksinv");
    convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2);
    let unhandled_2 = convo_2.chat(&local_peer_2, &mut peerdb_2, &burndb_2, &mut chainstate_2, &chain_view).unwrap();

    // convo_1 gets back a blocksinv message
    test_debug!("send blocksinv");
    convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1);
    let unhandled_1 = convo_1.chat(&local_peer_1, &mut peerdb_1, &burndb_1, &mut chainstate_1, &chain_view).unwrap();

    let reply_1 = rh_1.recv(0).unwrap();

    // no unhandled messages forwarded
    assert_eq!(unhandled_1, vec![]);
    assert_eq!(unhandled_2, vec![]);

    // convo 2 returned a block-inv for all blocks
    match reply_1.payload {
        StacksMessageType::BlocksInv(ref data) => {
            assert_eq!(data.bitlen, BLOCKS_INV_DATA_MAX_BITLEN as u16);
            test_debug!("data: {:?}", data);

            // all burn blocks had sortitions, but we have no Stacks blocks :(
            for i in 0..data.bitlen {
                assert!(!data.has_ith_block(i));
            }
        },
        _ => {
            assert!(false);
        }
    }

    // request for a non-existent consensus hash
    let getblocksdata_diverged_1 = GetBlocksInv { consensus_hash: ConsensusHash([0xff; 20]), num_blocks: BLOCKS_INV_DATA_MAX_BITLEN as u16 };
    let getblocksdata_diverged_1_msg = convo_1.sign_message(&chain_view, &local_peer_1.private_key, StacksMessageType::GetBlocksInv(getblocksdata_diverged_1.clone())).unwrap();
    let mut rh_1 = convo_1.send_signed_request(getblocksdata_diverged_1_msg, 10000000).unwrap();

    // convo_2 receives it, and handles it
    test_debug!("send getblocksinv (diverged)");
    convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2);
    let unhandled_2 = convo_2.chat(&local_peer_2, &mut peerdb_2, &burndb_2, &mut chainstate_2, &chain_view).unwrap();

    // convo_1 gets back a nack message
    test_debug!("send nack (diverged)");
    convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1);
    let unhandled_1 = convo_1.chat(&local_peer_1, &mut peerdb_1, &burndb_1, &mut chainstate_1, &chain_view).unwrap();

    let reply_1 = rh_1.recv(0).unwrap();

    // no unhandled messages forwarded
    assert_eq!(unhandled_1, vec![]);
    assert_eq!(unhandled_2, vec![]);

    // convo 2 returned a nack with the appropriate error message
    match reply_1.payload {
        StacksMessageType::Nack(ref data) => {
            assert_eq!(data.error_code, NackErrorCodes::NoSuchBurnchainBlock);
        },
        _ => {
            assert!(false);
        }
    }
}
#[test]
fn convo_natpunch() {
    // A NatPunchRequest must be answered with a NatPunchReply that echoes the
    // nonce and reports the requester's address as seen by the responder.
    let conn_opts = ConnectionOptions::default();
    // note: peer 1 is the "remote" address here and peer 2 is localhost
    let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081);
    let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);

    let first_burn_hash = BurnchainHeaderHash::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap();

    let burnchain = Burnchain {
        peer_version: PEER_VERSION,
        network_id: 0,
        chain_name: "bitcoin".to_string(),
        network_name: "testnet".to_string(),
        working_dir: "/nope".to_string(),
        consensus_hash_lifetime: 24,
        stable_confirmations: 7,
        first_block_height: 12300,
        first_block_hash: first_burn_hash.clone(),
    };

    let mut chain_view = BurnchainView {
        burn_block_height: 12348,
        burn_consensus_hash: ConsensusHash::from_hex("1111111111111111111111111111111111111111").unwrap(),
        burn_stable_block_height: 12341,
        burn_stable_consensus_hash: ConsensusHash::from_hex("2222222222222222222222222222222222222222").unwrap(),
        last_consensus_hashes: HashMap::new()
    };
    chain_view.make_test_data();

    // (removed an unused, identical re-binding of `first_burn_hash` here)

    let (mut peerdb_1, mut burndb_1, mut chainstate_1) = make_test_chain_dbs("convo_natpunch_1", &burnchain, 0x9abcdef0, 12352, "http://peer1.com".into(), &vec![], &vec![]);
    let (mut peerdb_2, mut burndb_2, mut chainstate_2) = make_test_chain_dbs("convo_natpunch_2", &burnchain, 0x9abcdef0, 12353, "http://peer2.com".into(), &vec![], &vec![]);

    db_setup(&mut peerdb_1, &mut burndb_1, &socketaddr_1, &chain_view);
    db_setup(&mut peerdb_2, &mut burndb_2, &socketaddr_2, &chain_view);

    let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap();
    let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap();

    let mut convo_1 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_2, &conn_opts, true, 0);
    let mut convo_2 = ConversationP2P::new(123, 456, &burnchain, &socketaddr_1, &conn_opts, true, 0);

    // convo_1 sends natpunch request to convo_2
    let natpunch_1 = convo_1.sign_message(&chain_view, &local_peer_1.private_key, StacksMessageType::NatPunchRequest(0x12345678)).unwrap();
    let mut rh_natpunch_1 = convo_1.send_signed_request(natpunch_1.clone(), 1000000).unwrap();

    // convo_2 receives the natpunch request and processes it
    test_debug!("send natpunch {:?}", &natpunch_1);
    convo_send_recv(&mut convo_1, vec![&mut rh_natpunch_1], &mut convo_2);
    let unhandled_2 = convo_2.chat(&local_peer_2, &mut peerdb_2, &burndb_2, &mut chainstate_2, &chain_view).unwrap();

    // convo_1 gets back a natpunch reply
    test_debug!("reply natpunch-reply");
    convo_send_recv(&mut convo_2, vec![&mut rh_natpunch_1], &mut convo_1);
    let unhandled_1 = convo_1.chat(&local_peer_1, &mut peerdb_1, &burndb_1, &mut chainstate_1, &chain_view).unwrap();

    let natpunch_reply_1 = rh_natpunch_1.recv(0).unwrap();

    // handled and consumed
    assert_eq!(unhandled_1.len(), 0);
    assert_eq!(unhandled_2.len(), 0);

    // convo_2 replies the natpunch data for convo_1 -- i.e. what convo_2 thinks convo_1's IP
    // address is
    match natpunch_reply_1.payload {
        StacksMessageType::NatPunchReply(ref data) => {
            assert_eq!(data.addrbytes, PeerAddress::from_socketaddr(&socketaddr_1));
            assert_eq!(data.nonce, 0x12345678);
        },
        _ => {
            assert!(false);
        }
    }
}
#[test]
fn convo_is_preamble_valid() {
    // Exercise each preamble-rejection rule: wrong network ID, stale stable block
    // height, a peer that is too far ahead, and mismatched consensus hashes.
    let conn_opts = ConnectionOptions::default();
    let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
    let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081);

    let first_burn_hash = BurnchainHeaderHash::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap();

    let burnchain = Burnchain {
        peer_version: PEER_VERSION,
        network_id: 0,
        chain_name: "bitcoin".to_string(),
        network_name: "testnet".to_string(),
        working_dir: "/nope".to_string(),
        consensus_hash_lifetime: 24,
        stable_confirmations: 7,
        first_block_height: 12300,
        first_block_hash: first_burn_hash.clone(),
    };

    let mut chain_view = BurnchainView {
        burn_block_height: 12348,
        burn_consensus_hash: ConsensusHash::from_hex("1111111111111111111111111111111111111111").unwrap(),
        burn_stable_block_height: 12341,
        burn_stable_consensus_hash: ConsensusHash::from_hex("2222222222222222222222222222222222222222").unwrap(),
        last_consensus_hashes: HashMap::new()
    };
    chain_view.make_test_data();

    let mut peerdb_1 = PeerDB::connect_memory(0x9abcdef0, 0, 12350, "http://peer1.com".into(), &vec![], &vec![]).unwrap();
    let mut burndb_1 = BurnDB::connect_test(12300, &first_burn_hash).unwrap();
    let mut burndb_2 = BurnDB::connect_test(12300, &first_burn_hash).unwrap();

    db_setup(&mut peerdb_1, &mut burndb_1, &socketaddr_1, &chain_view);

    let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap();

    // network ID check
    {
        let mut convo_bad = ConversationP2P::new(123, 456, &burnchain, &socketaddr_2, &conn_opts, true, 0);

        let ping_data = PingData::new();
        // sign with a mismatched network ID, then restore it
        convo_bad.network_id += 1;
        let ping_bad = convo_bad.sign_message(&chain_view, &local_peer_1.private_key, StacksMessageType::Ping(ping_data.clone())).unwrap();
        convo_bad.network_id -= 1;

        assert_eq!(convo_bad.is_preamble_valid(&ping_bad, &chain_view), Err(net_error::InvalidMessage));
    }

    // stable block height check
    {
        let mut convo_bad = ConversationP2P::new(123, 456, &burnchain, &socketaddr_2, &conn_opts, true, 0);

        let ping_data = PingData::new();

        let mut chain_view_bad = chain_view.clone();
        chain_view_bad.burn_stable_block_height -= 1;

        let ping_bad = convo_bad.sign_message(&chain_view_bad, &local_peer_1.private_key, StacksMessageType::Ping(ping_data.clone())).unwrap();

        assert_eq!(convo_bad.is_preamble_valid(&ping_bad, &chain_view), Err(net_error::InvalidMessage));
    }

    // node is too far ahead of us
    {
        let mut convo_bad = ConversationP2P::new(123, 456, &burnchain, &socketaddr_2, &conn_opts, true, 0);

        let ping_data = PingData::new();

        let mut chain_view_bad = chain_view.clone();
        chain_view_bad.burn_stable_block_height += MAX_NEIGHBOR_BLOCK_DELAY + 1 + burnchain.stable_confirmations as u64;
        chain_view_bad.burn_block_height += MAX_NEIGHBOR_BLOCK_DELAY + 1 + burnchain.stable_confirmations as u64;

        let ping_bad = convo_bad.sign_message(&chain_view_bad, &local_peer_1.private_key, StacksMessageType::Ping(ping_data.clone())).unwrap();

        chain_view_bad.burn_stable_block_height -= MAX_NEIGHBOR_BLOCK_DELAY + 1 + burnchain.stable_confirmations as u64;
        chain_view_bad.burn_block_height -= MAX_NEIGHBOR_BLOCK_DELAY + 1 + burnchain.stable_confirmations as u64;

        // NOTE(review): this sets up burndb_2 against peerdb_1 -- confirm the
        // mixed pairing is intentional for this check.
        db_setup(&mut peerdb_1, &mut burndb_2, &socketaddr_2, &chain_view_bad);

        // too-far-ahead is not an error, just "not yet valid"
        assert_eq!(convo_bad.is_preamble_valid(&ping_bad, &chain_view), Ok(false));
    }

    // unstable consensus hash mismatch
    {
        let mut convo_bad = ConversationP2P::new(123, 456, &burnchain, &socketaddr_2, &conn_opts, true, 0);

        let ping_data = PingData::new();

        let mut chain_view_bad = chain_view.clone();
        chain_view_bad.burn_consensus_hash = ConsensusHash::from_hex("3333333333333333333333333333333333333333").unwrap();
        chain_view_bad.last_consensus_hashes.insert(chain_view_bad.burn_block_height, chain_view_bad.burn_consensus_hash.clone());

        let ping_bad = convo_bad.sign_message(&chain_view_bad, &local_peer_1.private_key, StacksMessageType::Ping(ping_data.clone())).unwrap();

        // considered valid as long as the stable consensus hash is valid
        assert_eq!(convo_bad.is_preamble_valid(&ping_bad, &chain_view), Ok(true));
    }

    // stable consensus hash mismatch
    {
        let mut convo_bad = ConversationP2P::new(123, 456, &burnchain, &socketaddr_2, &conn_opts, true, 0);

        let ping_data = PingData::new();

        let mut chain_view_bad = chain_view.clone();
        chain_view_bad.burn_stable_consensus_hash = ConsensusHash::from_hex("1111111111111111111111111111111111111112").unwrap();
        chain_view_bad.last_consensus_hashes.insert(chain_view_bad.burn_stable_block_height, chain_view_bad.burn_stable_consensus_hash.clone());

        let ping_bad = convo_bad.sign_message(&chain_view_bad, &local_peer_1.private_key, StacksMessageType::Ping(ping_data.clone())).unwrap();

        assert_eq!(convo_bad.is_preamble_valid(&ping_bad, &chain_view), Err(net_error::InvalidMessage));
    }
}
#[test]
fn convo_process_relayers() {
    // Verify relayer-list validation: a message whose relay path contains a
    // cycle, or that was already relayed by this node itself, is rejected;
    // an acyclic path of other peers is accepted and recorded in the stats.
    let conn_opts = ConnectionOptions::default();
    let socketaddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8090);

    let first_burn_hash = BurnchainHeaderHash::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap();
    let burnchain = Burnchain {
        peer_version: PEER_VERSION,
        network_id: 0,
        chain_name: "bitcoin".to_string(),
        network_name: "testnet".to_string(),
        working_dir: "/nope".to_string(),
        consensus_hash_lifetime: 24,
        stable_confirmations: 7,
        first_block_height: 12300,
        first_block_hash: first_burn_hash.clone(),
    };

    let mut chain_view = BurnchainView {
        burn_block_height: 12348,
        burn_consensus_hash: ConsensusHash::from_hex("1111111111111111111111111111111111111111").unwrap(),
        burn_stable_block_height: 12341,
        burn_stable_consensus_hash: ConsensusHash::from_hex("2222222222222222222222222222222222222222").unwrap(),
        last_consensus_hashes: HashMap::new()
    };
    chain_view.make_test_data();

    let local_peer = LocalPeer::new(123, burnchain.network_id, PeerAddress::from_ipv4(127, 0, 0, 1), NETWORK_P2P_PORT, None, get_epoch_time_secs() + 123456, UrlString::try_from("http://foo.com").unwrap());
    let mut convo = ConversationP2P::new(123, 456, &burnchain, &socketaddr, &conn_opts, true, 0);

    // any signed message will do; the relayer checks only look at the preamble
    let payload = StacksMessageType::Nack(NackData { error_code: 123 });
    let msg = convo.sign_reply(&chain_view, &local_peer.private_key, payload, 123).unwrap();

    // cycles -- the same peer ([0u8; 16]:123 / zero key hash) appears via a
    // repeated public key hash, so this relay path must be rejected
    let relay_cycles = vec![
        RelayData {
            peer: NeighborAddress { addrbytes: PeerAddress([0u8; 16]), port: 123, public_key_hash: Hash160([0u8; 20]) },
            seq: 123,
        },
        RelayData {
            peer: NeighborAddress { addrbytes: PeerAddress([1u8; 16]), port: 456, public_key_hash: Hash160([0u8; 20]) },
            seq: 456,
        }
    ];

    // contains localpeer -- a path that includes our own address/key must be rejected
    let self_sent = vec![
        RelayData {
            peer: NeighborAddress { addrbytes: local_peer.addrbytes.clone(), port: local_peer.port, public_key_hash: Hash160::from_data(&StacksPublicKey::from_private(&local_peer.private_key).to_bytes()) },
            seq: 789
        }
    ];

    // allowed -- two distinct peers, no cycle, does not include us
    let mut relayers = vec![
        RelayData {
            peer: NeighborAddress { addrbytes: PeerAddress([0u8; 16]), port: 123, public_key_hash: Hash160([0u8; 20]) },
            seq: 123,
        },
        RelayData {
            peer: NeighborAddress { addrbytes: PeerAddress([1u8; 16]), port: 456, public_key_hash: Hash160([1u8; 20]) },
            seq: 456,
        },
    ];

    assert!(!convo.process_relayers(&local_peer, &msg.preamble, relay_cycles));
    assert!(!convo.process_relayers(&local_peer, &msg.preamble, self_sent));
    assert!(convo.process_relayers(&local_peer, &msg.preamble, relayers.clone()));

    // stats updated -- one entry per accepted relayer; take_relayers() drains them
    assert_eq!(convo.stats.relayed_messages.len(), 2);
    let relayer_map = convo.stats.take_relayers();
    assert_eq!(convo.stats.relayed_messages.len(), 0);

    // each accepted relayer is credited with one message of the payload's size
    // (payload_len less the 1-byte type ID, per the assertion below)
    for r in relayers.drain(..) {
        assert!(relayer_map.contains_key(&r.peer));
        let stats = relayer_map.get(&r.peer).unwrap();
        assert_eq!(stats.num_messages, 1);
        assert_eq!(stats.num_bytes, (msg.preamble.payload_len - 1) as u64);
    }
}
}
// TODO: test bandwidth limits<|fim▁end|>
|
impl ConversationP2P {
/// Create an unconnected conversation
pub fn new(network_id: u32, version: u32, burnchain: &Burnchain, peer_addr: &SocketAddr, conn_opts: &ConnectionOptions, outbound: bool, conn_id: usize) -> ConversationP2P {
ConversationP2P {
|
<|file_name|>test_angles.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test initalization and other aspects of Angle and subclasses"""
import threading
import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
import astropy.units as u
from astropy.coordinates.angles import Longitude, Latitude, Angle
from astropy.coordinates.errors import (
IllegalSecondError, IllegalMinuteError, IllegalHourError,
IllegalSecondWarning, IllegalMinuteWarning)
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_create_angles():
    """
    Tests creating and accessing Angle objects
    """
    ''' The "angle" is a fundamental object. The internal
    representation is stored in radians, but this is transparent to the user.
    Units *must* be specified rather than a default value be assumed. This is
    as much for self-documenting code as anything else.

    Angle objects simply represent a single angular coordinate. More specific
    angular coordinates (e.g. Longitude, Latitude) are subclasses of Angle.'''

    # Many equivalent spellings of the same angle, 54.12412 degrees.
    a1 = Angle(54.12412, unit=u.degree)
    a2 = Angle("54.12412", unit=u.degree)
    a3 = Angle("54:07:26.832", unit=u.degree)
    a4 = Angle("54.12412 deg")
    a5 = Angle("54.12412 degrees")
    a6 = Angle("54.12412°")  # because we like Unicode
    a7 = Angle((54, 7, 26.832), unit=u.degree)
    a8 = Angle("54°07'26.832\"")

    # A (deg, min, sec) *tuple* is parsed as one sexagesimal angle (see a7),
    # while a *list* of values creates an array of separate angles, one per
    # element -- as the assertions on a9 below demonstrate.
    a9 = Angle([54, 7, 26.832], unit=u.degree)
    assert_allclose(a9.value, [54, 7, 26.832])
    assert a9.unit is u.degree

    # The same angle expressed in hours, in several formats.
    a10 = Angle(3.60827466667, unit=u.hour)
    a11 = Angle("3:36:29.7888000120", unit=u.hour)
    a12 = Angle((3, 36, 29.7888000120), unit=u.hour)  # *must* be a tuple

    # Regression test for #5001
    a13 = Angle((3, 36, 29.7888000120), unit='hour')

    Angle(0.944644098745, unit=u.radian)

    with pytest.raises(u.UnitsError):
        Angle(54.12412)
        # raises an exception because this is ambiguous

    with pytest.raises(u.UnitsError):
        Angle(54.12412, unit=u.m)

    with pytest.raises(ValueError):
        Angle(12.34, unit="not a unit")

    a14 = Angle("03h36m29.7888000120")  # no trailing 's', but unambiguous

    a15 = Angle("5h4m3s")  # single digits, no decimal
    assert a15.unit == u.hourangle

    a16 = Angle("1 d")
    a17 = Angle("1 degree")

    assert a16.degree == 1
    assert a17.degree == 1

    # Degrees with fractional arcminutes and no seconds field.
    a18 = Angle("54 07.4472", unit=u.degree)
    a19 = Angle("54:07.4472", unit=u.degree)
    a20 = Angle("54d07.4472m", unit=u.degree)
    a21 = Angle("3h36m", unit=u.hour)
    a22 = Angle("3.6h", unit=u.hour)
    # Signs may be separated from the value by whitespace.
    a23 = Angle("- 3h", unit=u.hour)
    a24 = Angle("+ 3h", unit=u.hour)

    # ensure the above angles that should match do
    assert a1 == a2 == a3 == a4 == a5 == a6 == a7 == a8 == a18 == a19 == a20
    assert_allclose(a1.radian, a2.radian)
    assert_allclose(a2.degree, a3.degree)
    assert_allclose(a3.radian, a4.radian)
    assert_allclose(a4.radian, a5.radian)
    assert_allclose(a5.radian, a6.radian)
    assert_allclose(a6.radian, a7.radian)

    assert_allclose(a10.degree, a11.degree)
    assert a11 == a12 == a13 == a14
    assert a21 == a22
    assert a23 == -a24

    # check for illegal ranges / values
    with pytest.raises(IllegalSecondError):
        a = Angle("12 32 99", unit=u.degree)

    with pytest.raises(IllegalMinuteError):
        a = Angle("12 99 23", unit=u.degree)

    with pytest.raises(IllegalSecondError):
        a = Angle("12 32 99", unit=u.hour)

    with pytest.raises(IllegalMinuteError):
        a = Angle("12 99 23", unit=u.hour)

    with pytest.raises(IllegalHourError):
        a = Angle("99 25 51.0", unit=u.hour)

    with pytest.raises(ValueError):
        a = Angle("12 25 51.0xxx", unit=u.hour)

    with pytest.raises(ValueError):
        a = Angle("12h34321m32.2s")

    assert a1 is not None
def test_angle_from_view():
    """Viewing a Quantity as Angle keeps unit and values for angular units,
    and raises UnitTypeError for non-angular units."""
    degrees = np.arange(3.) * u.deg
    viewed = degrees.view(Angle)
    assert type(viewed) is Angle
    assert viewed.unit is degrees.unit
    assert np.all(viewed == degrees)

    lengths = np.arange(4) * u.m
    with pytest.raises(u.UnitTypeError):
        lengths.view(Angle)
def test_angle_ops():
    """
    Tests operations on Angle objects
    """

    # Angles can be added and subtracted. Multiplication and division by a
    # scalar is also permitted. A negative operator is also valid. All of
    # these operate in a single dimension. Attempting to multiply or divide two
    # Angle objects will return a quantity. An exception will be raised if it
    # is attempted to store output with a non-angular unit in an Angle [#2718].

    a1 = Angle(3.60827466667, unit=u.hour)
    a2 = Angle("54:07:26.832", unit=u.degree)
    a1 + a2  # creates new Angle object
    a1 - a2
    -a1

    assert_allclose((a1 * 2).hour, 2 * 3.6082746666700003)
    assert abs((a1 / 3.123456).hour - 3.60827466667 / 3.123456) < 1e-10

    # commutativity
    assert (2 * a1).hour == (a1 * 2).hour

    a3 = Angle(a1)  # makes a *copy* of the object, but identical content as a1
    assert_allclose(a1.radian, a3.radian)
    assert a1 is not a3

    # abs() of a negated angle round-trips to the original
    a4 = abs(-a1)
    assert a4.radian == a1.radian

    # comparison operators work across construction styles/units
    a5 = Angle(5.0, unit=u.hour)
    assert a5 > a1
    assert a5 >= a1
    assert a1 < a5
    assert a1 <= a5

    # check operations with non-angular result give Quantity.
    a6 = Angle(45., u.degree)
    a7 = a6 * a5
    assert type(a7) is u.Quantity

    # but those with angular result yield Angle.
    # (a9 is regression test for #5327)
    a8 = a1 + 1.*u.deg
    assert type(a8) is Angle
    a9 = 1.*u.deg + a1
    assert type(a9) is Angle

    # in-place ops / ufunc output that would leave a non-angular unit in an
    # Angle must raise rather than silently corrupt the object
    with pytest.raises(TypeError):
        a6 *= a5

    with pytest.raises(TypeError):
        a6 *= u.m

    with pytest.raises(TypeError):
        np.sin(a6, out=a6)
def test_angle_methods():
    """Statistics methods on Angle arrays return the right type and value.

    Most methods are covered by the Quantity tests; these specific ones
    previously caused problems (#8368). ``var`` is the one method whose
    result is a plain Quantity (its unit is degree**2, not angular).
    """
    angle = Angle([0., 2.], 'deg')
    cases = [
        ('mean', Angle, 1. * u.degree),
        ('std', Angle, 1. * u.degree),
        ('var', u.Quantity, 1. * u.degree ** 2),
        ('ptp', Angle, 2. * u.degree),
        ('max', Angle, 2. * u.degree),
        ('min', Angle, 0. * u.degree),
    ]
    for method_name, expected_type, expected_value in cases:
        result = getattr(angle, method_name)()
        assert type(result) is expected_type
        assert result == expected_value
def test_angle_convert():
    """
    Test unit conversion of Angle objects
    """
    angle = Angle("54.12412", unit=u.degree)

    assert_allclose(angle.hour, 3.60827466667)
    assert_allclose(angle.radian, 0.944644098745)
    assert_allclose(angle.degree, 54.12412)

    assert len(angle.hms) == 3
    assert isinstance(angle.hms, tuple)
    assert angle.hms[0] == 3
    assert angle.hms[1] == 36
    assert_allclose(angle.hms[2], 29.78879999999947)
    # also check that the namedtuple attribute-style access works:
    assert angle.hms.h == 3
    assert angle.hms.m == 36
    assert_allclose(angle.hms.s, 29.78879999999947)

    assert len(angle.dms) == 3
    assert isinstance(angle.dms, tuple)
    assert angle.dms[0] == 54
    assert angle.dms[1] == 7
    assert_allclose(angle.dms[2], 26.831999999992036)
    # also check that the namedtuple attribute-style access works:
    assert angle.dms.d == 54
    assert angle.dms.m == 7
    assert_allclose(angle.dms.s, 26.831999999992036)

    assert isinstance(angle.dms[0], float)
    assert isinstance(angle.hms[0], float)

    # now make sure dms and signed_dms work right for negative angles
    negangle = Angle("-54.12412", unit=u.degree)
    assert negangle.dms.d == -54
    assert negangle.dms.m == -7
    assert_allclose(negangle.dms.s, -26.831999999992036)
    assert negangle.signed_dms.sign == -1
    assert negangle.signed_dms.d == 54
    assert negangle.signed_dms.m == 7
    assert_allclose(negangle.signed_dms.s, 26.831999999992036)
def test_angle_formatting():
    """
    Tests string formatting for Angle objects
    """
    '''
    The string method of Angle has this signature:
    def string(self, unit=DEGREE, decimal=False, sep=" ", precision=5,
               pad=False):

    The "decimal" parameter defaults to False since if you need to print the
    Angle as a decimal, there's no need to use the "format" method (see
    above).
    '''
    angle = Angle("54.12412", unit=u.degree)

    # __str__ is the default `format`
    assert str(angle) == angle.to_string()

    res = 'Angle as HMS: 3h36m29.7888s'
    assert f"Angle as HMS: {angle.to_string(unit=u.hour)}" == res

    res = 'Angle as HMS: 3:36:29.7888'
    assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep=':')}" == res

    res = 'Angle as HMS: 3:36:29.79'
    assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep=':', precision=2)}" == res

    # Note that you can provide one, two, or three separators passed as a
    # tuple or list
    res = 'Angle as HMS: 3h36m29.7888s'
    assert "Angle as HMS: {}".format(angle.to_string(unit=u.hour,
                                                     sep=("h", "m", "s"),
                                                     precision=4)) == res

    res = 'Angle as HMS: 3-36|29.7888'
    assert "Angle as HMS: {}".format(angle.to_string(unit=u.hour, sep=["-", "|"],
                                                     precision=4)) == res

    res = 'Angle as HMS: 3-36-29.7888'
    assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep='-', precision=4)}" == res

    res = 'Angle as HMS: 03h36m29.7888s'
    assert f"Angle as HMS: {angle.to_string(unit=u.hour, precision=4, pad=True)}" == res

    # Same as above, in degrees
    angle = Angle("3 36 29.78880", unit=u.degree)

    res = 'Angle as DMS: 3d36m29.7888s'
    assert f"Angle as DMS: {angle.to_string(unit=u.degree)}" == res

    res = 'Angle as DMS: 3:36:29.7888'
    assert f"Angle as DMS: {angle.to_string(unit=u.degree, sep=':')}" == res

    res = 'Angle as DMS: 3:36:29.79'
    assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree, sep=":",
                                                     precision=2)) == res

    # Note that you can provide one, two, or three separators passed as a
    # tuple or list
    res = 'Angle as DMS: 3d36m29.7888s'
    assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree,
                                                     sep=("d", "m", "s"),
                                                     precision=4)) == res

    res = 'Angle as DMS: 3-36|29.7888'
    assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree, sep=["-", "|"],
                                                     precision=4)) == res

    res = 'Angle as DMS: 3-36-29.7888'
    assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree, sep="-",
                                                     precision=4)) == res

    res = 'Angle as DMS: 03d36m29.7888s'
    assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree, precision=4,
                                                     pad=True)) == res

    res = 'Angle as rad: 0.0629763rad'
    assert f"Angle as rad: {angle.to_string(unit=u.radian)}" == res

    res = 'Angle as rad decimal: 0.0629763'
    assert f"Angle as rad decimal: {angle.to_string(unit=u.radian, decimal=True)}" == res

    # check negative angles
    angle = Angle(-1.23456789, unit=u.degree)
    angle2 = Angle(-1.23456789, unit=u.hour)

    assert angle.to_string() == '-1d14m04.444404s'
    assert angle.to_string(pad=True) == '-01d14m04.444404s'
    assert angle.to_string(unit=u.hour) == '-0h04m56.2962936s'
    assert angle2.to_string(unit=u.hour, pad=True) == '-01h14m04.444404s'
    assert angle.to_string(unit=u.radian, decimal=True) == '-0.0215473'
def test_to_string_vector():
    """to_string works elementwise on array-valued Angles and on scalars."""
    # Regression test for the fact that vectorize doesn't work with Numpy 1.6
    assert Angle([1./7., 1./7.], unit='deg').to_string()[0] == "0d08m34.28571429s"
    assert Angle([1./7.], unit='deg').to_string()[0] == "0d08m34.28571429s"
    assert Angle(1./7., unit='deg').to_string() == "0d08m34.28571429s"
def test_angle_format_roundtripping():
    """
    Ensures that the string representation of an angle can be used to create a
    new valid Angle.
    """
    a1 = Angle(0, unit=u.radian)
    a2 = Angle(10, unit=u.degree)
    a3 = Angle(0.543, unit=u.degree)
    a4 = Angle('1d2m3.4s')

    assert Angle(str(a1)).degree == a1.degree
    assert Angle(str(a2)).degree == a2.degree
    assert Angle(str(a3)).degree == a3.degree
    assert Angle(str(a4)).degree == a4.degree

    # also check Longitude/Latitude
    ra = Longitude('1h2m3.4s')
    dec = Latitude('1d2m3.4s')

    assert_allclose(Angle(str(ra)).degree, ra.degree)
    assert_allclose(Angle(str(dec)).degree, dec.degree)
def test_radec():
    """
    Tests creation/operations of Longitude and Latitude objects
    """
    '''
    Longitude and Latitude are objects that are subclassed from Angle. As with Angle, Longitude
    and Latitude can parse any unambiguous format (tuples, formatted strings, etc.).

    The intention is not to create an Angle subclass for every possible
    coordinate object (e.g. galactic l, galactic b). However, equatorial Longitude/Latitude
    are so prevalent in astronomy that it's worth creating ones for these
    units. They will be noted as "special" in the docs and use of the just the
    Angle class is to be used for other coordinate systems.
    '''
    with pytest.raises(u.UnitsError):
        ra = Longitude("4:08:15.162342")  # error - hours or degrees?
    with pytest.raises(u.UnitsError):
        ra = Longitude("-4:08:15.162342")

    # the "smart" initializer allows >24 to automatically do degrees, but the
    # Angle-based one does not
    # TODO: adjust in 0.3 for whatever behavior is decided on

    # ra = Longitude("26:34:15.345634")  # unambiguous b/c hours don't go past 24
    # assert_allclose(ra.degree, 26.570929342)
    with pytest.raises(u.UnitsError):
        ra = Longitude("26:34:15.345634")

    # ra = Longitude(68)
    with pytest.raises(u.UnitsError):
        ra = Longitude(68)

    with pytest.raises(u.UnitsError):
        ra = Longitude(12)

    with pytest.raises(ValueError):
        ra = Longitude("garbage containing a d and no units")

    ra = Longitude("12h43m23s")
    assert_allclose(ra.hour, 12.7230555556)

    ra = Longitude((56, 14, 52.52), unit=u.degree)  # can accept tuples
    # TODO: again, fix based on >24 behavior
    # ra = Longitude((56,14,52.52))
    with pytest.raises(u.UnitsError):
        ra = Longitude((56, 14, 52.52))
    with pytest.raises(u.UnitsError):
        ra = Longitude((12, 14, 52))  # ambiguous w/o units
    ra = Longitude((12, 14, 52), unit=u.hour)

    ra = Longitude([56, 64, 52.2], unit=u.degree)  # ...but not arrays (yet)

    # Units can be specified
    ra = Longitude("4:08:15.162342", unit=u.hour)

    # TODO: this was the "smart" initializer behavior - adjust in 0.3 appropriately
    # Where Longitude values are commonly found in hours or degrees, declination is
    # nearly always specified in degrees, so this is the default.
    # dec = Latitude("-41:08:15.162342")
    with pytest.raises(u.UnitsError):
        dec = Latitude("-41:08:15.162342")

    dec = Latitude("-41:08:15.162342", unit=u.degree)  # same as above
def test_negative_zero_dms():
    # Test for DMS parser: a negative sign on a zero-degree field must
    # propagate to the whole value.
    a = Angle('-00:00:10', u.deg)
    assert_allclose(a.degree, -10. / 3600.)

    # Unicode minus
    a = Angle('−00:00:10', u.deg)
    assert_allclose(a.degree, -10. / 3600.)


def test_negative_zero_dm():
    # Test for DM parser
    a = Angle('-00:10', u.deg)
    assert_allclose(a.degree, -10. / 60.)


def test_negative_zero_hms():
    # Test for HMS parser
    a = Angle('-00:00:10', u.hour)
    assert_allclose(a.hour, -10. / 3600.)


def test_negative_zero_hm():
    # Test for HM parser
    a = Angle('-00:10', u.hour)
    assert_allclose(a.hour, -10. / 60.)


def test_negative_sixty_hm():
    # Test for HM parser: 60 minutes is out of range but accepted with a warning.
    with pytest.warns(IllegalMinuteWarning):
        a = Angle('-00:60', u.hour)
    assert_allclose(a.hour, -1.)


def test_plus_sixty_hm():
    # Test for HM parser
    with pytest.warns(IllegalMinuteWarning):
        a = Angle('00:60', u.hour)
    assert_allclose(a.hour, 1.)


def test_negative_fifty_nine_sixty_dms():
    # Test for DMS parser: 60 seconds is out of range but accepted with a warning.
    with pytest.warns(IllegalSecondWarning):
        a = Angle('-00:59:60', u.deg)
    assert_allclose(a.degree, -1.)


def test_plus_fifty_nine_sixty_dms():
    # Test for DMS parser
    with pytest.warns(IllegalSecondWarning):
        a = Angle('+00:59:60', u.deg)
    assert_allclose(a.degree, 1.)


def test_negative_sixty_dms():
    # Test for DMS parser
    with pytest.warns(IllegalSecondWarning):
        a = Angle('-00:00:60', u.deg)
    assert_allclose(a.degree, -1. / 60.)


def test_plus_sixty_dms():
    # Test for DMS parser
    with pytest.warns(IllegalSecondWarning):
        a = Angle('+00:00:60', u.deg)
    assert_allclose(a.degree, 1. / 60.)
def test_angle_to_is_angle():
    """Angle.to() with an angular unit returns an Angle, not a bare Quantity."""
    with pytest.warns(IllegalSecondWarning):
        a = Angle('00:00:60', u.deg)
    assert isinstance(a, Angle)
    assert isinstance(a.to(u.rad), Angle)


def test_angle_to_quantity():
    """An Angle can be downcast to a plain Quantity, keeping its unit."""
    with pytest.warns(IllegalSecondWarning):
        a = Angle('00:00:60', u.deg)
    q = u.Quantity(a)
    assert isinstance(q, u.Quantity)
    assert q.unit is u.deg


def test_quantity_to_angle():
    """Quantities with angular units convert to Angle; others raise."""
    a = Angle(1.0*u.deg)
    assert isinstance(a, Angle)
    with pytest.raises(u.UnitsError):
        Angle(1.0*u.meter)
    # u.hour is a time unit, so it is reinterpreted as hourangle here.
    a = Angle(1.0*u.hour)
    assert isinstance(a, Angle)
    assert a.unit is u.hourangle
    with pytest.raises(u.UnitsError):
        Angle(1.0*u.min)
def test_angle_string():
    """Parsing of unit-suffixed and direction-suffixed (N/S/E/W) angle strings."""
    with pytest.warns(IllegalSecondWarning):
        a = Angle('00:00:60', u.deg)
    assert str(a) == '0d01m00s'
    # S/W suffixes negate the value; N/E leave it positive.
    a = Angle('00:00:59S', u.deg)
    assert str(a) == '-0d00m59s'
    a = Angle('00:00:59N', u.deg)
    assert str(a) == '0d00m59s'
    a = Angle('00:00:59E', u.deg)
    assert str(a) == '0d00m59s'
    a = Angle('00:00:59W', u.deg)
    assert str(a) == '-0d00m59s'
    a = Angle('-00:00:10', u.hour)
    assert str(a) == '-0h00m10s'
    a = Angle('00:00:59E', u.hour)
    assert str(a) == '0h00m59s'
    a = Angle('00:00:59W', u.hour)
    assert str(a) == '-0h00m59s'
    a = Angle(3.2, u.radian)
    assert str(a) == '3.2rad'
    a = Angle(4.2, u.microarcsecond)
    assert str(a) == '4.2uarcsec'
    a = Angle('1.0uarcsec')
    assert a.value == 1.0
    assert a.unit == u.microarcsecond
    a = Angle('1.0uarcsecN')
    assert a.value == 1.0
    assert a.unit == u.microarcsecond
    a = Angle('1.0uarcsecS')
    assert a.value == -1.0
    assert a.unit == u.microarcsecond
    a = Angle('1.0uarcsecE')
    assert a.value == 1.0
    assert a.unit == u.microarcsecond
    a = Angle('1.0uarcsecW')
    assert a.value == -1.0
    assert a.unit == u.microarcsecond
    a = Angle("3d")
    assert_allclose(a.value, 3.0)
    assert a.unit == u.degree
    a = Angle("3dN")
    assert str(a) == "3d00m00s"
    assert a.unit == u.degree
    a = Angle("3dS")
    assert str(a) == "-3d00m00s"
    assert a.unit == u.degree
    a = Angle("3dE")
    assert str(a) == "3d00m00s"
    assert a.unit == u.degree
    a = Angle("3dW")
    assert str(a) == "-3d00m00s"
    assert a.unit == u.degree
    a = Angle('10"')
    assert_allclose(a.value, 10.0)
    assert a.unit == u.arcsecond
    a = Angle("10'N")
    assert_allclose(a.value, 10.0)
    assert a.unit == u.arcminute
    a = Angle("10'S")
    assert_allclose(a.value, -10.0)
    assert a.unit == u.arcminute
    a = Angle("10'E")
    assert_allclose(a.value, 10.0)
    assert a.unit == u.arcminute
    a = Angle("10'W")
    assert_allclose(a.value, -10.0)
    assert a.unit == u.arcminute
    a = Angle('45°55′12″N')
    assert str(a) == '45d55m12s'
    assert_allclose(a.value, 45.92)
    assert a.unit == u.deg
    a = Angle('45°55′12″S')
    assert str(a) == '-45d55m12s'
    assert_allclose(a.value, -45.92)
    assert a.unit == u.deg
    a = Angle('45°55′12″E')
    assert str(a) == '45d55m12s'
    assert_allclose(a.value, 45.92)
    assert a.unit == u.deg
    a = Angle('45°55′12″W')
    assert str(a) == '-45d55m12s'
    assert_allclose(a.value, -45.92)
    assert a.unit == u.deg
    # direction suffixes are rejected for hour-unit strings and when doubled
    with pytest.raises(ValueError):
        Angle('00h00m10sN')
    with pytest.raises(ValueError):
        Angle('45°55′12″NS')
def test_angle_repr():
    """repr() of Angle/Longitude/Latitude includes the class name."""
    assert 'Angle' in repr(Angle(0, u.deg))
    assert 'Longitude' in repr(Longitude(0, u.deg))
    assert 'Latitude' in repr(Latitude(0, u.deg))

    a = Angle(0, u.deg)
    repr(a)


def test_large_angle_representation():
    """Test that angles above 360 degrees can be output as strings,
    in repr, str, and to_string. (regression test for #1413)"""
    a = Angle(350, u.deg) + Angle(350, u.deg)
    a.to_string()
    a.to_string(u.hourangle)
    repr(a)
    repr(a.to(u.hourangle))
    str(a)
    str(a.to(u.hourangle))


def test_wrap_at_inplace():
    """wrap_at(..., inplace=True) mutates in place and returns None."""
    a = Angle([-20, 150, 350, 360] * u.deg)
    out = a.wrap_at('180d', inplace=True)
    assert out is None
    assert np.all(a.degree == np.array([-20., 150., -10., 0.]))
def test_latitude():
    """Latitude range checks, arithmetic type degradation, and Longitude interop."""
    with pytest.raises(ValueError):
        lat = Latitude(['91d', '89d'])
    with pytest.raises(ValueError):
        lat = Latitude('-91d')

    lat = Latitude(['90d', '89d'])
    # check that one can get items
    assert lat[0] == 90 * u.deg
    assert lat[1] == 89 * u.deg
    # and that comparison with angles works
    assert np.all(lat == Angle(['90d', '89d']))
    # check setitem works
    lat[1] = 45. * u.deg
    assert np.all(lat == Angle(['90d', '45d']))
    # but not with values out of range
    with pytest.raises(ValueError):
        lat[0] = 90.001 * u.deg
    with pytest.raises(ValueError):
        lat[0] = -90.001 * u.deg
    # these should also not destroy input (#1851)
    assert np.all(lat == Angle(['90d', '45d']))

    # conserve type on unit change (closes #1423)
    angle = lat.to('radian')
    assert type(angle) is Latitude
    # but not on calculations
    angle = lat - 190 * u.deg
    assert type(angle) is Angle
    assert angle[0] == -100 * u.deg

    lat = Latitude('80d')
    angle = lat / 2.
    assert type(angle) is Angle
    assert angle == 40 * u.deg

    angle = lat * 2.
    assert type(angle) is Angle
    assert angle == 160 * u.deg

    angle = -lat
    assert type(angle) is Angle
    assert angle == -80 * u.deg

    # Test errors when trying to interoperate with longitudes.
    with pytest.raises(TypeError) as excinfo:
        lon = Longitude(10, 'deg')
        lat = Latitude(lon)
    assert "A Latitude angle cannot be created from a Longitude angle" in str(excinfo.value)

    with pytest.raises(TypeError) as excinfo:
        lon = Longitude(10, 'deg')
        lat = Latitude([20], 'deg')
        lat[0] = lon
    assert "A Longitude angle cannot be assigned to a Latitude angle" in str(excinfo.value)

    # Check we can work around the Lat vs Long checks by casting explicitly to Angle.
    lon = Longitude(10, 'deg')
    lat = Latitude(Angle(lon))
    assert lat.value == 10.0
    # Check setitem.
    lon = Longitude(10, 'deg')
    lat = Latitude([20], 'deg')
    lat[0] = Angle(lon)
    assert lat.value[0] == 10.0
def test_longitude():
    """Longitude wrap_angle behavior, propagation, and Latitude interop."""
    # Default wrapping at 360d with an array input
    lon = Longitude(['370d', '88d'])
    assert np.all(lon == Longitude(['10d', '88d']))
    assert np.all(lon == Angle(['10d', '88d']))

    # conserve type on unit change and keep wrap_angle (closes #1423)
    angle = lon.to('hourangle')
    assert type(angle) is Longitude
    assert angle.wrap_angle == lon.wrap_angle
    angle = lon[0]
    assert type(angle) is Longitude
    assert angle.wrap_angle == lon.wrap_angle
    angle = lon[1:]
    assert type(angle) is Longitude
    assert angle.wrap_angle == lon.wrap_angle

    # but not on calculations
    angle = lon / 2.
    assert np.all(angle == Angle(['5d', '44d']))
    assert type(angle) is Angle
    assert not hasattr(angle, 'wrap_angle')

    angle = lon * 2. + 400 * u.deg
    assert np.all(angle == Angle(['420d', '576d']))
    assert type(angle) is Angle

    # Test setting a mutable value and having it wrap
    lon[1] = -10 * u.deg
    assert np.all(lon == Angle(['10d', '350d']))

    # Test wrapping and try hitting some edge cases
    lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian)
    assert np.all(lon.degree == np.array([0., 90, 180, 270, 0]))

    lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian, wrap_angle='180d')
    assert np.all(lon.degree == np.array([0., 90, -180, -90, 0]))

    # Wrap on setting wrap_angle property (also test auto-conversion of wrap_angle to an Angle)
    lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian)
    lon.wrap_angle = '180d'
    assert np.all(lon.degree == np.array([0., 90, -180, -90, 0]))

    lon = Longitude('460d')
    assert lon == Angle('100d')
    lon.wrap_angle = '90d'
    assert lon == Angle('-260d')

    # check that if we initialize a longitude with another longitude,
    # wrap_angle is kept by default
    lon2 = Longitude(lon)
    assert lon2.wrap_angle == lon.wrap_angle
    # but not if we explicitly set it
    lon3 = Longitude(lon, wrap_angle='180d')
    assert lon3.wrap_angle == 180 * u.deg

    # check that wrap_angle is always an Angle
    lon = Longitude(lon, wrap_angle=Longitude(180 * u.deg))
    assert lon.wrap_angle == 180 * u.deg
    assert lon.wrap_angle.__class__ is Angle

    # check that wrap_angle is not copied
    wrap_angle = 180 * u.deg
    lon = Longitude(lon, wrap_angle=wrap_angle)
    assert lon.wrap_angle == 180 * u.deg
    assert np.may_share_memory(lon.wrap_angle, wrap_angle)

    # check for problem reported in #2037 about Longitude initializing to -0
    lon = Longitude(0, u.deg)
    lonstr = lon.to_string()
    assert not lonstr.startswith('-')

    # also make sure dtype is correctly conserved
    assert Longitude(0, u.deg, dtype=float).dtype == np.dtype(float)
    assert Longitude(0, u.deg, dtype=int).dtype == np.dtype(int)

    # Test errors when trying to interoperate with latitudes.
    with pytest.raises(TypeError) as excinfo:
        lat = Latitude(10, 'deg')
        lon = Longitude(lat)
    assert "A Longitude angle cannot be created from a Latitude angle" in str(excinfo.value)

    with pytest.raises(TypeError) as excinfo:
        lat = Latitude(10, 'deg')
        lon = Longitude([20], 'deg')
        lon[0] = lat
    assert "A Latitude angle cannot be assigned to a Longitude angle" in str(excinfo.value)

    # Check we can work around the Lat vs Long checks by casting explicitly to Angle.
    lat = Latitude(10, 'deg')
    lon = Longitude(Angle(lat))
    assert lon.value == 10.0
    # Check setitem.
    lat = Latitude(10, 'deg')
    lon = Longitude([20], 'deg')
    lon[0] = Angle(lat)
    assert lon.value[0] == 10.0
def test_wrap_at():
    """Angle.wrap_at for arrays, scalars, and a sweep of wrap angles."""
    a = Angle([-20, 150, 350, 360] * u.deg)
    assert np.all(a.wrap_at(360 * u.deg).degree == np.array([340., 150., 350., 0.]))
    assert np.all(a.wrap_at(Angle(360, unit=u.deg)).degree == np.array([340., 150., 350., 0.]))
    assert np.all(a.wrap_at('360d').degree == np.array([340., 150., 350., 0.]))
    assert np.all(a.wrap_at('180d').degree == np.array([-20., 150., -10., 0.]))
    assert np.all(a.wrap_at(np.pi * u.rad).degree == np.array([-20., 150., -10., 0.]))

    # Test wrapping a scalar Angle
    a = Angle('190d')
    assert a.wrap_at('180d') == Angle('-170d')

    a = Angle(np.arange(-1000.0, 1000.0, 0.125), unit=u.deg)
    for wrap_angle in (270, 0.2, 0.0, 360.0, 500, -2000.125):
        # Wrapped values always land in [wrap_angle - 360, wrap_angle).
        aw = a.wrap_at(wrap_angle * u.deg)
        assert np.all(aw.degree >= wrap_angle - 360.0)
        assert np.all(aw.degree < wrap_angle)

        aw = a.to(u.rad).wrap_at(wrap_angle * u.deg)
        assert np.all(aw.degree >= wrap_angle - 360.0)
        assert np.all(aw.degree < wrap_angle)


def test_is_within_bounds():
    """is_within_bounds with explicit and open-ended (None) bounds."""
    a = Angle([-20, 150, 350] * u.deg)
    assert a.is_within_bounds('0d', '360d') is False
    assert a.is_within_bounds(None, '360d') is True
    assert a.is_within_bounds(-30 * u.deg, None) is True

    a = Angle('-20d')
    assert a.is_within_bounds('0d', '360d') is False
    assert a.is_within_bounds(None, '360d') is True
    assert a.is_within_bounds(-30 * u.deg, None) is True


def test_angle_mismatched_unit():
    # An explicit unit= overrides the unit implied by the string's h/m/s.
    a = Angle('+6h7m8s', unit=u.degree)
    assert_allclose(a.value, 91.78333333333332)


def test_regression_formatting_negative():
    # Regression test for a bug that caused:
    #
    # >>> Angle(-1., unit='deg').to_string()
    # '-1d00m-0s'
    assert Angle(-0., unit='deg').to_string() == '-0d00m00s'
    assert Angle(-1., unit='deg').to_string() == '-1d00m00s'
    assert Angle(-0., unit='hour').to_string() == '-0h00m00s'
    assert Angle(-1., unit='hour').to_string() == '-1h00m00s'


def test_regression_formatting_default_precision():
    # Regression test for issue #11140
    assert Angle('10:20:30.12345678d').to_string() == '10d20m30.12345678s'
    assert Angle('10d20m30.123456784564s').to_string() == '10d20m30.12345678s'
    assert Angle('10d20m30.123s').to_string() == '10d20m30.123s'


def test_empty_sep():
    """An empty separator concatenates the sexagesimal fields."""
    a = Angle('05h04m31.93830s')
    assert a.to_string(sep='', precision=2, pad=True) == '050431.94'
def test_create_tuple():
    """
    Tests creation of an angle with a (d,m,s) or (h,m,s) tuple
    """
    a1 = Angle((1, 30, 0), unit=u.degree)
    assert a1.value == 1.5

    a1 = Angle((1, 30, 0), unit=u.hourangle)
    assert a1.value == 1.5


def test_list_of_quantities():
    """A list of mixed-unit quantities converts to the first element's unit."""
    a1 = Angle([1*u.deg, 1*u.hourangle])
    assert a1.unit == u.deg
    assert_allclose(a1.value, [1, 15])

    a2 = Angle([1*u.hourangle, 1*u.deg], u.deg)
    assert a2.unit == u.deg
    assert_allclose(a2.value, [15, 1])


def test_multiply_divide():
    # Issue #2273: Angle * Angle and Angle / Angle give plain Quantities.
    a1 = Angle([1, 2, 3], u.deg)
    a2 = Angle([4, 5, 6], u.deg)
    a3 = a1 * a2
    assert_allclose(a3.value, [4, 10, 18])
    assert a3.unit == (u.deg * u.deg)

    a3 = a1 / a2
    assert_allclose(a3.value, [.25, .4, .5])
    assert a3.unit == u.dimensionless_unscaled


def test_mixed_string_and_quantity():
    """Strings and quantities may be mixed in one input list."""
    a1 = Angle(['1d', 1. * u.deg])
    assert_array_equal(a1.value, [1., 1.])
    assert a1.unit == u.deg

    a2 = Angle(['1d', 1 * u.rad * np.pi, '3d'])
    assert_array_equal(a2.value, [1., 180., 3.])
    assert a2.unit == u.deg


def test_array_angle_tostring():
    """to_string on an array Angle returns a unicode ndarray."""
    aobj = Angle([1, 2], u.deg)
    assert aobj.to_string().dtype.kind == 'U'
    assert np.all(aobj.to_string() == ['1d00m00s', '2d00m00s'])
def test_wrap_at_without_new():
    """
    Regression test for subtle bugs from situations where an Angle is
    created via numpy channels that don't do the standard __new__ but instead
    depend on array_finalize to set state. Longitude is used because the
    bug was in its _wrap_angle not getting initialized correctly
    """
    l1 = Longitude([1]*u.deg)
    l2 = Longitude([2]*u.deg)

    l = np.concatenate([l1, l2])
    assert l._wrap_angle is not None


def test__str__():
    """
    Check the __str__ method used in printing the Angle
    """
    # scalar angle
    scangle = Angle('10.2345d')
    strscangle = scangle.__str__()
    assert strscangle == '10d14m04.2s'

    # non-scalar array angles
    arrangle = Angle(['10.2345d', '-20d'])
    strarrangle = arrangle.__str__()
    assert strarrangle == '[10d14m04.2s -20d00m00s]'

    # summarizing for large arrays, ... should appear
    bigarrangle = Angle(np.ones(10000), u.deg)
    assert '...' in bigarrangle.__str__()


def test_repr_latex():
    """
    Check the _repr_latex_ method, used primarily by IPython notebooks
    """
    # try with both scalar
    scangle = Angle(2.1, u.deg)
    rlscangle = scangle._repr_latex_()

    # and array angles
    arrangle = Angle([1, 2.1], u.deg)
    rlarrangle = arrangle._repr_latex_()

    assert rlscangle == r'$2^\circ06{}^\prime00{}^{\prime\prime}$'
    assert rlscangle.split('$')[1] in rlarrangle

    # make sure the ... appears for large arrays
    bigarrangle = Angle(np.ones(50000)/50000., u.deg)
    assert '...' in bigarrangle._repr_latex_()
def test_angle_with_cds_units_enabled():
    """Regression test for #5350

    Especially the example in
    https://github.com/astropy/astropy/issues/5350#issuecomment-248770151
    """
    from astropy.units import cds
    # the problem is with the parser, so remove it temporarily so it gets
    # rebuilt both with and without the CDS units enabled
    from astropy.coordinates.angle_formats import _AngleParser
    del _AngleParser._thread_local._parser
    with cds.enable():
        Angle('5d')
    del _AngleParser._thread_local._parser
    Angle('5d')


def test_longitude_nan():
    # Check that passing a NaN to Longitude doesn't raise a warning
    Longitude([0, np.nan, 1] * u.deg)


def test_latitude_nan():
    # Check that passing a NaN to Latitude doesn't raise a warning
    Latitude([0, np.nan, 1] * u.deg)


def test_angle_wrap_at_nan():
    # Check that wrap_at on an Angle containing NaN doesn't raise a warning
    Angle([0, np.nan, 1] * u.deg).wrap_at(180*u.deg)


def test_angle_multithreading():
    """
    Regression test for issue #7168: parsing from several threads at once.
    """
    angles = ['00:00:00']*10000

    def parse_test(i=0):
        Angle(angles, unit='hour')
    for i in range(10):
        threading.Thread(target=parse_test, args=(i,)).start()
@pytest.mark.parametrize("cls", [Angle, Longitude, Latitude])
@pytest.mark.parametrize("input, expstr, exprepr",
                         [(np.nan*u.deg,
                           "nan",
                           "nan deg"),
                          ([np.nan, 5, 0]*u.deg,
                           "[nan 5d00m00s 0d00m00s]",
                           "[nan, 5., 0.] deg"),
                          ([6, np.nan, 0]*u.deg,
                           "[6d00m00s nan 0d00m00s]",
                           "[6., nan, 0.] deg"),
                          ([np.nan, np.nan, np.nan]*u.deg,
                           "[nan nan nan]",
                           "[nan, nan, nan] deg"),
                          (np.nan*u.hour,
                           "nan",
                           "nan hourangle"),
                          ([np.nan, 5, 0]*u.hour,
                           "[nan 5h00m00s 0h00m00s]",
                           "[nan, 5., 0.] hourangle"),
                          ([6, np.nan, 0]*u.hour,
                           "[6h00m00s nan 0h00m00s]",
                           "[6., nan, 0.] hourangle"),
                          ([np.nan, np.nan, np.nan]*u.hour,
                           "[nan nan nan]",
                           "[nan, nan, nan] hourangle"),
                          (np.nan*u.rad,
                           "nan",
                           "nan rad"),
                          ([np.nan, 1, 0]*u.rad,
                           "[nan 1rad 0rad]",
                           "[nan, 1., 0.] rad"),
                          ([1.50, np.nan, 0]*u.rad,
                           "[1.5rad nan 0rad]",
                           "[1.5, nan, 0.] rad"),
                          ([np.nan, np.nan, np.nan]*u.rad,
                           "[nan nan nan]",
                           "[nan, nan, nan] rad")])
def test_str_repr_angles_nan(cls, input, expstr, exprepr):
    """
    Regression test for issue #11473: NaN entries must render as "nan"
    in both str() and repr() for all three angle classes.
    """
    q = cls(input)
    assert str(q) == expstr
    # Deleting whitespaces since repr appears to be adding them for some values
    # making the test fail.
    assert repr(q).replace(" ", "") == f'<{cls.__name__}{exprepr}>'.replace(" ", "")
|
assert type(angle) is Angle
|
<|file_name|>urls.py<|end_file_name|><|fim▁begin|># encoding: utf-8
# Copyright 2013 maker
# License
"""
Sales module URLs
"""
from django.conf.urls.defaults import *
# URL routes for the sales module. Every route optionally accepts a
# ".<format>" suffix captured as `response_format`.
urlpatterns = patterns('maker.sales.views',
    url(r'^(\.(?P<response_format>\w+))?$', 'index', name='sales'),
    url(r'^index(\.(?P<response_format>\w+))?/?$', 'index', name='sales_index'),
    url(r'^index/open(\.(?P<response_format>\w+))?/?$', 'index_open', name='sales_index_open'),
    url(r'^index/assigned(\.(?P<response_format>\w+))?/?$',
        'index_assigned', name='sales_index_assigned'),

    # Orders
    url(r'^order/add(\.(?P<response_format>\w+))?/?$',
        'order_add', name='sales_order_add'),
    url(r'^order/add/lead/(?P<lead_id>\w+)(\.(?P<response_format>\w+))?/?$',
        'order_add', name='sales_order_add_with_lead'),
    url(r'^order/add/opportunity/(?P<opportunity_id>\w+)(\.(?P<response_format>\w+))?/?$',
        'order_add', name='sales_order_add_with_opportunity'),
    url(r'^order/edit/(?P<order_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'order_edit', name='sales_order_edit'),
    url(r'^order/view/(?P<order_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'order_view', name='sales_order_view'),
    url(r'^order/invoice/(?P<order_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'order_invoice_view', name='sales_order_invoice_view'),
    url(r'^order/delete/(?P<order_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'order_delete', name='sales_order_delete'),

    # Products
    url(r'^product/index(\.(?P<response_format>\w+))?/?$',
        'product_index', name='sales_product_index'),
    url(r'^product/add/(?P<parent_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'product_add', name='sales_product_add'),
    url(r'^product/add(\.(?P<response_format>\w+))?/?$',
        'product_add', name='sales_product_add'),
    url(r'^product/edit/(?P<product_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'product_edit', name='sales_product_edit'),
    url(r'^product/view/(?P<product_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'product_view', name='sales_product_view'),
    url(r'^product/delete/(?P<product_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'product_delete', name='sales_product_delete'),

    # Settings
    url(r'^settings/view(\.(?P<response_format>\w+))?/?$', 'settings_view', name='sales_settings_view'),
    url(r'^settings/edit(\.(?P<response_format>\w+))?/?$', 'settings_edit', name='sales_settings_edit'),

    # Statuses
    url(r'^status/add(\.(?P<response_format>\w+))?/?$',
        'status_add', name='sales_status_add'),
    url(r'^status/edit/(?P<status_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'status_edit', name='sales_status_edit'),
    url(r'^status/view/(?P<status_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'status_view', name='sales_status_view'),
    url(r'^status/delete/(?P<status_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'status_delete', name='sales_status_delete'),

    # Subscriptions
    url(r'^subscription/add(\.(?P<response_format>\w+))?/?$',
        'subscription_add', name='sales_subscription_add'),
    url(r'^subscription/add/order/(?P<order_id>\w+)/product/(?P<product_id>\w+)(\.(?P<response_format>\w+))?/?$',
        'subscription_add', name='sales_subscription_add_with_order_and_product'),
    url(r'^subscription/add/(?P<productset_id>\w+)(\.(?P<response_format>\w+))?/?$',
        'subscription_add', name='sales_subscription_add_with_product'),
    url(r'^subscription/edit/(?P<subscription_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'subscription_edit', name='sales_subscription_edit'),
    url(r'^subscription/view/(?P<subscription_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'subscription_view', name='sales_subscription_view'),
    url(r'^subscription/delete/(?P<subscription_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'subscription_delete', name='sales_subscription_delete'),

    # Ordered Products
    url(r'^ordered_product/add/(?P<order_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'ordered_product_add', name='sales_ordered_product_add'),
    url(r'^ordered_product/edit/(?P<ordered_product_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'ordered_product_edit', name='sales_ordered_product_edit'),
    url(r'^ordered_product/view/(?P<ordered_product_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'ordered_product_view', name='sales_ordered_product_view'),
    url(r'^ordered_product/delete/(?P<ordered_product_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'ordered_product_delete', name='sales_ordered_product_delete'),

    # Sources
    url(r'^source/add(\.(?P<response_format>\w+))?/?$',
        'source_add', name='sales_source_add'),
    url(r'^source/edit/(?P<source_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'source_edit', name='sales_source_edit'),
    url(r'^source/view/(?P<source_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'source_view', name='sales_source_view'),
    url(r'^source/delete/(?P<source_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'source_delete', name='sales_source_delete'),

    # Leads
    url(r'^lead/index(\.(?P<response_format>\w+))?/?$',
        'lead_index', name='sales_lead_index'),
    url(r'^lead/index/assigned(\.(?P<response_format>\w+))?/?$',
        'lead_index_assigned', name='sales_lead_index_assigned'),
    url(r'^lead/add(\.(?P<response_format>\w+))?/?$',
        'lead_add', name='sales_lead_add'),
    url(r'^lead/edit/(?P<lead_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'lead_edit', name='sales_lead_edit'),
    url(r'^lead/view/(?P<lead_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'lead_view', name='sales_lead_view'),
    url(r'^lead/delete/(?P<lead_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'lead_delete', name='sales_lead_delete'),

    # Opportunities
    url(r'^opportunity/index(\.(?P<response_format>\w+))?/?$',
        'opportunity_index', name='sales_opportunity_index'),
    url(r'^opportunity/index/assigned(\.(?P<response_format>\w+))?/?$',
        'opportunity_index_assigned', name='sales_opportunity_index_assigned'),
    url(r'^opportunity/add(\.(?P<response_format>\w+))?/?$',
        'opportunity_add', name='sales_opportunity_add'),
    url(r'^opportunity/add/lead/(?P<lead_id>\w+)(\.(?P<response_format>\w+))?/?$',
        'opportunity_add', name='sales_opportunity_add_with_lead'),
    url(r'^opportunity/edit/(?P<opportunity_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'opportunity_edit', name='sales_opportunity_edit'),
    url(r'^opportunity/view/(?P<opportunity_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'opportunity_view', name='sales_opportunity_view'),
    url(r'^opportunity/delete/(?P<opportunity_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'opportunity_delete', name='sales_opportunity_delete'),

    # AJAX lookups
    url(r'^ajax/subscription(\.(?P<response_format>\w+))?/?$',
        'ajax_subscription_lookup', name='sales_ajax_subscription_lookup'),
)
|
<|file_name|>ApplicationTest.java<|end_file_name|><|fim▁begin|>package com.doglandia.animatingtextviewlib;
import android.app.Application;
import android.test.ApplicationTestCase;
/**
* <a href="http://d.android.com/tools/testing/testing_android.html">Testing Fundamentals</a><|fim▁hole|>public class ApplicationTest extends ApplicationTestCase<Application> {
public ApplicationTest() {
super(Application.class);
}
}<|fim▁end|>
|
*/
|
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals, absolute_import
import json
import pytest
import requests
import requests.exceptions
from tests.constants import LOCALHOST_REGISTRY_HTTP, DOCKER0_REGISTRY_HTTP, MOCK, TEST_IMAGE
from tests.util import uuid_value
from osbs.utils import ImageName
from atomic_reactor.core import ContainerTasker
from atomic_reactor.constants import CONTAINER_DOCKERPY_BUILD_METHOD
from atomic_reactor.inner import DockerBuildWorkflow
from tests.constants import MOCK_SOURCE
if MOCK:<|fim▁hole|>@pytest.fixture()
def temp_image_name():
    """Return a unique, throwaway ImageName for a single test run."""
    return ImageName(repo=("atomic-reactor-tests-%s" % uuid_value()))
@pytest.fixture()
def is_registry_running():
    """
    Report whether a docker registry answers at both localhost:5000 and
    docker0:5000.

    Returns False as soon as either endpoint refuses the connection or
    replies with a non-OK status; True only when both respond OK.
    """
    # Both endpoints must be reachable; checking docker0 is a leap of faith
    # (assumes the registry is bound to all interfaces, not just loopback).
    for registry_url in (LOCALHOST_REGISTRY_HTTP, DOCKER0_REGISTRY_HTTP):
        try:
            response = requests.get(registry_url)
        except requests.exceptions.ConnectionError:
            return False
        if not response.ok:
            return False
    return True
@pytest.fixture(scope="module")
def docker_tasker():
    # Module-scoped: one tasker instance is shared by all tests in a module.
    if MOCK:
        mock_docker()
    # retry_times=0 so a failing docker call surfaces immediately in tests
    # instead of being retried.
    ct = ContainerTasker(retry_times=0)
    ct.build_method = CONTAINER_DOCKERPY_BUILD_METHOD
    return ct
@pytest.fixture(params=[True, False])
def reactor_config_map(request):
    # Parametrized fixture: dependent tests run once with True and once with False.
    return request.param
@pytest.fixture(params=[True, False])
def inspect_only(request):
    # Parametrized fixture: dependent tests run once with True and once with False.
    return request.param
@pytest.fixture
def user_params(monkeypatch):
    """
    Put a default image_tag into the USER_PARAMS environment variable.

    Any test that creates a :class:`DockerBuildWorkflow` instance needs
    this fixture so the workflow can read its user parameters.
    """
    params = json.dumps({'image_tag': TEST_IMAGE})
    monkeypatch.setenv('USER_PARAMS', params)
@pytest.fixture
def workflow(user_params):
    # Depends on user_params so USER_PARAMS is set before the workflow is built.
    return DockerBuildWorkflow(source=MOCK_SOURCE)
@pytest.mark.optionalhook
def pytest_html_results_table_row(report, cells):
    # pytest-html hook: drop the result-table row for passed/skipped tests so
    # the HTML report lists only failures and errors.
    if report.passed or report.skipped:
        del cells[:]
|
from tests.docker_mock import mock_docker
|
<|file_name|>test_character.py<|end_file_name|><|fim▁begin|>import unittest
import pytest
from libweasyl import ratings
from weasyl.test import db_utils
from weasyl import character
@pytest.mark.usefixtures('db')
class SelectCountTestCase(unittest.TestCase):
    def setUp(self):
        # Three users: user1 owns the fixture characters, friend1 is friends
        # with user1, and user2 is unrelated.
        self.user1 = db_utils.create_user()
        self.user2 = db_utils.create_user()
        self.friend1 = db_utils.create_user()
        db_utils.create_friendship(self.user1, self.friend1)
        # Create `count` characters for user1 and remember the id at index
        # `pivot` for the backid/nextid pagination tests.
        self.count = 20
        self.pivot = 5
        s = db_utils.create_characters(self.count, self.user1, ratings.GENERAL.code)
        self.pivotid = s[self.pivot]
    def test_count_backid(self):
        # Everything strictly after the pivot should be counted.
        self.assertEqual(
            self.count - self.pivot - 1,
            character.select_count(self.user1, ratings.GENERAL.code, backid=self.pivotid))
    def test_count_nextid(self):
        # Everything strictly before the pivot should be counted.
        self.assertEqual(
            self.pivot,
            character.select_count(self.user1, ratings.GENERAL.code, nextid=self.pivotid))
    def test_see_friends_character(self):
        """
        Should be able to see a friend's friends-only character in a listing.
        """
        c = db_utils.create_character(self.friend1, friends_only=True)
        # The count grows by one and the new character appears first.
        self.assertEqual(
            self.count + 1,
            character.select_count(self.user1, ratings.GENERAL.code))
        self.assertEqual(
            c,
            character.select_list(self.user1, ratings.GENERAL.code, 100)[0]['charid'])
def test_cannot_see_non_friends_character(self):
"""
Should not be able to see a non-friend's friends-ony character in a listing.
"""<|fim▁hole|> db_utils.create_character(self.user2, friends_only=True)
self.assertEqual(
self.count,
character.select_count(self.user1, ratings.GENERAL.code))
    def test_can_see_own_blocktag_character(self):
        """
        Can see your own character in a listing even with a blocked tag.
        """
        block_tagid = db_utils.create_tag("blocked")
        db_utils.create_blocktag(self.user1, block_tagid, ratings.GENERAL.code)
        charid = db_utils.create_character(self.user1, name="My blocktag character")
        db_utils.create_character_tag(block_tagid, charid)
        # Another user's character with the same blocked tag, which we should NOT see.
        other_charid = db_utils.create_character(self.user2, name="Other user's blocktag character")
        db_utils.create_character_tag(block_tagid, other_charid)
        self.assertEqual(
            charid,
            character.select_list(self.user1, ratings.GENERAL.code, 100)[0]['charid'])
def test_can_see_own_rating_character(self):
"""
Can see your own character in a listing even when it's above your max rating.
"""
charid = db_utils.create_character(self.user1, rating=ratings.EXPLICIT.code)
db_utils.create_character(self.user2, rating=ratings.EXPLICIT.code)
self.assertEqual(
charid,
character.select_list(self.user1, ratings.GENERAL.code, 100)[0]['charid'])<|fim▁end|>
| |
<|file_name|>NettyWebSocketChannelHandler.java<|end_file_name|><|fim▁begin|>package com.netease.xmpp.websocket.handler;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelFutureListener;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelStateEvent;
import org.jboss.netty.channel.ExceptionEvent;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
import org.jboss.netty.handler.codec.http.HttpHeaders;
import org.jboss.netty.handler.codec.http.HttpRequest;
import org.jboss.netty.handler.codec.http.HttpResponse;
import org.jboss.netty.handler.codec.http.HttpResponseStatus;
import org.jboss.netty.handler.codec.http.HttpHeaders.Names;
import org.jboss.netty.handler.codec.http.HttpHeaders.Values;
import org.jboss.netty.handler.codec.http.websocket.WebSocketFrame;
import org.jboss.netty.handler.codec.http.websocket.WebSocketFrameDecoder;
import org.jboss.netty.handler.codec.http.websocket.WebSocketFrameEncoder;
import org.jboss.netty.util.CharsetUtil;
import org.jivesoftware.multiplexer.spi.ClientFailoverDeliverer;
import com.netease.xmpp.websocket.CMWebSocketConnection;
import com.netease.xmpp.websocket.codec.Hybi10WebSocketFrameDecoder;
import com.netease.xmpp.websocket.codec.Hybi10WebSocketFrameEncoder;
import com.netease.xmpp.websocket.codec.Pong;
import sun.misc.BASE64Encoder;
import java.nio.charset.Charset;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.concurrent.Executor;
import static org.jboss.netty.handler.codec.http.HttpHeaders.isKeepAlive;
import static org.jboss.netty.handler.codec.http.HttpHeaders.setContentLength;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.CONNECTION;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ORIGIN;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.SEC_WEBSOCKET_KEY1;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.SEC_WEBSOCKET_KEY2;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.SEC_WEBSOCKET_LOCATION;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.SEC_WEBSOCKET_ORIGIN;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.SEC_WEBSOCKET_PROTOCOL;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.UPGRADE;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.WEBSOCKET_LOCATION;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.WEBSOCKET_ORIGIN;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.WEBSOCKET_PROTOCOL;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Values.WEBSOCKET;
import static org.jboss.netty.handler.codec.http.HttpMethod.GET;
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.FORBIDDEN;
import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1;
/**
 * Netty upstream handler that upgrades HTTP requests to WebSocket connections
 * (Hixie-75, Hixie-76 and Hybi-10 handshakes) and forwards decoded frames to
 * a {@link WebSocketHandler}.
 */
public class NettyWebSocketChannelHandler extends SimpleChannelUpstreamHandler {
    // Shared digest instance; not thread-safe, so generateAccept() is synchronized.
    private static final MessageDigest SHA_1;
    static {
        try {
            SHA_1 = MessageDigest.getInstance("SHA1");
        } catch (NoSuchAlgorithmException e) {
            throw new InternalError("SHA-1 not supported on this platform");
        }
    }
    // Magic GUID appended to the client key when computing the Hybi-10 accept token.
    private static final String ACCEPT_GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
    // NOTE(review): sun.misc.BASE64Encoder is a JDK-internal API; consider a public codec.
    private static final BASE64Encoder encoder = new BASE64Encoder();
    private static final Charset ASCII = Charset.forName("ASCII");
    // Executor used to run handler callbacks off the I/O thread (see channelDisconnected).
    protected final Executor executor;
    // Application-level callbacks for open/close/message/pong events.
    protected final WebSocketHandler handler;
    // Created during the handshake; null until prepareConnection() runs.
    protected CMWebSocketConnection webSocketConnection;
    public NettyWebSocketChannelHandler(Executor executor, WebSocketHandler handler) {
        this.handler = handler;
        this.executor = executor;
    }
@Override
public void messageReceived(ChannelHandlerContext ctx, final MessageEvent e) throws Exception {
Object msg = e.getMessage();
if (msg instanceof HttpRequest) {
handleHttpRequest(ctx, (HttpRequest) msg);
} else if (msg instanceof WebSocketFrame) {
handleWebSocketFrame(ctx, (WebSocketFrame) msg);
}
}
    @Override
    public void channelDisconnected(ChannelHandlerContext ctx, ChannelStateEvent e)
            throws Exception {
        // Run the close callback on the executor so user code never blocks the I/O thread.
        executor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    handler.onClose(webSocketConnection);
                } catch (Exception e1) {
                    // NOTE(review): handler failures are only printed; consider real logging.
                    e1.printStackTrace();
                }
            }
        });
    }
    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, final ExceptionEvent e) throws Exception {
        // Any upstream error tears down the connection; the trace goes to stderr.
        System.out.println("EXCEPTION");
        e.getChannel().close();
        e.getCause().printStackTrace();
    }
    private void handleHttpRequest(ChannelHandlerContext ctx, HttpRequest req) throws Exception {
        // Allow only GET .
        if (req.getMethod() != GET) {
            sendHttpResponse(ctx, req, new DefaultHttpResponse(HTTP_1_1, FORBIDDEN));
            return;
        }
        // Serve the WebSocket handshake request.
        // Requires "Connection: Upgrade" and "Upgrade: WebSocket"; any other GET
        // falls through without a response being written.
        if (Values.UPGRADE.equalsIgnoreCase(req.getHeader(CONNECTION))
                && WEBSOCKET.equalsIgnoreCase(req.getHeader(Names.UPGRADE))) {
            // Create the WebSocket handshake response.
            HttpResponse res = new DefaultHttpResponse(HTTP_1_1, new HttpResponseStatus(101,
                    "Web Socket Protocol Handshake"));
            res.addHeader(Names.UPGRADE, WEBSOCKET);
            res.addHeader(CONNECTION, Values.UPGRADE);
            // Fill in version-specific handshake fields and swap the pipeline codecs.
            prepareConnection(req, res, ctx);
            try {
                handler.onOpen(this.webSocketConnection);
            } catch (Exception e) {
                // TODO
                e.printStackTrace();
            }
        }
    }
private void handleWebSocketFrame(ChannelHandlerContext ctx, final WebSocketFrame frame) {
try {
if (frame instanceof Pong) {
handler.onPong(webSocketConnection, frame.getTextData());
} else {
if (frame.isText()) {
handler.onMessage(webSocketConnection, frame.getTextData());
} else {
handler.onMessage(webSocketConnection, frame.getBinaryData().array());
}
}
} catch (Throwable t) {
// TODO
t.printStackTrace();
}
}
    private void prepareConnection(HttpRequest req, HttpResponse res, ChannelHandlerContext ctx) {
        this.webSocketConnection = new CMWebSocketConnection(ctx.getChannel(),
                new ClientFailoverDeliverer());
        // Pick the handshake flavour from the request headers.  In every branch
        // the handshake response is written BEFORE the codecs are swapped
        // (presumably so the 101 response is still HTTP-encoded — confirm).
        if (isHybi10WebSocketRequest(req)) {
            this.webSocketConnection.setVersion(WebSocketConnection.Version.HYBI_10);
            upgradeResponseHybi10(req, res);
            ctx.getChannel().write(res);
            adjustPipelineToHybi(ctx);
        } else if (isHixie76WebSocketRequest(req)) {
            this.webSocketConnection.setVersion(WebSocketConnection.Version.HIXIE_76);
            upgradeResponseHixie76(req, res);
            ctx.getChannel().write(res);
            adjustPipelineToHixie(ctx);
        } else {
            // Neither version marker present: fall back to the oldest draft.
            this.webSocketConnection.setVersion(WebSocketConnection.Version.HIXIE_75);
            upgradeResponseHixie75(req, res);
            ctx.getChannel().write(res);
            adjustPipelineToHixie(ctx);
        }
    }
private void adjustPipelineToHixie(ChannelHandlerContext ctx) {
<|fim▁hole|> p.replace("decoder", "wsdecoder", new WebSocketFrameDecoder());
p.replace("handler", "wshandler", this);
p.replace("encoder", "wsencoder", new WebSocketFrameEncoder());
}
    private void adjustPipelineToHybi(ChannelHandlerContext ctx) {
        // Swap the HTTP codec for the Hybi-10 WebSocket frame codec in place.
        ChannelPipeline p = ctx.getChannel().getPipeline();
        p.remove("aggregator");
        p.replace("decoder", "wsdecoder", new Hybi10WebSocketFrameDecoder());
        p.replace("handler", "wshandler", this);
        p.replace("encoder", "wsencoder", new Hybi10WebSocketFrameEncoder());
    }
    private boolean isHybi10WebSocketRequest(HttpRequest req) {
        // Hybi drafts are identified by the Sec-WebSocket-Version header.
        return req.containsHeader("Sec-WebSocket-Version");
    }
    private boolean isHixie76WebSocketRequest(HttpRequest req) {
        // Hixie-76 is identified by its two challenge keys.
        return req.containsHeader(SEC_WEBSOCKET_KEY1) && req.containsHeader(SEC_WEBSOCKET_KEY2);
    }
    private static synchronized String generateAccept(String key) {
        // Hybi-10 accept token: Base64(SHA1(key + GUID)).  Synchronized because
        // the shared SHA_1 MessageDigest instance is not thread-safe.
        String s = key + ACCEPT_GUID;
        byte[] b = SHA_1.digest(s.getBytes(ASCII));
        return encoder.encode(b);
    }
    private void upgradeResponseHybi10(HttpRequest req, HttpResponse res) {
        String version = req.getHeader("Sec-WebSocket-Version");
        // Only protocol version 8 (hybi-10) is accepted; advertise it otherwise.
        if (!"8".equals(version)) {
            res.setStatus(HttpResponseStatus.UPGRADE_REQUIRED);
            res.setHeader("Sec-WebSocket-Version", "8");
            return;
        }
        String key = req.getHeader("Sec-WebSocket-Key");
        if (key == null) {
            res.setStatus(HttpResponseStatus.BAD_REQUEST);
            return;
        }
        // Answer the challenge with the SHA1/Base64 accept token.
        String accept = generateAccept(key);
        res.setStatus(new HttpResponseStatus(101, "Switching Protocols"));
        res.addHeader(UPGRADE, WEBSOCKET.toLowerCase());
        res.addHeader(CONNECTION, UPGRADE);
        res.addHeader("Sec-WebSocket-Accept", accept);
    }
    private void upgradeResponseHixie76(HttpRequest req, HttpResponse res) {
        res.setStatus(new HttpResponseStatus(101, "Web Socket Protocol Handshake"));
        res.addHeader(UPGRADE, WEBSOCKET);
        res.addHeader(CONNECTION, UPGRADE);
        res.addHeader(SEC_WEBSOCKET_ORIGIN, req.getHeader(ORIGIN));
        res.addHeader(SEC_WEBSOCKET_LOCATION, getWebSocketLocation(req));
        String protocol = req.getHeader(SEC_WEBSOCKET_PROTOCOL);
        if (protocol != null) {
            res.addHeader(SEC_WEBSOCKET_PROTOCOL, protocol);
        }
        // Calculate the answer of the challenge: for each key, concatenate its
        // digits and divide by the number of spaces it contains.
        String key1 = req.getHeader(SEC_WEBSOCKET_KEY1);
        String key2 = req.getHeader(SEC_WEBSOCKET_KEY2);
        int a = (int) (Long.parseLong(key1.replaceAll("[^0-9]", "")) / key1.replaceAll("[^ ]", "")
                .length());
        int b = (int) (Long.parseLong(key2.replaceAll("[^0-9]", "")) / key2.replaceAll("[^ ]", "")
                .length());
        // The 8-byte tail of the request body completes the 16-byte challenge.
        long c = req.getContent().readLong();
        ChannelBuffer input = ChannelBuffers.buffer(16);
        input.writeInt(a);
        input.writeInt(b);
        input.writeLong(c);
        // The response body is the MD5 digest of the 16 challenge bytes.
        try {
            ChannelBuffer output = ChannelBuffers.wrappedBuffer(MessageDigest.getInstance("MD5")
                    .digest(input.array()));
            res.setContent(output);
        } catch (NoSuchAlgorithmException e) {
            throw new RuntimeException(e);
        }
    }
    private void upgradeResponseHixie75(HttpRequest req, HttpResponse res) {
        // Hixie-75 handshake: echo origin/location/protocol, no challenge.
        res.setStatus(new HttpResponseStatus(101, "Web Socket Protocol Handshake"));
        res.addHeader(UPGRADE, WEBSOCKET);
        res.addHeader(CONNECTION, HttpHeaders.Values.UPGRADE);
        res.addHeader(WEBSOCKET_ORIGIN, req.getHeader(ORIGIN));
        res.addHeader(WEBSOCKET_LOCATION, getWebSocketLocation(req));
        String protocol = req.getHeader(WEBSOCKET_PROTOCOL);
        if (protocol != null) {
            res.addHeader(WEBSOCKET_PROTOCOL, protocol);
        }
    }
private String getWebSocketLocation(HttpRequest req) {
return "ws://" + req.getHeader(HttpHeaders.Names.HOST) + req.getUri();
}
private void sendHttpResponse(ChannelHandlerContext ctx, HttpRequest req, HttpResponse res) {
// Generate an error page if response status code is not OK (200).
if (res.getStatus().getCode() != 200) {
res.setContent(ChannelBuffers.copiedBuffer(res.getStatus().toString(),
CharsetUtil.UTF_8));
setContentLength(res, res.getContent().readableBytes());
}
// Send the response and close the connection if necessary.
ChannelFuture f = ctx.getChannel().write(res);
if (!isKeepAlive(req) || res.getStatus().getCode() != 200) {
f.addListener(ChannelFutureListener.CLOSE);
}
}
}<|fim▁end|>
|
ChannelPipeline p = ctx.getChannel().getPipeline();
p.remove("aggregator");
|
<|file_name|>arbitrary.rs<|end_file_name|><|fim▁begin|>use std::char;
use std::collections::{
BTreeMap,
BTreeSet,
BinaryHeap,
HashMap,
HashSet,
LinkedList,
VecDeque,
};
use std::hash::Hash;
use std::ops::{Range, RangeFrom, RangeTo, RangeFull};
use rand::Rng;
/// `Gen` wraps a `rand::Rng` with parameters to control the distribution of
/// random values.
///
/// A value with type satisfying the `Gen` trait can be constructed with the
/// `gen` function in this crate.
pub trait Gen : Rng {
    /// The size bound used when generating values: maximum container length
    /// and maximum numeric magnitude.
    fn size(&self) -> usize;
}
/// StdGen is the default implementation of `Gen`.
///
/// Values of type `StdGen` can be created with the `gen` function in this
/// crate.
pub struct StdGen<R> {
    // Wrapped source of randomness.
    rng: R,
    // Size bound reported by `Gen::size`.
    size: usize,
}
/// Returns a `StdGen` with the given configuration using any random number
/// generator.
///
/// The `size` parameter controls the size of random values generated.
/// For example, it specifies the maximum length of a randomly generated vector
/// and also will specify the maximum magnitude of a randomly generated number.
impl<R: Rng> StdGen<R> {
    pub fn new(rng: R, size: usize) -> StdGen<R> {
        StdGen { rng: rng, size: size }
    }
}
impl<R: Rng> Rng for StdGen<R> {
    fn next_u32(&mut self) -> u32 { self.rng.next_u32() }
    // some RNGs implement these more efficiently than the default, so
    // we might as well defer to them.
    fn next_u64(&mut self) -> u64 { self.rng.next_u64() }
    fn fill_bytes(&mut self, dest: &mut [u8]) { self.rng.fill_bytes(dest) }
}
impl<R: Rng> Gen for StdGen<R> {
    fn size(&self) -> usize { self.size }
}
/// Creates a shrinker with zero elements.
pub fn empty_shrinker<A: 'static>() -> Box<Iterator<Item=A>> {
    Box::new(::std::iter::empty())
}
/// Creates a shrinker with a single element.
pub fn single_shrinker<A: 'static>(value: A) -> Box<Iterator<Item=A>> {
    Box::new(::std::iter::once(value))
}
/// `Arbitrary` describes types whose values can be randomly generated and
/// shrunk.
///
/// Aside from shrinking, `Arbitrary` is different from the `std::Rand` trait
/// in that it uses a `Gen` to control the distribution of random values.
///
/// As of now, all types that implement `Arbitrary` must also implement
/// `Clone`. (I'm not sure if this is a permanent restriction.)
///
/// They must also be sendable and static since every test is run in its own
/// thread using `thread::Builder::spawn`, which requires the `Send + 'static`
/// bounds.
pub trait Arbitrary : Clone + Send + 'static {
    /// Generates a random value, using `g` for both randomness and sizing.
    fn arbitrary<G: Gen>(g: &mut G) -> Self;
    /// Returns an iterator over "smaller" candidate values; by default a
    /// value cannot be shrunk at all.
    fn shrink(&self) -> Box<Iterator<Item=Self>> {
        empty_shrinker()
    }
}
impl Arbitrary for () {
    /// The unit type has a single value and nothing to shrink.
    fn arbitrary<G: Gen>(_: &mut G) -> () { () }
}
impl Arbitrary for bool {
    /// Fair coin flip.
    fn arbitrary<G: Gen>(g: &mut G) -> bool { g.gen() }
    /// `true` shrinks to `false`; `false` is already minimal.
    fn shrink(&self) -> Box<Iterator<Item=bool>> {
        if *self { single_shrinker(false) } else { empty_shrinker() }
    }
}
impl<A: Arbitrary> Arbitrary for Option<A> {
    /// Generates `None` or `Some` with equal probability.
    fn arbitrary<G: Gen>(g: &mut G) -> Option<A> {
        match g.gen::<bool>() {
            true => None,
            false => Some(Arbitrary::arbitrary(g)),
        }
    }
    /// `Some(x)` shrinks first to `None`, then to `Some` of each shrink of
    /// `x`; `None` is already minimal.
    fn shrink(&self) -> Box<Iterator<Item=Option<A>>> {
        match *self {
            Some(ref inner) =>
                Box::new(single_shrinker(None).chain(inner.shrink().map(Some))),
            None => empty_shrinker(),
        }
    }
}
impl<A: Arbitrary, B: Arbitrary> Arbitrary for Result<A, B> {
    /// Generates `Ok` or `Err` with equal probability.
    fn arbitrary<G: Gen>(g: &mut G) -> Result<A, B> {
        match g.gen::<bool>() {
            true => Ok(Arbitrary::arbitrary(g)),
            false => Err(Arbitrary::arbitrary(g)),
        }
    }
    /// Shrinks the contained value while preserving the Ok/Err tag.
    fn shrink(&self) -> Box<Iterator<Item=Result<A, B>>> {
        match *self {
            Ok(ref value) => Box::new(value.shrink().map(Ok)),
            Err(ref value) => Box::new(value.shrink().map(Err)),
        }
    }
}
// Implements Arbitrary for a tuple of the given (variable, type) pairs.
// `arbitrary` generates each component left-to-right; `shrink` shrinks the
// first component against the fixed rest, then the rest against the fixed
// first, chaining both candidate streams.
macro_rules! impl_arb_for_tuple {
    (($var_a:ident, $type_a:ident) $(, ($var_n:ident, $type_n:ident))*) => (
        impl<$type_a: Arbitrary, $($type_n: Arbitrary),*> Arbitrary
                for ($type_a, $($type_n),*) {
            fn arbitrary<GEN: Gen>(g: &mut GEN) -> ($type_a, $($type_n),*) {
                (
                    Arbitrary::arbitrary(g),
                    $({
                        let arb: $type_n = Arbitrary::arbitrary(g);
                        arb
                    },
                    )*
                )
            }
            fn shrink(&self)
                     -> Box<Iterator<Item=($type_a, $($type_n),*)>> {
                let (ref $var_a, $(ref $var_n),*) = *self;
                let sa = $var_a.shrink().scan(
                    ($($var_n.clone(),)*),
                    |&mut ($(ref $var_n,)*), $var_a|
                        Some(($var_a, $($var_n.clone(),)*))
                );
                let srest = ($($var_n.clone(),)*).shrink()
                    .scan($var_a.clone(), |$var_a, ($($var_n,)*)|
                        Some(($var_a.clone(), $($var_n,)*))
                    );
                Box::new(sa.chain(srest))
            }
        }
    );
}
// Tuples of arity 1 through 12 get Arbitrary implementations.
impl_arb_for_tuple!((a, A));
impl_arb_for_tuple!((a, A), (b, B));
impl_arb_for_tuple!((a, A), (b, B), (c, C));
impl_arb_for_tuple!((a, A), (b, B), (c, C), (d, D));
impl_arb_for_tuple!((a, A), (b, B), (c, C), (d, D), (e, E));
impl_arb_for_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F));
impl_arb_for_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F),
                    (g, G));
impl_arb_for_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F),
                    (g, G), (h, H));
impl_arb_for_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F),
                    (g, G), (h, H), (i, I));
impl_arb_for_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F),
                    (g, G), (h, H), (i, I), (j, J));
impl_arb_for_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F),
                    (g, G), (h, H), (i, I), (j, J), (k, K));
impl_arb_for_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F),
                    (g, G), (h, H), (i, I), (j, J), (k, K), (l, L));
impl<A: Arbitrary> Arbitrary for Vec<A> {
    /// Generates a vector whose length is uniform in `0..g.size()`.
    fn arbitrary<G: Gen>(g: &mut G) -> Vec<A> {
        let size = { let s = g.size(); g.gen_range(0, s) };
        (0..size).map(|_| Arbitrary::arbitrary(g)).collect()
    }
    /// Delegates to `VecShrinker`, which tries shorter vectors first and
    /// then shrinks individual elements.
    fn shrink(&self) -> Box<Iterator<Item=Vec<A>>> {
        VecShrinker::new(self.clone())
    }
}
/// Iterator which returns successive attempts to shrink the vector `seed`.
struct VecShrinker<A> {
    seed: Vec<A>,
    /// Number of elements removed when trying smaller vectors.
    size: usize,
    /// The end of the removed elements.
    offset: usize,
    /// The shrinker for the element at `offset` once shrinking of individual elements is attempted.
    element_shrinker: Box<Iterator<Item=A>>
}
impl <A: Arbitrary> VecShrinker<A> {
    /// Builds a shrinker for `seed`; an empty vector cannot be shrunk.
    fn new(seed: Vec<A>) -> Box<Iterator<Item=Vec<A>>> {
        let es = match seed.get(0) {
            Some(e) => e.shrink(),
            None => return empty_shrinker()
        };
        let size = seed.len();
        Box::new(VecShrinker { seed: seed, size: size, offset: size, element_shrinker: es })
    }
    /// Returns the next shrunk element if any; `offset` points to the index
    /// after the returned element when the function returns.
    fn next_element(&mut self) -> Option<A> {
        loop {
            match self.element_shrinker.next() {
                Some(e) => return Some(e),
                None => {
                    // Current element exhausted: advance to the next seed
                    // element's shrinker, or stop at the end of the vector.
                    match self.seed.get(self.offset) {
                        Some(e) => {
                            self.element_shrinker = e.shrink();
                            self.offset += 1;
                        }
                        None => return None
                    }
                }
            }
        }
    }
}
impl <A> Iterator for VecShrinker<A>
    where A: Arbitrary {
    type Item = Vec<A>;
    fn next(&mut self) -> Option<Vec<A>> {
        // Try with an empty vector first
        if self.size == self.seed.len() {
            self.size /= 2;
            self.offset = self.size;
            return Some(vec![])
        }
        if self.size != 0 {
            // Generate a smaller vector by removing the elements between (offset - size) and offset
            let xs1 = self.seed[..(self.offset - self.size)].iter()
                .chain(&self.seed[self.offset..])
                .cloned()
                .collect();
            self.offset += self.size;
            // Try to reduce the amount removed from the vector once all previous sizes tried
            if self.offset > self.seed.len() {
                self.size /= 2;
                self.offset = self.size;
            }
            Some(xs1)
        }
        else {
            // A smaller vector did not work so try to shrink each element of the vector instead
            // Reuse `offset` as the index determining which element to shrink
            // The first element shrinker is already created so skip the first offset
            // (self.offset == 0 only on first entry to this part of the iterator)
            if self.offset == 0 { self.offset = 1 }
            match self.next_element() {
                Some(e) => Some(self.seed[..self.offset-1].iter().cloned()
                    .chain(Some(e).into_iter())
                    .chain(self.seed[self.offset..].iter().cloned())
                    .collect()),
                None => None
            }
        }
    }
}
impl<K: Arbitrary + Ord, V: Arbitrary> Arbitrary for BTreeMap<K, V> {
fn arbitrary<G: Gen>(g: &mut G) -> BTreeMap<K, V> {
let vec: Vec<(K, V)> = Arbitrary::arbitrary(g);
vec.into_iter().collect()
}
fn shrink(&self) -> Box<Iterator<Item=BTreeMap<K, V>>> {
let vec: Vec<(K, V)> = self.clone().into_iter().collect();
Box::new(vec.shrink().map(|v| v.into_iter().collect::<BTreeMap<K, V>>()))
}
}
impl<K: Arbitrary + Eq + Hash, V: Arbitrary> Arbitrary for HashMap<K, V> {
fn arbitrary<G: Gen>(g: &mut G) -> HashMap<K, V> {
let vec: Vec<(K, V)> = Arbitrary::arbitrary(g);
vec.into_iter().collect()
}
fn shrink(&self) -> Box<Iterator<Item=HashMap<K, V>>> {
let vec: Vec<(K, V)> = self.clone().into_iter().collect();
Box::new(vec.shrink().map(|v| v.into_iter().collect::<HashMap<K, V>>()))
}
}
impl<T: Arbitrary + Ord> Arbitrary for BTreeSet<T> {
fn arbitrary<G: Gen>(g: &mut G) -> BTreeSet<T> {
let vec: Vec<T> = Arbitrary::arbitrary(g);
vec.into_iter().collect()
}
fn shrink(&self) -> Box<Iterator<Item=BTreeSet<T>>> {
let vec: Vec<T> = self.clone().into_iter().collect();
Box::new(vec.shrink().map(|v| v.into_iter().collect::<BTreeSet<T>>()))
}
}
impl<T: Arbitrary + Ord> Arbitrary for BinaryHeap<T> {
fn arbitrary<G: Gen>(g: &mut G) -> BinaryHeap<T> {
let vec: Vec<T> = Arbitrary::arbitrary(g);
vec.into_iter().collect()
}
fn shrink(&self) -> Box<Iterator<Item=BinaryHeap<T>>> {
let vec: Vec<T> = self.clone().into_iter().collect();
Box::new(vec.shrink().map(|v| v.into_iter().collect::<BinaryHeap<T>>()))
}
}
impl<T: Arbitrary + Eq + Hash> Arbitrary for HashSet<T> {
fn arbitrary<G: Gen>(g: &mut G) -> HashSet<T> {
let vec: Vec<T> = Arbitrary::arbitrary(g);
vec.into_iter().collect()
}
fn shrink(&self) -> Box<Iterator<Item=HashSet<T>>> {
let vec: Vec<T> = self.clone().into_iter().collect();
Box::new(vec.shrink().map(|v| v.into_iter().collect::<HashSet<T>>()))
}
}
impl<T: Arbitrary> Arbitrary for LinkedList<T> {
fn arbitrary<G: Gen>(g: &mut G) -> LinkedList<T> {
let vec: Vec<T> = Arbitrary::arbitrary(g);
vec.into_iter().collect()
}
fn shrink(&self) -> Box<Iterator<Item=LinkedList<T>>> {
let vec: Vec<T> = self.clone().into_iter().collect();
Box::new(vec.shrink().map(|v| v.into_iter().collect::<LinkedList<T>>()))
}
}
impl<T: Arbitrary> Arbitrary for VecDeque<T> {
fn arbitrary<G: Gen>(g: &mut G) -> VecDeque<T> {
let vec: Vec<T> = Arbitrary::arbitrary(g);
vec.into_iter().collect()
}
fn shrink(&self) -> Box<Iterator<Item=VecDeque<T>>> {
let vec: Vec<T> = self.clone().into_iter().collect();
Box::new(vec.shrink().map(|v| v.into_iter().collect::<VecDeque<T>>()))
}
}
impl Arbitrary for String {
    /// Generates a string of random chars with length uniform in `0..g.size()`.
    fn arbitrary<G: Gen>(g: &mut G) -> String {
        let len = { let s = g.size(); g.gen_range(0, s) };
        (0..len).map(|_| char::arbitrary(g)).collect()
    }
    /// Shrinks a string by shrinking the vector of its characters.
    fn shrink(&self) -> Box<Iterator<Item=String>> {
        let chars: Vec<char> = self.chars().collect();
        Box::new(chars.shrink().map(|cs| cs.into_iter().collect::<String>()))
    }
}
impl Arbitrary for char {
    /// Generates an arbitrary valid `char` directly from the RNG.
    fn arbitrary<G: Gen>(g: &mut G) -> char { g.gen() }
    /// Shrinks via the underlying `u32` code point, skipping invalid ones.
    fn shrink(&self) -> Box<Iterator<Item=char>> {
        Box::new((*self as u32).shrink().filter_map(char::from_u32))
    }
}
// Expands to a local `shrinker` module whose UnsignedShrinker yields 0 first,
// then values successively closer to `x` (x - x/2, x - x/4, ...).
macro_rules! unsigned_shrinker {
    ($ty:ty) => {
        mod shrinker {
            pub struct UnsignedShrinker {
                x: $ty,
                i: $ty,
            }
            impl UnsignedShrinker {
                pub fn new(x: $ty) -> Box<Iterator<Item=$ty>> {
                    if x == 0 {
                        super::empty_shrinker()
                    } else {
                        Box::new(vec![0].into_iter().chain(
                            UnsignedShrinker {
                                x: x,
                                i: x / 2,
                            }
                        ))
                    }
                }
            }
            impl Iterator for UnsignedShrinker {
                type Item = $ty;
                fn next(&mut self) -> Option<$ty> {
                    // Stop once x - i would no longer be smaller than x (i == 0).
                    if self.x - self.i < self.x {
                        let result = Some(self.x - self.i);
                        self.i = self.i / 2;
                        result
                    } else {
                        None
                    }
                }
            }
        }
    }
}
// Implements Arbitrary for each listed unsigned integer type: values are
// uniform in 0..size (size bumped to 1 when it would be 0), and shrinking
// uses the UnsignedShrinker above.
macro_rules! unsigned_arbitrary {
    ($($ty:ty),*) => {
        $(
            impl Arbitrary for $ty {
                fn arbitrary<G: Gen>(g: &mut G) -> $ty {
                    #![allow(trivial_numeric_casts)]
                    let mut s = g.size() as $ty;
                    if s == 0 {
                        s = s + 1;
                    }
                    g.gen_range(0, s)
                }
                fn shrink(&self) -> Box<Iterator<Item=$ty>> {
                    unsigned_shrinker!($ty);
                    shrinker::UnsignedShrinker::new(*self)
                }
            }
        )*
    }
}
unsigned_arbitrary! {
    usize, u8, u16, u32, u64
}
// Expands to a local `shrinker` module whose SignedShrinker yields 0 first
// (and |x| for negative x), then values successively closer to `x`.
macro_rules! signed_shrinker {
    ($ty:ty) => {
        mod shrinker {
            pub struct SignedShrinker {
                x: $ty,
                i: $ty,
            }
            impl SignedShrinker {
                pub fn new(x: $ty) -> Box<Iterator<Item=$ty>> {
                    if x == 0 {
                        super::empty_shrinker()
                    } else {
                        let shrinker = SignedShrinker {
                            x: x,
                            i: x / 2,
                        };
                        let mut items = vec![0];
                        if shrinker.i < 0 {
                            items.push(shrinker.x.abs());
                        }
                        Box::new(items.into_iter().chain(shrinker))
                    }
                }
            }
            impl Iterator for SignedShrinker {
                type Item = $ty;
                fn next(&mut self) -> Option<$ty> {
                    // Stop once the candidate is no closer to zero than x itself.
                    if (self.x - self.i).abs() < self.x.abs() {
                        let result = Some(self.x - self.i);
                        self.i = self.i / 2;
                        result
                    } else {
                        None
                    }
                }
            }
        }
    }
}
// Implements Arbitrary for each listed signed integer type: values are
// uniform in -size..size, and shrinking uses the SignedShrinker above.
macro_rules! signed_arbitrary {
    ($($ty:ty),*) => {
        $(
            impl Arbitrary for $ty {
                fn arbitrary<G: Gen>(g: &mut G) -> $ty {
                    let s = g.size() as $ty;
                    g.gen_range(-s, if s == 0 { 1 } else { s })
                }
                fn shrink(&self) -> Box<Iterator<Item=$ty>> {
                    signed_shrinker!($ty);
                    shrinker::SignedShrinker::new(*self)
                }
            }
        )*
    }
}
signed_arbitrary! {
isize, i8, i16, i32, i64<|fim▁hole|> let s = g.size(); g.gen_range(-(s as f32), s as f32)
}
fn shrink(&self) -> Box<Iterator<Item=f32>> {
signed_shrinker!(i32);
let it = shrinker::SignedShrinker::new(*self as i32);
Box::new(it.map(|x| x as f32))
}
}
impl Arbitrary for f64 {
    /// Generates a float uniform in `-size..size`.
    fn arbitrary<G: Gen>(g: &mut G) -> f64 {
        let s = g.size(); g.gen_range(-(s as f64), s as f64)
    }
    /// Shrinks through `i64`: the `as i64` cast discards the fractional
    /// part, so shrink candidates are always whole numbers.
    fn shrink(&self) -> Box<Iterator<Item=f64>> {
        signed_shrinker!(i64);
        let it = shrinker::SignedShrinker::new(*self as i64);
        Box::new(it.map(|x| x as f64))
    }
}
impl<T: Arbitrary + Clone + PartialOrd> Arbitrary for Range<T> {
    /// Generates `start` then `end` independently (the range may be empty).
    fn arbitrary<G: Gen>(g: &mut G) -> Range<T> {
        let start = Arbitrary::arbitrary(g);
        let end = Arbitrary::arbitrary(g);
        start .. end
    }
    /// Shrinks the `(start, end)` pair of bounds.
    fn shrink(&self) -> Box<Iterator<Item=Range<T>>> {
        let bounds = (self.start.clone(), self.end.clone());
        Box::new(bounds.shrink().map(|(start, end)| start .. end))
    }
}
impl<T: Arbitrary + Clone + PartialOrd> Arbitrary for RangeFrom<T> {
    /// Generates an arbitrary lower bound.
    fn arbitrary<G: Gen>(g: &mut G) -> RangeFrom<T> {
        Arbitrary::arbitrary(g) ..
    }
    /// Shrinks the lower bound.
    fn shrink(&self) -> Box<Iterator<Item=RangeFrom<T>>> {
        Box::new(self.start.clone().shrink().map(|start| start ..))
    }
}
impl<T: Arbitrary + Clone + PartialOrd> Arbitrary for RangeTo<T> {
    /// Generates an arbitrary upper bound.
    fn arbitrary<G: Gen>(g: &mut G) -> RangeTo<T> {
        .. Arbitrary::arbitrary(g)
    }
    /// Shrinks the upper bound.
    fn shrink(&self) -> Box<Iterator<Item=RangeTo<T>>> {
        Box::new(self.end.clone().shrink().map(|end| ..end))
    }
}
impl Arbitrary for RangeFull {
    /// `..` has exactly one value and nothing to shrink.
    fn arbitrary<G: Gen>(_: &mut G) -> RangeFull { .. }
}
#[cfg(test)]
mod test {
use rand;
use std::collections::{
BTreeMap,
BTreeSet,
BinaryHeap,
HashMap,
HashSet,
LinkedList,
VecDeque,
};
use std::fmt::Debug;
use std::hash::Hash;
use super::Arbitrary;
#[test]
fn arby_unit() {
assert_eq!(arby::<()>(), ());
}
#[test]
fn arby_int() {
rep(&mut || { let n: isize = arby(); assert!(n >= -5 && n <= 5); } );
}
#[test]
fn arby_uint() {
rep(&mut || { let n: usize = arby(); assert!(n <= 5); } );
}
fn arby<A: super::Arbitrary>() -> A {
super::Arbitrary::arbitrary(&mut gen())
}
fn gen() -> super::StdGen<rand::ThreadRng> {
super::StdGen::new(rand::thread_rng(), 5)
}
fn rep<F>(f: &mut F) where F : FnMut() -> () {
for _ in 0..100 {
f()
}
}
// Shrink testing.
#[test]
fn unit() {
eq((), vec![]);
}
#[test]
fn bools() {
eq(false, vec![]);
eq(true, vec![false]);
}
#[test]
fn options() {
eq(None::<()>, vec![]);
eq(Some(false), vec![None]);
eq(Some(true), vec![None, Some(false)]);
}
#[test]
fn results() {
// Result<A, B> doesn't implement the Hash trait, so these tests
// depends on the order of shrunk results. Ug.
// TODO: Fix this.
ordered_eq(Ok::<bool, ()>(true), vec![Ok(false)]);
ordered_eq(Err::<(), bool>(true), vec![Err(false)]);
}
#[test]
fn tuples() {
eq((false, false), vec![]);
eq((true, false), vec![(false, false)]);
eq((true, true), vec![(false, true), (true, false)]);
}
#[test]
fn triples() {
eq((false, false, false), vec![]);
eq((true, false, false), vec![(false, false, false)]);
eq((true, true, false),
vec![(false, true, false), (true, false, false)]);
}
#[test]
fn quads() {
eq((false, false, false, false), vec![]);
eq((true, false, false, false), vec![(false, false, false, false)]);
eq((true, true, false, false),
vec![(false, true, false, false), (true, false, false, false)]);
}
#[test]
fn ints() {
// TODO: Test overflow?
eq(5isize, vec![0, 3, 4]);
eq(-5isize, vec![5, 0, -3, -4]);
eq(0isize, vec![]);
}
#[test]
fn ints8() {
eq(5i8, vec![0, 3, 4]);
eq(-5i8, vec![5, 0, -3, -4]);
eq(0i8, vec![]);
}
#[test]
fn ints16() {
eq(5i16, vec![0, 3, 4]);
eq(-5i16, vec![5, 0, -3, -4]);
eq(0i16, vec![]);
}
#[test]
fn ints32() {
eq(5i32, vec![0, 3, 4]);
eq(-5i32, vec![5, 0, -3, -4]);
eq(0i32, vec![]);
}
#[test]
fn ints64() {
eq(5i64, vec![0, 3, 4]);
eq(-5i64, vec![5, 0, -3, -4]);
eq(0i64, vec![]);
}
#[test]
fn uints() {
eq(5usize, vec![0, 3, 4]);
eq(0usize, vec![]);
}
#[test]
fn uints8() {
eq(5u8, vec![0, 3, 4]);
eq(0u8, vec![]);
}
#[test]
fn uints16() {
eq(5u16, vec![0, 3, 4]);
eq(0u16, vec![]);
}
#[test]
fn uints32() {
eq(5u32, vec![0, 3, 4]);
eq(0u32, vec![]);
}
#[test]
fn uints64() {
eq(5u64, vec![0, 3, 4]);
eq(0u64, vec![]);
}
#[test]
fn vecs() {
eq({let it: Vec<isize> = vec![]; it}, vec![]);
eq({let it: Vec<Vec<isize>> = vec![vec![]]; it}, vec![vec![]]);
eq(vec![1isize], vec![vec![], vec![0]]);
eq(vec![11isize], vec![vec![], vec![0], vec![6], vec![9], vec![10]]);
eq(
vec![3isize, 5],
vec![vec![], vec![5], vec![3], vec![0,5], vec![2,5],
vec![3,0], vec![3,3], vec![3,4]]
);
}
macro_rules! map_tests {
($name:ident, $ctor:expr) => {
#[test]
fn $name() {
ordered_eq($ctor, vec![]);
{
let mut map = $ctor;
map.insert(1usize, 1isize);
let shrinks = vec![
$ctor,
{let mut m = $ctor; m.insert(0, 1); m},
{let mut m = $ctor; m.insert(1, 0); m},
];
ordered_eq(map, shrinks);
}
}
}
}
map_tests!(btreemap, BTreeMap::<usize, isize>::new());
map_tests!(hashmap, HashMap::<usize, isize>::new());
macro_rules! list_tests {
($name:ident, $ctor:expr, $push:ident) => {
#[test]
fn $name() {
ordered_eq($ctor, vec![]);
{
let mut list = $ctor;
list.$push(2usize);
let shrinks = vec![
$ctor,
{let mut m = $ctor; m.$push(0); m},
{let mut m = $ctor; m.$push(1); m},
];
ordered_eq(list, shrinks);
}
}
}
}
list_tests!(btreesets, BTreeSet::<usize>::new(), insert);
list_tests!(hashsets, HashSet::<usize>::new(), insert);
list_tests!(linkedlists, LinkedList::<usize>::new(), push_back);
list_tests!(vecdeques, VecDeque::<usize>::new(), push_back);
#[test]
fn binaryheaps() {
ordered_eq(BinaryHeap::<usize>::new().into_iter().collect::<Vec<_>>(), vec![]);
{
let mut heap = BinaryHeap::<usize>::new();
heap.push(2usize);
let shrinks = vec![
vec![],
vec![0],
vec![1],
];
ordered_eq(heap.into_iter().collect::<Vec<_>>(), shrinks);
}
}
#[test]
fn chars() {
eq('\x00', vec![]);
}
// All this jazz is for testing set equality on the results of a shrinker.
fn eq<A: Arbitrary + Eq + Debug + Hash>(s: A, v: Vec<A>) {
let (left, right) = (shrunk(s), set(v));
assert_eq!(left, right);
}
fn shrunk<A: Arbitrary + Eq + Hash>(s: A) -> HashSet<A> {
set(s.shrink().collect())
}
fn set<A: Eq + Hash>(xs: Vec<A>) -> HashSet<A> {
xs.into_iter().collect()
}
fn ordered_eq<A: Arbitrary + Eq + Debug>(s: A, v: Vec<A>) {
let (left, right) = (s.shrink().collect::<Vec<A>>(), v);
assert_eq!(left, right);
}
#[test]
fn ranges() {
ordered_eq(0..0, vec![]);
ordered_eq(1..1, vec![0..1, 1..0]);
ordered_eq(3..5, vec![0..5, 2..5, 3..0, 3..3, 3..4]);
ordered_eq(5..3, vec![0..3, 3..3, 4..3, 5..0, 5..2]);
ordered_eq(3.., vec![0.., 2..]);
ordered_eq(..3, vec![..0, ..2]);
ordered_eq(.., vec![]);
}
}<|fim▁end|>
|
}
impl Arbitrary for f32 {
fn arbitrary<G: Gen>(g: &mut G) -> f32 {
|
<|file_name|>projects.py<|end_file_name|><|fim▁begin|><|fim▁hole|>class ProjectList(generics.ListCreateAPIView):
queryset = Project.objects.all()
serializer_class = ProjectSerializer
class ProjectDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Project.objects.all()
serializer_class = ProjectSerializer<|fim▁end|>
|
from core.serializers import ProjectSerializer
from rest_framework import generics
from core.models import Project
|
<|file_name|>si.js<|end_file_name|><|fim▁begin|><|fim▁hole|>oid sha256:0f924f0bd38d3dfb7ebc34016ec96c018aee7fb62e1cd562898fdeb85ca7d283
size 2234<|fim▁end|>
|
version https://git-lfs.github.com/spec/v1
|
<|file_name|>Story.ts<|end_file_name|><|fim▁begin|>import { Container } from "./Container";
import { InkObject } from "./Object";
import { JsonSerialisation } from "./JsonSerialisation";
import { StoryState } from "./StoryState";
import { ControlCommand } from "./ControlCommand";
import { PushPopType } from "./PushPop";
import { ChoicePoint } from "./ChoicePoint";
import { Choice } from "./Choice";
import { Divert } from "./Divert";
import {
Value,
StringValue,
IntValue,
DivertTargetValue,
VariablePointerValue,
ListValue,
} from "./Value";
import { Path } from "./Path";
import { Void } from "./Void";
import { Tag } from "./Tag";
import { VariableAssignment } from "./VariableAssignment";
import { VariableReference } from "./VariableReference";
import { NativeFunctionCall } from "./NativeFunctionCall";
import { StoryException } from "./StoryException";
import { PRNG } from "./PRNG";
import { StringBuilder } from "./StringBuilder";
import { ListDefinitionsOrigin } from "./ListDefinitionsOrigin";
import { ListDefinition } from "./ListDefinition";
import { Stopwatch } from "./StopWatch";
import { Pointer } from "./Pointer";
import { InkList, InkListItem, KeyValuePair } from "./InkList";
import { asOrNull, asOrThrows } from "./TypeAssertion";
import { DebugMetadata } from "./DebugMetadata";
import { throwNullException } from "./NullException";
import { SimpleJson } from "./SimpleJson";
import { ErrorHandler, ErrorType } from "./Error";
export { InkList } from "./InkList";
if (!Number.isInteger) {
Number.isInteger = function isInteger(nVal: any) {
return (
typeof nVal === "number" &&
isFinite(nVal) &&
nVal > -9007199254740992 &&
nVal < 9007199254740992 &&
Math.floor(nVal) === nVal
);
};
}
export class Story extends InkObject {
public static inkVersionCurrent = 20;
public inkVersionMinimumCompatible = 18;
get currentChoices() {
let choices: Choice[] = [];
if (this._state === null) {
return throwNullException("this._state");
}
for (let c of this._state.currentChoices) {
if (!c.isInvisibleDefault) {
c.index = choices.length;
choices.push(c);
}
}
return choices;
}
get currentText() {
this.IfAsyncWeCant("call currentText since it's a work in progress");
return this.state.currentText;
}
get currentTags() {
this.IfAsyncWeCant("call currentTags since it's a work in progress");
return this.state.currentTags;
}
get currentErrors() {
return this.state.currentErrors;
}
get currentWarnings() {
return this.state.currentWarnings;
}
get currentFlowName() {
return this.state.currentFlowName;
}
get hasError() {
return this.state.hasError;
}
get hasWarning() {
return this.state.hasWarning;
}
get variablesState() {
return this.state.variablesState;
}
get listDefinitions() {
return this._listDefinitions;
}
get state() {
return this._state;
}
public onError: ErrorHandler | null = null;
public onDidContinue: (() => void) | null = null;
public onMakeChoice: ((arg1: Choice) => void) | null = null;
public onEvaluateFunction:
| ((arg1: string, arg2: any[]) => void)
| null = null;
public onCompleteEvaluateFunction:
| ((arg1: string, arg2: any[], arg3: string, arg4: any) => void)
| null = null;
public onChoosePathString:
| ((arg1: string, arg2: any[]) => void)
| null = null;
// TODO: Implement Profiler
public StartProfiling() {
/* */
}
public EndProfiling() {
/* */
}
constructor(contentContainer: Container, lists: ListDefinition[] | null);
constructor(jsonString: string);
constructor(json: Record<string, any>);
constructor() {
super();
// Discrimination between constructors
let contentContainer: Container;
let lists: ListDefinition[] | null = null;
let json: Record<string, any> | null = null;
if (arguments[0] instanceof Container) {
contentContainer = arguments[0] as Container;
if (typeof arguments[1] !== "undefined") {
lists = arguments[1] as ListDefinition[];
}
// ------ Story (Container contentContainer, List<Runtime.ListDefinition> lists = null)
this._mainContentContainer = contentContainer;
// ------
} else {
if (typeof arguments[0] === "string") {
let jsonString = arguments[0] as string;
json = SimpleJson.TextToDictionary(jsonString);
} else {
json = arguments[0] as Record<string, any>;
}
}
// ------ Story (Container contentContainer, List<Runtime.ListDefinition> lists = null)
if (lists != null) this._listDefinitions = new ListDefinitionsOrigin(lists);
this._externals = new Map();
// ------
// ------ Story(string jsonString) : this((Container)null)
if (json !== null) {
let rootObject: Record<string, any> = json;
let versionObj = rootObject["inkVersion"];
if (versionObj == null)
throw new Error(
"ink version number not found. Are you sure it's a valid .ink.json file?"
);
let formatFromFile = parseInt(versionObj);
if (formatFromFile > Story.inkVersionCurrent) {
throw new Error(
"Version of ink used to build story was newer than the current version of the engine"
);
} else if (formatFromFile < this.inkVersionMinimumCompatible) {
throw new Error(
"Version of ink used to build story is too old to be loaded by this version of the engine"
);
} else if (formatFromFile != Story.inkVersionCurrent) {
console.warn(
"WARNING: Version of ink used to build story doesn't match current version of engine. Non-critical, but recommend synchronising."
);
}
let rootToken = rootObject["root"];
if (rootToken == null)
throw new Error(
"Root node for ink not found. Are you sure it's a valid .ink.json file?"
);
let listDefsObj;
if ((listDefsObj = rootObject["listDefs"])) {
this._listDefinitions = JsonSerialisation.JTokenToListDefinitions(
listDefsObj
);
}
this._mainContentContainer = asOrThrows(
JsonSerialisation.JTokenToRuntimeObject(rootToken),
Container
);
this.ResetState();
}
// ------
}
// Merge together `public string ToJson()` and `void ToJson(SimpleJson.Writer writer)`.
// Will only return a value if writer was not provided.
public ToJson(writer?: SimpleJson.Writer): string | void {
let shouldReturn = false;
if (!writer) {
shouldReturn = true;
writer = new SimpleJson.Writer();
}<|fim▁hole|>
writer.WriteObjectStart();
writer.WriteIntProperty("inkVersion", Story.inkVersionCurrent);
writer.WriteProperty("root", (w) =>
JsonSerialisation.WriteRuntimeContainer(w, this._mainContentContainer)
);
if (this._listDefinitions != null) {
writer.WritePropertyStart("listDefs");
writer.WriteObjectStart();
for (let def of this._listDefinitions.lists) {
writer.WritePropertyStart(def.name);
writer.WriteObjectStart();
for (let [key, value] of def.items) {
let item = InkListItem.fromSerializedKey(key);
let val = value;
writer.WriteIntProperty(item.itemName, val);
}
writer.WriteObjectEnd();
writer.WritePropertyEnd();
}
writer.WriteObjectEnd();
writer.WritePropertyEnd();
}
writer.WriteObjectEnd();
if (shouldReturn) return writer.ToString();
}
public ResetState() {
this.IfAsyncWeCant("ResetState");
this._state = new StoryState(this);
this._state.variablesState.ObserveVariableChange(
this.VariableStateDidChangeEvent.bind(this)
);
this.ResetGlobals();
}
public ResetErrors() {
if (this._state === null) {
return throwNullException("this._state");
}
this._state.ResetErrors();
}
public ResetCallstack() {
this.IfAsyncWeCant("ResetCallstack");
if (this._state === null) {
return throwNullException("this._state");
}
this._state.ForceEnd();
}
public ResetGlobals() {
if (this._mainContentContainer.namedContent.get("global decl")) {
let originalPointer = this.state.currentPointer.copy();
this.ChoosePath(new Path("global decl"), false);
this.ContinueInternal();
this.state.currentPointer = originalPointer;
}
this.state.variablesState.SnapshotDefaultGlobals();
}
public SwitchFlow(flowName: string) {
this.IfAsyncWeCant("switch flow");
if (this._asyncSaving) {
throw new Error(
"Story is already in background saving mode, can't switch flow to " +
flowName
);
}
this.state.SwitchFlow_Internal(flowName);
}
public RemoveFlow(flowName: string) {
this.state.RemoveFlow_Internal(flowName);
}
public SwitchToDefaultFlow() {
this.state.SwitchToDefaultFlow_Internal();
}
public Continue() {
this.ContinueAsync(0);
return this.currentText;
}
get canContinue() {
return this.state.canContinue;
}
get asyncContinueComplete() {
return !this._asyncContinueActive;
}
public ContinueAsync(millisecsLimitAsync: number) {
if (!this._hasValidatedExternals) this.ValidateExternalBindings();
this.ContinueInternal(millisecsLimitAsync);
}
public ContinueInternal(millisecsLimitAsync = 0) {
if (this._profiler != null) this._profiler.PreContinue();
let isAsyncTimeLimited = millisecsLimitAsync > 0;
this._recursiveContinueCount++;
if (!this._asyncContinueActive) {
this._asyncContinueActive = isAsyncTimeLimited;
if (!this.canContinue) {
throw new Error(
"Can't continue - should check canContinue before calling Continue"
);
}
this._state.didSafeExit = false;
this._state.ResetOutput();
if (this._recursiveContinueCount == 1)
this._state.variablesState.batchObservingVariableChanges = true;
}
let durationStopwatch = new Stopwatch();
durationStopwatch.Start();
let outputStreamEndsInNewline = false;
this._sawLookaheadUnsafeFunctionAfterNewline = false;
do {
try {
outputStreamEndsInNewline = this.ContinueSingleStep();
} catch (e) {
if (!(e instanceof StoryException)) throw e;
this.AddError(e.message, undefined, e.useEndLineNumber);
break;
}
if (outputStreamEndsInNewline) break;
if (
this._asyncContinueActive &&
durationStopwatch.ElapsedMilliseconds > millisecsLimitAsync
) {
break;
}
} while (this.canContinue);
durationStopwatch.Stop();
if (outputStreamEndsInNewline || !this.canContinue) {
if (this._stateSnapshotAtLastNewline !== null) {
this.RestoreStateSnapshot();
}
if (!this.canContinue) {
if (this.state.callStack.canPopThread)
this.AddError(
"Thread available to pop, threads should always be flat by the end of evaluation?"
);
if (
this.state.generatedChoices.length == 0 &&
!this.state.didSafeExit &&
this._temporaryEvaluationContainer == null
) {
if (this.state.callStack.CanPop(PushPopType.Tunnel))
this.AddError(
"unexpectedly reached end of content. Do you need a '->->' to return from a tunnel?"
);
else if (this.state.callStack.CanPop(PushPopType.Function))
this.AddError(
"unexpectedly reached end of content. Do you need a '~ return'?"
);
else if (!this.state.callStack.canPop)
this.AddError(
"ran out of content. Do you need a '-> DONE' or '-> END'?"
);
else
this.AddError(
"unexpectedly reached end of content for unknown reason. Please debug compiler!"
);
}
}
this.state.didSafeExit = false;
this._sawLookaheadUnsafeFunctionAfterNewline = false;
if (this._recursiveContinueCount == 1)
this._state.variablesState.batchObservingVariableChanges = false;
this._asyncContinueActive = false;
if (this.onDidContinue !== null) this.onDidContinue();
}
this._recursiveContinueCount--;
if (this._profiler != null) this._profiler.PostContinue();
// In the following code, we're masking a lot of non-null assertion,
// because testing for against `hasError` or `hasWarning` makes sure
// the arrays are present and contain at least one element.
if (this.state.hasError || this.state.hasWarning) {
if (this.onError !== null) {
if (this.state.hasError) {
for (let err of this.state.currentErrors!) {
this.onError(err, ErrorType.Error);
}
}
if (this.state.hasWarning) {
for (let err of this.state.currentWarnings!) {
this.onError(err, ErrorType.Warning);
}
}
this.ResetErrors();
} else {
let sb = new StringBuilder();
sb.Append("Ink had ");
if (this.state.hasError) {
sb.Append(`${this.state.currentErrors!.length}`);
sb.Append(
this.state.currentErrors!.length == 1 ? " error" : "errors"
);
if (this.state.hasWarning) sb.Append(" and ");
}
if (this.state.hasWarning) {
sb.Append(`${this.state.currentWarnings!.length}`);
sb.Append(
this.state.currentWarnings!.length == 1 ? " warning" : "warnings"
);
if (this.state.hasWarning) sb.Append(" and ");
}
sb.Append(
". It is strongly suggested that you assign an error handler to story.onError. The first issue was: "
);
sb.Append(
this.state.hasError
? this.state.currentErrors![0]
: this.state.currentWarnings![0]
);
throw new StoryException(sb.toString());
}
}
}
public ContinueSingleStep() {
if (this._profiler != null) this._profiler.PreStep();
this.Step();
if (this._profiler != null) this._profiler.PostStep();
if (!this.canContinue && !this.state.callStack.elementIsEvaluateFromGame) {
this.TryFollowDefaultInvisibleChoice();
}
if (this._profiler != null) this._profiler.PreSnapshot();
if (!this.state.inStringEvaluation) {
if (this._stateSnapshotAtLastNewline !== null) {
if (this._stateSnapshotAtLastNewline.currentTags === null) {
return throwNullException("this._stateAtLastNewline.currentTags");
}
if (this.state.currentTags === null) {
return throwNullException("this.state.currentTags");
}
let change = this.CalculateNewlineOutputStateChange(
this._stateSnapshotAtLastNewline.currentText,
this.state.currentText,
this._stateSnapshotAtLastNewline.currentTags.length,
this.state.currentTags.length
);
if (
change == Story.OutputStateChange.ExtendedBeyondNewline ||
this._sawLookaheadUnsafeFunctionAfterNewline
) {
this.RestoreStateSnapshot();
return true;
} else if (change == Story.OutputStateChange.NewlineRemoved) {
this.DiscardSnapshot();
}
}
if (this.state.outputStreamEndsInNewline) {
if (this.canContinue) {
if (this._stateSnapshotAtLastNewline == null) this.StateSnapshot();
} else {
this.DiscardSnapshot();
}
}
}
if (this._profiler != null) this._profiler.PostSnapshot();
return false;
}
public CalculateNewlineOutputStateChange(
prevText: string | null,
currText: string | null,
prevTagCount: number,
currTagCount: number
) {
if (prevText === null) {
return throwNullException("prevText");
}
if (currText === null) {
return throwNullException("currText");
}
let newlineStillExists =
currText.length >= prevText.length &&
currText.charAt(prevText.length - 1) == "\n";
if (
prevTagCount == currTagCount &&
prevText.length == currText.length &&
newlineStillExists
)
return Story.OutputStateChange.NoChange;
if (!newlineStillExists) {
return Story.OutputStateChange.NewlineRemoved;
}
if (currTagCount > prevTagCount)
return Story.OutputStateChange.ExtendedBeyondNewline;
for (let i = prevText.length; i < currText.length; i++) {
let c = currText.charAt(i);
if (c != " " && c != "\t") {
return Story.OutputStateChange.ExtendedBeyondNewline;
}
}
return Story.OutputStateChange.NoChange;
}
public ContinueMaximally() {
this.IfAsyncWeCant("ContinueMaximally");
let sb = new StringBuilder();
while (this.canContinue) {
sb.Append(this.Continue());
}
return sb.toString();
}
public ContentAtPath(path: Path) {
return this.mainContentContainer.ContentAtPath(path);
}
public KnotContainerWithName(name: string) {
let namedContainer = this.mainContentContainer.namedContent.get(name);
if (namedContainer instanceof Container) return namedContainer;
else return null;
}
public PointerAtPath(path: Path) {
if (path.length == 0) return Pointer.Null;
let p = new Pointer();
let pathLengthToUse = path.length;
let result = null;
if (path.lastComponent === null) {
return throwNullException("path.lastComponent");
}
if (path.lastComponent.isIndex) {
pathLengthToUse = path.length - 1;
result = this.mainContentContainer.ContentAtPath(
path,
undefined,
pathLengthToUse
);
p.container = result.container;
p.index = path.lastComponent.index;
} else {
result = this.mainContentContainer.ContentAtPath(path);
p.container = result.container;
p.index = -1;
}
if (
result.obj == null ||
(result.obj == this.mainContentContainer && pathLengthToUse > 0)
) {
this.Error(
"Failed to find content at path '" +
path +
"', and no approximation of it was possible."
);
} else if (result.approximate)
this.Warning(
"Failed to find content at path '" +
path +
"', so it was approximated to: '" +
result.obj.path +
"'."
);
return p;
}
public StateSnapshot() {
this._stateSnapshotAtLastNewline = this._state;
this._state = this._state.CopyAndStartPatching();
}
public RestoreStateSnapshot() {
if (this._stateSnapshotAtLastNewline === null) {
throwNullException("_stateSnapshotAtLastNewline");
}
this._stateSnapshotAtLastNewline.RestoreAfterPatch();
this._state = this._stateSnapshotAtLastNewline;
this._stateSnapshotAtLastNewline = null;
if (!this._asyncSaving) {
this._state.ApplyAnyPatch();
}
}
public DiscardSnapshot() {
if (!this._asyncSaving) this._state.ApplyAnyPatch();
this._stateSnapshotAtLastNewline = null;
}
public CopyStateForBackgroundThreadSave() {
this.IfAsyncWeCant("start saving on a background thread");
if (this._asyncSaving)
throw new Error(
"Story is already in background saving mode, can't call CopyStateForBackgroundThreadSave again!"
);
let stateToSave = this._state;
this._state = this._state.CopyAndStartPatching();
this._asyncSaving = true;
return stateToSave;
}
public BackgroundSaveComplete() {
if (this._stateSnapshotAtLastNewline === null) {
this._state.ApplyAnyPatch();
}
this._asyncSaving = false;
}
public Step() {
let shouldAddToStream = true;
let pointer = this.state.currentPointer.copy();
if (pointer.isNull) {
return;
}
// Container containerToEnter = pointer.Resolve () as Container;
let containerToEnter = asOrNull(pointer.Resolve(), Container);
while (containerToEnter) {
this.VisitContainer(containerToEnter, true);
// No content? the most we can do is step past it
if (containerToEnter.content.length == 0) {
break;
}
pointer = Pointer.StartOf(containerToEnter);
// containerToEnter = pointer.Resolve() as Container;
containerToEnter = asOrNull(pointer.Resolve(), Container);
}
this.state.currentPointer = pointer.copy();
if (this._profiler != null) this._profiler.Step(this.state.callStack);
// Is the current content object:
// - Normal content
// - Or a logic/flow statement - if so, do it
// Stop flow if we hit a stack pop when we're unable to pop (e.g. return/done statement in knot
// that was diverted to rather than called as a function)
let currentContentObj = pointer.Resolve();
let isLogicOrFlowControl = this.PerformLogicAndFlowControl(
currentContentObj
);
// Has flow been forced to end by flow control above?
if (this.state.currentPointer.isNull) {
return;
}
if (isLogicOrFlowControl) {
shouldAddToStream = false;
}
// Choice with condition?
// var choicePoint = currentContentObj as ChoicePoint;
let choicePoint = asOrNull(currentContentObj, ChoicePoint);
if (choicePoint) {
let choice = this.ProcessChoice(choicePoint);
if (choice) {
this.state.generatedChoices.push(choice);
}
currentContentObj = null;
shouldAddToStream = false;
}
// If the container has no content, then it will be
// the "content" itself, but we skip over it.
if (currentContentObj instanceof Container) {
shouldAddToStream = false;
}
// Content to add to evaluation stack or the output stream
if (shouldAddToStream) {
// If we're pushing a variable pointer onto the evaluation stack, ensure that it's specific
// to our current (possibly temporary) context index. And make a copy of the pointer
// so that we're not editing the original runtime object.
// var varPointer = currentContentObj as VariablePointerValue;
let varPointer = asOrNull(currentContentObj, VariablePointerValue);
if (varPointer && varPointer.contextIndex == -1) {
// Create new object so we're not overwriting the story's own data
let contextIdx = this.state.callStack.ContextForVariableNamed(
varPointer.variableName
);
currentContentObj = new VariablePointerValue(
varPointer.variableName,
contextIdx
);
}
// Expression evaluation content
if (this.state.inExpressionEvaluation) {
this.state.PushEvaluationStack(currentContentObj);
}
// Output stream content (i.e. not expression evaluation)
else {
this.state.PushToOutputStream(currentContentObj);
}
}
// Increment the content pointer, following diverts if necessary
this.NextContent();
// Starting a thread should be done after the increment to the content pointer,
// so that when returning from the thread, it returns to the content after this instruction.
// var controlCmd = currentContentObj as ;
let controlCmd = asOrNull(currentContentObj, ControlCommand);
if (
controlCmd &&
controlCmd.commandType == ControlCommand.CommandType.StartThread
) {
this.state.callStack.PushThread();
}
}
public VisitContainer(container: Container, atStart: boolean) {
if (!container.countingAtStartOnly || atStart) {
if (container.visitsShouldBeCounted)
this.state.IncrementVisitCountForContainer(container);
if (container.turnIndexShouldBeCounted)
this.state.RecordTurnIndexVisitToContainer(container);
}
}
private _prevContainers: Container[] = [];
public VisitChangedContainersDueToDivert() {
let previousPointer = this.state.previousPointer.copy();
let pointer = this.state.currentPointer.copy();
if (pointer.isNull || pointer.index == -1) return;
this._prevContainers.length = 0;
if (!previousPointer.isNull) {
// Container prevAncestor = previousPointer.Resolve() as Container ?? previousPointer.container as Container;
let resolvedPreviousAncestor = previousPointer.Resolve();
let prevAncestor =
asOrNull(resolvedPreviousAncestor, Container) ||
asOrNull(previousPointer.container, Container);
while (prevAncestor) {
this._prevContainers.push(prevAncestor);
// prevAncestor = prevAncestor.parent as Container;
prevAncestor = asOrNull(prevAncestor.parent, Container);
}
}
let currentChildOfContainer = pointer.Resolve();
if (currentChildOfContainer == null) return;
// Container currentContainerAncestor = currentChildOfContainer.parent as Container;
let currentContainerAncestor = asOrNull(
currentChildOfContainer.parent,
Container
);
let allChildrenEnteredAtStart = true;
while (
currentContainerAncestor &&
(this._prevContainers.indexOf(currentContainerAncestor) < 0 ||
currentContainerAncestor.countingAtStartOnly)
) {
// Check whether this ancestor container is being entered at the start,
// by checking whether the child object is the first.
let enteringAtStart =
currentContainerAncestor.content.length > 0 &&
currentChildOfContainer == currentContainerAncestor.content[0] &&
allChildrenEnteredAtStart;
if (!enteringAtStart) allChildrenEnteredAtStart = false;
// Mark a visit to this container
this.VisitContainer(currentContainerAncestor, enteringAtStart);
currentChildOfContainer = currentContainerAncestor;
// currentContainerAncestor = currentContainerAncestor.parent as Container;
currentContainerAncestor = asOrNull(
currentContainerAncestor.parent,
Container
);
}
}
public ProcessChoice(choicePoint: ChoicePoint) {
let showChoice = true;
// Don't create choice if choice point doesn't pass conditional
if (choicePoint.hasCondition) {
let conditionValue = this.state.PopEvaluationStack();
if (!this.IsTruthy(conditionValue)) {
showChoice = false;
}
}
let startText = "";
let choiceOnlyText = "";
if (choicePoint.hasChoiceOnlyContent) {
// var choiceOnlyStrVal = state.PopEvaluationStack () as StringValue;
let choiceOnlyStrVal = asOrThrows(
this.state.PopEvaluationStack(),
StringValue
);
choiceOnlyText = choiceOnlyStrVal.value || "";
}
if (choicePoint.hasStartContent) {
// var startStrVal = state.PopEvaluationStack () as StringValue;
let startStrVal = asOrThrows(
this.state.PopEvaluationStack(),
StringValue
);
startText = startStrVal.value || "";
}
// Don't create choice if player has already read this content
if (choicePoint.onceOnly) {
let visitCount = this.state.VisitCountForContainer(
choicePoint.choiceTarget
);
if (visitCount > 0) {
showChoice = false;
}
}
// We go through the full process of creating the choice above so
// that we consume the content for it, since otherwise it'll
// be shown on the output stream.
if (!showChoice) {
return null;
}
let choice = new Choice();
choice.targetPath = choicePoint.pathOnChoice;
choice.sourcePath = choicePoint.path.toString();
choice.isInvisibleDefault = choicePoint.isInvisibleDefault;
choice.threadAtGeneration = this.state.callStack.ForkThread();
choice.text = (startText + choiceOnlyText).replace(/^[ \t]+|[ \t]+$/g, "");
return choice;
}
public IsTruthy(obj: InkObject) {
let truthy = false;
if (obj instanceof Value) {
let val = obj;
if (val instanceof DivertTargetValue) {
let divTarget = val;
this.Error(
"Shouldn't use a divert target (to " +
divTarget.targetPath +
") as a conditional value. Did you intend a function call 'likeThis()' or a read count check 'likeThis'? (no arrows)"
);
return false;
}
return val.isTruthy;
}
return truthy;
}
public PerformLogicAndFlowControl(contentObj: InkObject | null) {
if (contentObj == null) {
return false;
}
// Divert
if (contentObj instanceof Divert) {
let currentDivert = contentObj;
if (currentDivert.isConditional) {
let conditionValue = this.state.PopEvaluationStack();
// False conditional? Cancel divert
if (!this.IsTruthy(conditionValue)) return true;
}
if (currentDivert.hasVariableTarget) {
let varName = currentDivert.variableDivertName;
let varContents = this.state.variablesState.GetVariableWithName(
varName
);
if (varContents == null) {
this.Error(
"Tried to divert using a target from a variable that could not be found (" +
varName +
")"
);
} else if (!(varContents instanceof DivertTargetValue)) {
// var intContent = varContents as IntValue;
let intContent = asOrNull(varContents, IntValue);
let errorMessage =
"Tried to divert to a target from a variable, but the variable (" +
varName +
") didn't contain a divert target, it ";
if (intContent instanceof IntValue && intContent.value == 0) {
errorMessage += "was empty/null (the value 0).";
} else {
errorMessage += "contained '" + varContents + "'.";
}
this.Error(errorMessage);
}
let target = asOrThrows(varContents, DivertTargetValue);
this.state.divertedPointer = this.PointerAtPath(target.targetPath);
} else if (currentDivert.isExternal) {
this.CallExternalFunction(
currentDivert.targetPathString,
currentDivert.externalArgs
);
return true;
} else {
this.state.divertedPointer = currentDivert.targetPointer.copy();
}
if (currentDivert.pushesToStack) {
this.state.callStack.Push(
currentDivert.stackPushType,
undefined,
this.state.outputStream.length
);
}
if (this.state.divertedPointer.isNull && !currentDivert.isExternal) {
if (
currentDivert &&
currentDivert.debugMetadata &&
currentDivert.debugMetadata.sourceName != null
) {
this.Error(
"Divert target doesn't exist: " +
currentDivert.debugMetadata.sourceName
);
} else {
this.Error("Divert resolution failed: " + currentDivert);
}
}
return true;
}
// Start/end an expression evaluation? Or print out the result?
else if (contentObj instanceof ControlCommand) {
let evalCommand = contentObj;
switch (evalCommand.commandType) {
case ControlCommand.CommandType.EvalStart:
this.Assert(
this.state.inExpressionEvaluation === false,
"Already in expression evaluation?"
);
this.state.inExpressionEvaluation = true;
break;
case ControlCommand.CommandType.EvalEnd:
this.Assert(
this.state.inExpressionEvaluation === true,
"Not in expression evaluation mode"
);
this.state.inExpressionEvaluation = false;
break;
case ControlCommand.CommandType.EvalOutput:
// If the expression turned out to be empty, there may not be anything on the stack
if (this.state.evaluationStack.length > 0) {
let output = this.state.PopEvaluationStack();
// Functions may evaluate to Void, in which case we skip output
if (!(output instanceof Void)) {
// TODO: Should we really always blanket convert to string?
// It would be okay to have numbers in the output stream the
// only problem is when exporting text for viewing, it skips over numbers etc.
let text = new StringValue(output.toString());
this.state.PushToOutputStream(text);
}
}
break;
case ControlCommand.CommandType.NoOp:
break;
case ControlCommand.CommandType.Duplicate:
this.state.PushEvaluationStack(this.state.PeekEvaluationStack());
break;
case ControlCommand.CommandType.PopEvaluatedValue:
this.state.PopEvaluationStack();
break;
case ControlCommand.CommandType.PopFunction:
case ControlCommand.CommandType.PopTunnel:
let popType =
evalCommand.commandType == ControlCommand.CommandType.PopFunction
? PushPopType.Function
: PushPopType.Tunnel;
let overrideTunnelReturnTarget: DivertTargetValue | null = null;
if (popType == PushPopType.Tunnel) {
let popped = this.state.PopEvaluationStack();
// overrideTunnelReturnTarget = popped as DivertTargetValue;
overrideTunnelReturnTarget = asOrNull(popped, DivertTargetValue);
if (overrideTunnelReturnTarget === null) {
this.Assert(
popped instanceof Void,
"Expected void if ->-> doesn't override target"
);
}
}
if (this.state.TryExitFunctionEvaluationFromGame()) {
break;
} else if (
this.state.callStack.currentElement.type != popType ||
!this.state.callStack.canPop
) {
let names: Map<PushPopType, string> = new Map();
names.set(
PushPopType.Function,
"function return statement (~ return)"
);
names.set(PushPopType.Tunnel, "tunnel onwards statement (->->)");
let expected = names.get(this.state.callStack.currentElement.type);
if (!this.state.callStack.canPop) {
expected = "end of flow (-> END or choice)";
}
let errorMsg =
"Found " + names.get(popType) + ", when expected " + expected;
this.Error(errorMsg);
} else {
this.state.PopCallStack();
if (overrideTunnelReturnTarget)
this.state.divertedPointer = this.PointerAtPath(
overrideTunnelReturnTarget.targetPath
);
}
break;
case ControlCommand.CommandType.BeginString:
this.state.PushToOutputStream(evalCommand);
this.Assert(
this.state.inExpressionEvaluation === true,
"Expected to be in an expression when evaluating a string"
);
this.state.inExpressionEvaluation = false;
break;
case ControlCommand.CommandType.EndString:
let contentStackForString: InkObject[] = [];
let outputCountConsumed = 0;
for (let i = this.state.outputStream.length - 1; i >= 0; --i) {
let obj = this.state.outputStream[i];
outputCountConsumed++;
// var command = obj as ControlCommand;
let command = asOrNull(obj, ControlCommand);
if (
command &&
command.commandType == ControlCommand.CommandType.BeginString
) {
break;
}
if (obj instanceof StringValue) {
contentStackForString.push(obj);
}
}
// Consume the content that was produced for this string
this.state.PopFromOutputStream(outputCountConsumed);
// The C# version uses a Stack for contentStackForString, but we're
// using a simple array, so we need to reverse it before using it
contentStackForString = contentStackForString.reverse();
// Build string out of the content we collected
let sb = new StringBuilder();
for (let c of contentStackForString) {
sb.Append(c.toString());
}
// Return to expression evaluation (from content mode)
this.state.inExpressionEvaluation = true;
this.state.PushEvaluationStack(new StringValue(sb.toString()));
break;
case ControlCommand.CommandType.ChoiceCount:
let choiceCount = this.state.generatedChoices.length;
this.state.PushEvaluationStack(new IntValue(choiceCount));
break;
case ControlCommand.CommandType.Turns:
this.state.PushEvaluationStack(
new IntValue(this.state.currentTurnIndex + 1)
);
break;
case ControlCommand.CommandType.TurnsSince:
case ControlCommand.CommandType.ReadCount:
let target = this.state.PopEvaluationStack();
if (!(target instanceof DivertTargetValue)) {
let extraNote = "";
if (target instanceof IntValue)
extraNote =
". Did you accidentally pass a read count ('knot_name') instead of a target ('-> knot_name')?";
this.Error(
"TURNS_SINCE / READ_COUNT expected a divert target (knot, stitch, label name), but saw " +
target +
extraNote
);
break;
}
// var divertTarget = target as DivertTargetValue;
let divertTarget = asOrThrows(target, DivertTargetValue);
// var container = ContentAtPath (divertTarget.targetPath).correctObj as Container;
let container = asOrNull(
this.ContentAtPath(divertTarget.targetPath).correctObj,
Container
);
let eitherCount;
if (container != null) {
if (
evalCommand.commandType == ControlCommand.CommandType.TurnsSince
)
eitherCount = this.state.TurnsSinceForContainer(container);
else eitherCount = this.state.VisitCountForContainer(container);
} else {
if (
evalCommand.commandType == ControlCommand.CommandType.TurnsSince
)
eitherCount = -1;
else eitherCount = 0;
this.Warning(
"Failed to find container for " +
evalCommand.toString() +
" lookup at " +
divertTarget.targetPath.toString()
);
}
this.state.PushEvaluationStack(new IntValue(eitherCount));
break;
case ControlCommand.CommandType.Random: {
let maxInt = asOrNull(this.state.PopEvaluationStack(), IntValue);
let minInt = asOrNull(this.state.PopEvaluationStack(), IntValue);
if (minInt == null || minInt instanceof IntValue === false)
return this.Error(
"Invalid value for minimum parameter of RANDOM(min, max)"
);
if (maxInt == null || minInt instanceof IntValue === false)
return this.Error(
"Invalid value for maximum parameter of RANDOM(min, max)"
);
// Originally a primitive type, but here, can be null.
// TODO: Replace by default value?
if (maxInt.value === null) {
return throwNullException("maxInt.value");
}
if (minInt.value === null) {
return throwNullException("minInt.value");
}
// This code is differs a bit from the reference implementation, since
// JavaScript has no true integers. Hence integer arithmetics and
// interger overflows don't apply here. A loss of precision can
// happen with big numbers however.
//
// The case where 'randomRange' is lower than zero is handled below,
// so there's no need to test against Number.MIN_SAFE_INTEGER.
let randomRange = maxInt.value - minInt.value + 1;
if (!isFinite(randomRange) || randomRange > Number.MAX_SAFE_INTEGER) {
randomRange = Number.MAX_SAFE_INTEGER;
this.Error(
"RANDOM was called with a range that exceeds the size that ink numbers can use."
);
}
if (randomRange <= 0)
this.Error(
"RANDOM was called with minimum as " +
minInt.value +
" and maximum as " +
maxInt.value +
". The maximum must be larger"
);
let resultSeed = this.state.storySeed + this.state.previousRandom;
let random = new PRNG(resultSeed);
let nextRandom = random.next();
let chosenValue = (nextRandom % randomRange) + minInt.value;
this.state.PushEvaluationStack(new IntValue(chosenValue));
// Next random number (rather than keeping the Random object around)
this.state.previousRandom = nextRandom;
break;
}
case ControlCommand.CommandType.SeedRandom:
let seed = asOrNull(this.state.PopEvaluationStack(), IntValue);
if (seed == null || seed instanceof IntValue === false)
return this.Error("Invalid value passed to SEED_RANDOM");
// Originally a primitive type, but here, can be null.
// TODO: Replace by default value?
if (seed.value === null) {
return throwNullException("minInt.value");
}
this.state.storySeed = seed.value;
this.state.previousRandom = 0;
this.state.PushEvaluationStack(new Void());
break;
case ControlCommand.CommandType.VisitIndex:
let count =
this.state.VisitCountForContainer(
this.state.currentPointer.container
) - 1; // index not count
this.state.PushEvaluationStack(new IntValue(count));
break;
case ControlCommand.CommandType.SequenceShuffleIndex:
let shuffleIndex = this.NextSequenceShuffleIndex();
this.state.PushEvaluationStack(new IntValue(shuffleIndex));
break;
case ControlCommand.CommandType.StartThread:
// Handled in main step function
break;
case ControlCommand.CommandType.Done:
// We may exist in the context of the initial
// act of creating the thread, or in the context of
// evaluating the content.
if (this.state.callStack.canPopThread) {
this.state.callStack.PopThread();
}
// In normal flow - allow safe exit without warning
else {
this.state.didSafeExit = true;
// Stop flow in current thread
this.state.currentPointer = Pointer.Null;
}
break;
// Force flow to end completely
case ControlCommand.CommandType.End:
this.state.ForceEnd();
break;
case ControlCommand.CommandType.ListFromInt:
// var intVal = state.PopEvaluationStack () as IntValue;
let intVal = asOrNull(this.state.PopEvaluationStack(), IntValue);
// var listNameVal = state.PopEvaluationStack () as StringValue;
let listNameVal = asOrThrows(
this.state.PopEvaluationStack(),
StringValue
);
if (intVal === null) {
throw new StoryException(
"Passed non-integer when creating a list element from a numerical value."
);
}
let generatedListValue = null;
if (this.listDefinitions === null) {
return throwNullException("this.listDefinitions");
}
let foundListDef = this.listDefinitions.TryListGetDefinition(
listNameVal.value,
null
);
if (foundListDef.exists) {
// Originally a primitive type, but here, can be null.
// TODO: Replace by default value?
if (intVal.value === null) {
return throwNullException("minInt.value");
}
let foundItem = foundListDef.result!.TryGetItemWithValue(
intVal.value,
InkListItem.Null
);
if (foundItem.exists) {
generatedListValue = new ListValue(
foundItem.result!,
intVal.value
);
}
} else {
throw new StoryException(
"Failed to find LIST called " + listNameVal.value
);
}
if (generatedListValue == null) generatedListValue = new ListValue();
this.state.PushEvaluationStack(generatedListValue);
break;
case ControlCommand.CommandType.ListRange:
let max = asOrNull(this.state.PopEvaluationStack(), Value);
let min = asOrNull(this.state.PopEvaluationStack(), Value);
// var targetList = state.PopEvaluationStack () as ListValue;
let targetList = asOrNull(this.state.PopEvaluationStack(), ListValue);
if (targetList === null || min === null || max === null)
throw new StoryException(
"Expected list, minimum and maximum for LIST_RANGE"
);
if (targetList.value === null) {
return throwNullException("targetList.value");
}
let result = targetList.value.ListWithSubRange(
min.valueObject,
max.valueObject
);
this.state.PushEvaluationStack(new ListValue(result));
break;
case ControlCommand.CommandType.ListRandom: {
let listVal = this.state.PopEvaluationStack() as ListValue;
if (listVal === null)
throw new StoryException("Expected list for LIST_RANDOM");
let list = listVal.value;
let newList: InkList | null = null;
if (list === null) {
throw throwNullException("list");
}
if (list.Count == 0) {
newList = new InkList();
} else {
// Generate a random index for the element to take
let resultSeed = this.state.storySeed + this.state.previousRandom;
let random = new PRNG(resultSeed);
let nextRandom = random.next();
let listItemIndex = nextRandom % list.Count;
// This bit is a little different from the original
// C# code, since iterators do not work in the same way.
// First, we iterate listItemIndex - 1 times, calling next().
// The listItemIndex-th time is made outside of the loop,
// in order to retrieve the value.
let listEnumerator = list.entries();
for (let i = 0; i <= listItemIndex - 1; i++) {
listEnumerator.next();
}
let value = listEnumerator.next().value;
let randomItem: KeyValuePair<InkListItem, number> = {
Key: InkListItem.fromSerializedKey(value[0]),
Value: value[1],
};
// Origin list is simply the origin of the one element
if (randomItem.Key.originName === null) {
return throwNullException("randomItem.Key.originName");
}
newList = new InkList(randomItem.Key.originName, this);
newList.Add(randomItem.Key, randomItem.Value);
this.state.previousRandom = nextRandom;
}
this.state.PushEvaluationStack(new ListValue(newList));
break;
}
default:
this.Error("unhandled ControlCommand: " + evalCommand);
break;
}
return true;
}
// Variable assignment
else if (contentObj instanceof VariableAssignment) {
let varAss = contentObj;
let assignedVal = this.state.PopEvaluationStack();
this.state.variablesState.Assign(varAss, assignedVal);
return true;
}
// Variable reference
else if (contentObj instanceof VariableReference) {
let varRef = contentObj;
let foundValue = null;
// Explicit read count value
if (varRef.pathForCount != null) {
let container = varRef.containerForCount;
let count = this.state.VisitCountForContainer(container);
foundValue = new IntValue(count);
}
// Normal variable reference
else {
foundValue = this.state.variablesState.GetVariableWithName(varRef.name);
if (foundValue == null) {
this.Warning(
"Variable not found: '" +
varRef.name +
"'. Using default value of 0 (false). This can happen with temporary variables if the declaration hasn't yet been hit. Globals are always given a default value on load if a value doesn't exist in the save state."
);
foundValue = new IntValue(0);
}
}
this.state.PushEvaluationStack(foundValue);
return true;
}
// Native function call
else if (contentObj instanceof NativeFunctionCall) {
let func = contentObj;
let funcParams = this.state.PopEvaluationStack(func.numberOfParameters);
let result = func.Call(funcParams);
this.state.PushEvaluationStack(result);
return true;
}
// No control content, must be ordinary content
return false;
}
/**
 * Change the current position of the story to the given path, after which
 * Continue() evaluates from there.
 * @param path Dot-separated ink path string, e.g. "knot.stitch".
 * @param resetCallstack When true (default), clears the callstack first;
 *        when false, the current flow is preserved (dangerous mid-function).
 * @param args Arguments pushed onto the evaluation stack for the target.
 */
public ChoosePathString(
  path: string,
  resetCallstack = true,
  args: any[] = []
) {
  this.IfAsyncWeCant("call ChoosePathString right now");
  if (this.onChoosePathString !== null) this.onChoosePathString(path, args);
  if (resetCallstack) {
    this.ResetCallstack();
  } else {
    // Without a callstack reset, jumping while a function is mid-evaluation
    // would corrupt the flow — catch that case with a descriptive error.
    if (this.state.callStack.currentElement.type == PushPopType.Function) {
      let funcDetail = "";
      let container = this.state.callStack.currentElement.currentPointer
        .container;
      if (container != null) {
        funcDetail = "(" + container.path.toString() + ") ";
      }
      // Fixed message: the original accidentally doubled the word "not".
      throw new Error(
        "Story was running a function " +
          funcDetail +
          "when you called ChoosePathString(" +
          path +
          ") - this is almost certainly not what you want! Full stack trace: \n" +
          this.state.callStack.callStackTrace
      );
    }
  }
  this.state.PassArgumentsToEvaluationStack(args);
  this.ChoosePath(new Path(path));
}
/**
 * Guard used by operations that must not run while an asynchronous
 * Continue is in progress; throws with a description of the attempted
 * activity when `_asyncContinueActive` is set.
 */
public IfAsyncWeCant(activityStr: string) {
  if (!this._asyncContinueActive) return;
  throw new Error(
    "Can't " +
      activityStr +
      ". Story is in the middle of a ContinueAsync(). Make more ContinueAsync() calls or a single Continue() call beforehand."
  );
}
/**
 * Move the story position to path `p`, then record any containers that
 * became newly visited (for read counts etc.).
 * @param incrementingTurnIndex When false, the jump doesn't count as a turn.
 */
public ChoosePath(p: Path, incrementingTurnIndex: boolean = true) {
  this.state.SetChosenPath(p, incrementingTurnIndex);
  // Visit counts must be updated after the divert has been applied.
  this.VisitChangedContainersDueToDivert();
}
/**
 * Take the choice at `choiceIdx` (index into `currentChoices`): restores
 * the thread the choice was generated on, then diverts to its target path.
 * Asserts when the index is out of range.
 */
public ChooseChoiceIndex(choiceIdx: number) {
  // (Removed a no-op self-assignment `choiceIdx = choiceIdx;` present in
  // the original.)
  let choices = this.currentChoices;
  this.Assert(
    choiceIdx >= 0 && choiceIdx < choices.length,
    "choice out of range"
  );
  let choiceToChoose = choices[choiceIdx];
  if (this.onMakeChoice !== null) this.onMakeChoice(choiceToChoose);
  if (choiceToChoose.threadAtGeneration === null) {
    return throwNullException("choiceToChoose.threadAtGeneration");
  }
  if (choiceToChoose.targetPath === null) {
    return throwNullException("choiceToChoose.targetPath");
  }
  // Replace callstack with the thread that was active when the choice was
  // generated, so the flow continues from the right context.
  this.state.callStack.currentThread = choiceToChoose.threadAtGeneration;
  this.ChoosePath(choiceToChoose.targetPath);
}
/**
 * Whether an ink function (knot) with the given name exists.
 * KnotContainerWithName may throw for invalid names; that is treated the
 * same as "not found".
 */
public HasFunction(functionName: string) {
  try {
    let container = this.KnotContainerWithName(functionName);
    return container != null;
  } catch (e) {
    return false;
  }
}
/**
 * Run an ink function (knot) from game code, isolated from the story's
 * current output: the output stream is snapshotted, cleared for the call,
 * and restored afterwards.
 * @param functionName Name of the ink function to call.
 * @param args Arguments pushed onto the evaluation stack.
 * @param returnTextOutput When true, returns {returned, output} instead of
 *        just the function's return value (see note below).
 * @throws Error when the name is null/blank or no such function exists.
 */
public EvaluateFunction(
  functionName: string,
  args: any[] = [],
  returnTextOutput: boolean = false
): Story.EvaluateFunctionTextOutput | any {
  // EvaluateFunction behaves slightly differently than the C# version.
  // In C#, you can pass a (second) parameter `out textOutput` to get the
  // text outputted by the function. This is not possible in js. Instead,
  // we maintain the regular signature (functionName, args), plus an
  // optional third parameter returnTextOutput. If set to true, we will
  // return both the textOutput and the returned value, as an object.
  if (this.onEvaluateFunction !== null)
    this.onEvaluateFunction(functionName, args);
  this.IfAsyncWeCant("evaluate a function");
  if (functionName == null) {
    throw new Error("Function is null");
  } else if (functionName == "" || functionName.trim() == "") {
    throw new Error("Function is empty or white space.");
  }
  let funcContainer = this.KnotContainerWithName(functionName);
  if (funcContainer == null) {
    throw new Error("Function doesn't exist: '" + functionName + "'");
  }
  // Snapshot the output stream so the call doesn't disturb story output.
  let outputStreamBefore: InkObject[] = [];
  outputStreamBefore.push(...this.state.outputStream);
  this._state.ResetOutput();
  this.state.StartFunctionEvaluationFromGame(funcContainer, args);
  // Evaluate the function, and collect the string output
  let stringOutput = new StringBuilder();
  while (this.canContinue) {
    stringOutput.Append(this.Continue());
  }
  let textOutput = stringOutput.toString();
  // Restore the output stream captured before the call.
  this._state.ResetOutput(outputStreamBefore);
  // Finish evaluation and retrieve the function's return value (if any).
  let result = this.state.CompleteFunctionEvaluationFromGame();
  if (this.onCompleteEvaluateFunction != null)
    this.onCompleteEvaluateFunction(functionName, args, textOutput, result);
  return returnTextOutput ? { returned: result, output: textOutput } : result;
}
/**
 * Evaluate a compiled expression container and return the resulting value,
 * or null when nothing was left on the evaluation stack. The container is
 * temporarily installed as `mainContentContainer` for the duration.
 */
public EvaluateExpression(exprContainer: Container) {
  let startCallStackHeight = this.state.callStack.elements.length;
  // Run the expression in its own callstack frame.
  this.state.callStack.Push(PushPopType.Tunnel);
  this._temporaryEvaluationContainer = exprContainer;
  this.state.GoToStart();
  let evalStackHeight = this.state.evaluationStack.length;
  this.Continue();
  this._temporaryEvaluationContainer = null;
  // Should have fallen off the end of the Container, which should
  // have auto-popped, but just in case we didn't for some reason,
  // manually pop to restore the state (including currentPath).
  if (this.state.callStack.elements.length > startCallStackHeight) {
    this.state.PopCallStack();
  }
  // If evaluation left a value on the stack, that is the expression result.
  let endStackHeight = this.state.evaluationStack.length;
  if (endStackHeight > evalStackHeight) {
    return this.state.PopEvaluationStack();
  } else {
    return null;
  }
}
// When true, an EXTERNAL function call with no bound implementation falls
// back to an ink function of the same name (see CallExternalFunction and
// ValidateExternalBindings).
public allowExternalFunctionFallbacks: boolean = false;
/**
 * Invoke a bound EXTERNAL function (or divert into its ink fallback),
 * popping `numberOfArguments` values off the evaluation stack and pushing
 * the converted return value (Void when the function returns nothing).
 * @param funcName Name of the external function; must not be null.
 * @param numberOfArguments Number of arguments to pop for the call.
 */
public CallExternalFunction(
  funcName: string | null,
  numberOfArguments: number
) {
  if (funcName === null) {
    return throwNullException("funcName");
  }
  let funcDef = this._externals.get(funcName);
  let fallbackFunctionContainer = null;
  let foundExternal = typeof funcDef !== "undefined";
  // While a state snapshot is held we are reading ahead past a newline;
  // refuse to run externals not marked lookAheadSafe and just record the
  // fact. NOTE(review): the caller presumably rewinds to the snapshot and
  // re-runs the call for real — confirm against Continue()'s logic.
  if (
    foundExternal &&
    !funcDef!.lookAheadSafe &&
    this._stateSnapshotAtLastNewline !== null
  ) {
    this._sawLookaheadUnsafeFunctionAfterNewline = true;
    return;
  }
  if (!foundExternal) {
    if (this.allowExternalFunctionFallbacks) {
      fallbackFunctionContainer = this.KnotContainerWithName(funcName);
      this.Assert(
        fallbackFunctionContainer !== null,
        "Trying to call EXTERNAL function '" +
          funcName +
          "' which has not been bound, and fallback ink function could not be found."
      );
      // Divert direct into fallback function and we're done
      this.state.callStack.Push(
        PushPopType.Function,
        undefined,
        this.state.outputStream.length
      );
      this.state.divertedPointer = Pointer.StartOf(fallbackFunctionContainer);
      return;
    } else {
      this.Assert(
        false,
        "Trying to call EXTERNAL function '" +
          funcName +
          "' which has not been bound (and ink fallbacks disabled)."
      );
    }
  }
  // Pop arguments
  let args: any[] = [];
  for (let i = 0; i < numberOfArguments; ++i) {
    // var poppedObj = state.PopEvaluationStack () as Value;
    let poppedObj = asOrThrows(this.state.PopEvaluationStack(), Value);
    let valueObj = poppedObj.valueObject;
    args.push(valueObj);
  }
  // Reverse arguments from the order they were popped,
  // so they're the right way round again.
  args.reverse();
  // Run the function!
  let funcResult = funcDef!.function(args);
  // Convert return value (if any) to a type that the ink engine can use
  let returnObj = null;
  if (funcResult != null) {
    returnObj = Value.Create(funcResult);
    this.Assert(
      returnObj !== null,
      "Could not create ink value from returned object of type " +
        typeof funcResult
    );
  } else {
    // No result: push Void so the evaluation stack stays balanced.
    returnObj = new Void();
  }
  this.state.PushEvaluationStack(returnObj);
}
/**
 * Bind a raw external function by name. Unlike BindExternalFunction, the
 * callback receives the full argument list as a single array and no arity
 * checking is performed.
 * @param lookaheadSafe Whether the function may safely run during lookahead.
 */
public BindExternalFunctionGeneral(
  funcName: string,
  func: Story.ExternalFunction,
  lookaheadSafe: boolean
) {
  this.IfAsyncWeCant("bind an external function");
  this.Assert(
    !this._externals.has(funcName),
    "Function '" + funcName + "' has already been bound."
  );
  let definition: Story.ExternalFunctionDef = {
    function: func,
    lookAheadSafe: lookaheadSafe,
  };
  this._externals.set(funcName, definition);
}
/**
 * Identity function in this port. JavaScript's loose typing makes C#-style
 * coercion of external-function arguments largely unnecessary, and there is
 * no clean way for callers to declare the parameter types they expect.
 */
public TryCoerce(value: any) {
  return value;
}
/**
 * Bind an external function whose arguments are spread positionally.
 * Wraps `func` so the engine's single-array calling convention is adapted:
 * asserts enough arguments were supplied, coerces each, then applies.
 */
public BindExternalFunction(
  funcName: string,
  func: Story.ExternalFunction,
  lookaheadSafe: boolean
) {
  this.Assert(func != null, "Can't bind a null function");
  let wrapper = (args: any) => {
    this.Assert(
      args.length >= func.length,
      "External function expected " + func.length + " arguments"
    );
    let coercedArgs: any[] = [];
    for (let i = 0, l = args.length; i < l; i++) {
      coercedArgs.push(this.TryCoerce(args[i]));
    }
    return func.apply(null, coercedArgs);
  };
  this.BindExternalFunctionGeneral(funcName, wrapper, lookaheadSafe);
}
/**
 * Remove a previously-bound EXTERNAL function; asserts when no binding
 * with that name exists.
 */
public UnbindExternalFunction(funcName: string) {
  // Fixed garbled activity text (was "unbind an external a function").
  this.IfAsyncWeCant("unbind an external function");
  this.Assert(
    this._externals.has(funcName),
    "Function '" + funcName + "' has not been bound."
  );
  this._externals.delete(funcName);
}
public ValidateExternalBindings(): void;
public ValidateExternalBindings(
  c: Container | null,
  missingExternals: Set<string>
): void;
public ValidateExternalBindings(
  o: InkObject | null,
  missingExternals: Set<string>
): void;
/**
 * Check that every EXTERNAL function the ink declares has a JS binding
 * (or, when allowExternalFunctionFallbacks is set, an ink fallback knot).
 * Recurses over the content hierarchy collecting all missing names, then
 * reports them in a single error.
 */
public ValidateExternalBindings() {
  let c: Container | null = null;
  let o: InkObject | null = null;
  let missingExternals: Set<string> = arguments[1] || new Set();
  if (arguments[0] instanceof Container) {
    c = arguments[0];
  }
  if (arguments[0] instanceof InkObject) {
    o = arguments[0];
  }
  if (c === null && o === null) {
    // Top-level call: walk the whole story, then report the result.
    this.ValidateExternalBindings(
      this._mainContentContainer,
      missingExternals
    );
    this._hasValidatedExternals = true;
    // (Removed a redundant second `_hasValidatedExternals = true` that the
    // original repeated inside the success branch.)
    if (missingExternals.size > 0) {
      let message = "Error: Missing function binding for external";
      message += missingExternals.size > 1 ? "s" : "";
      message += ": '";
      message += Array.from(missingExternals).join("', '");
      message += "' ";
      message += this.allowExternalFunctionFallbacks
        ? ", and no fallback ink function found."
        : " (ink fallbacks disabled)";
      this.Error(message);
    }
  } else if (c != null) {
    for (let innerContent of c.content) {
      // Use asOrNull rather than the original bare `as Container` cast:
      // a TypeScript `as` assertion never yields null, so the null check
      // below could never fire as written.
      let container = asOrNull(innerContent, Container);
      if (container == null || !container.hasValidName)
        this.ValidateExternalBindings(innerContent, missingExternals);
    }
    for (let [, value] of c.namedContent) {
      this.ValidateExternalBindings(
        asOrNull(value, InkObject),
        missingExternals
      );
    }
  } else if (o != null) {
    let divert = asOrNull(o, Divert);
    if (divert && divert.isExternal) {
      let name = divert.targetPathString;
      if (name === null) {
        return throwNullException("name");
      }
      if (!this._externals.has(name)) {
        if (this.allowExternalFunctionFallbacks) {
          // A same-named knot counts as a fallback.
          let fallbackFound = this.mainContentContainer.namedContent.has(
            name
          );
          if (!fallbackFound) {
            missingExternals.add(name);
          }
        } else {
          missingExternals.add(name);
        }
      }
    }
  }
}
/**
 * Register an observer callback for a global variable; fired whenever the
 * variable's value changes. Throws when the variable was never declared in
 * the ink story.
 */
public ObserveVariable(
  variableName: string,
  observer: Story.VariableObserver
) {
  this.IfAsyncWeCant("observe a new variable");
  if (this._variableObservers === null) {
    // Lazily create the observer map on first registration.
    this._variableObservers = new Map();
  }
  if (!this.state.variablesState.GlobalVariableExistsWithName(variableName)) {
    throw new Error(
      "Cannot observe variable '" +
        variableName +
        "' because it wasn't declared in the ink story."
    );
  }
  let existing = this._variableObservers.get(variableName);
  if (typeof existing !== "undefined") {
    existing.push(observer);
  } else {
    this._variableObservers.set(variableName, [observer]);
  }
}
/**
 * Register several observers at once; `observers[i]` is attached to
 * `variableNames[i]`.
 * @throws Error when the two arrays have different lengths — the original
 *         silently registered `undefined` observers for the extra names,
 *         which would crash later when VariableStateDidChangeEvent invoked
 *         them.
 */
public ObserveVariables(
  variableNames: string[],
  observers: Story.VariableObserver[]
) {
  if (variableNames.length !== observers.length) {
    throw new Error(
      "ObserveVariables requires the same number of variable names and observers."
    );
  }
  for (let i = 0, l = variableNames.length; i < l; i++) {
    this.ObserveVariable(variableNames[i], observers[i]);
  }
}
/**
 * Unregister variable observers.
 * - (observer, name): remove that observer from that variable.
 * - (undefined, name): remove all observers from that variable.
 * - (observer): remove that observer from every variable.
 *
 * A couple of things to know about this method:
 *
 * 1. Since `RemoveVariableObserver` is exposed to the JavaScript world,
 *    optionality is marked as `undefined` rather than `null`.
 *    To keep things simple, null-checks are performed using regular
 *    equality operators, where undefined == null.
 *
 * 2. Since C# delegates are translated to arrays of functions,
 *    -= becomes a call to splice and null-checks are replaced by
 *    emptiness-checks.
 */
public RemoveVariableObserver(
  observer?: Story.VariableObserver,
  specificVariableName?: string
) {
  this.IfAsyncWeCant("remove a variable observer");
  if (this._variableObservers === null) return;
  if (specificVariableName != null) {
    if (this._variableObservers.has(specificVariableName)) {
      if (observer != null) {
        let variableObservers = this._variableObservers.get(
          specificVariableName
        );
        if (variableObservers != null) {
          // Fixed: guard indexOf returning -1. The original passed -1
          // straight to splice, and splice(-1, 1) removes the *last*
          // element — deleting an unrelated observer.
          let observerIndex = variableObservers.indexOf(observer);
          if (observerIndex !== -1) {
            variableObservers.splice(observerIndex, 1);
          }
          if (variableObservers.length === 0) {
            this._variableObservers.delete(specificVariableName);
          }
        }
      } else {
        this._variableObservers.delete(specificVariableName);
      }
    }
  } else if (observer != null) {
    // No variable name given: detach this observer from every variable.
    let keys = this._variableObservers.keys();
    for (let varName of keys) {
      let variableObservers = this._variableObservers.get(varName);
      if (variableObservers != null) {
        // Same -1 guard as above.
        let observerIndex = variableObservers.indexOf(observer);
        if (observerIndex !== -1) {
          variableObservers.splice(observerIndex, 1);
        }
        if (variableObservers.length === 0) {
          this._variableObservers.delete(varName);
        }
      }
    }
  }
}
/**
 * Notify all observers registered for `variableName` of its new value.
 * Throws when the new value isn't a standard ink Value type.
 */
public VariableStateDidChangeEvent(
  variableName: string,
  newValueObj: InkObject
) {
  if (this._variableObservers === null) return;
  let observers = this._variableObservers.get(variableName);
  if (typeof observers === "undefined") return;
  if (!(newValueObj instanceof Value)) {
    throw new Error(
      "Tried to get the value of a variable that isn't a standard type"
    );
  }
  let val = asOrThrows(newValueObj, Value);
  // Observers receive the unwrapped primitive, not the ink Value wrapper.
  for (let observer of observers) {
    observer(variableName, val.valueObject);
  }
}
// Tags declared at the very top of the main story flow (root path "").
get globalTags() {
  return this.TagsAtStartOfFlowContainerWithPathString("");
}
// Tags at the start of the knot/stitch found at the given path string.
public TagsForContentAtPath(path: string) {
  return this.TagsAtStartOfFlowContainerWithPathString(path);
}
/**
 * Collect the run of leading Tag objects at the start of the flow container
 * at `pathString`, first diving through nested first-child containers.
 * Returns null when the container doesn't begin with any tags.
 */
public TagsAtStartOfFlowContainerWithPathString(pathString: string) {
  let path = new Path(pathString);
  // Expected to be the global story, a knot or a stitch.
  let flowContainer = this.ContentAtPath(path).container;
  if (flowContainer === null) {
    return throwNullException("flowContainer");
  }
  // Dive into the innermost first-child container (the compiled weave).
  while (true) {
    let firstContent: InkObject = flowContainer.content[0];
    if (firstContent instanceof Container) flowContainer = firstContent;
    else break;
  }
  // Any initial run of Tag objects counts as the flow's tags; stop at the
  // first non-tag item.
  let tags: string[] | null = null;
  for (let c of flowContainer.content) {
    // var tag = c as Runtime.Tag;
    let tag = asOrNull(c, Tag);
    if (tag) {
      if (tags == null) tags = [];
      tags.push(tag.text);
    } else break;
  }
  return tags;
}
/**
 * Debug helper: render the whole content hierarchy as text, marking the
 * object at the current pointer.
 */
public BuildStringOfHierarchy() {
  let sb = new StringBuilder();
  let currentObj = this.state.currentPointer.Resolve();
  this.mainContentContainer.BuildStringOfHierarchy(sb, 0, currentObj);
  return sb.toString();
}
/**
 * Debug helper: render one container's hierarchy as text, marking the
 * object at the current pointer.
 */
public BuildStringOfContainer(container: Container) {
  let sb = new StringBuilder();
  let currentObj = this.state.currentPointer.Resolve();
  container.BuildStringOfHierarchy(sb, 0, currentObj);
  return sb.toString();
}
/**
 * Advance the story position to the next piece of content: follow a pending
 * divert if one is set, otherwise increment sequentially. When content runs
 * out, auto-pops function frames/threads and recurses to step past the
 * point the function was called from.
 */
public NextContent() {
  // Record where we came from — VisitChangedContainersDueToDivert compares
  // previous and current pointers.
  this.state.previousPointer = this.state.currentPointer.copy();
  // Divert step?
  if (!this.state.divertedPointer.isNull) {
    this.state.currentPointer = this.state.divertedPointer.copy();
    this.state.divertedPointer = Pointer.Null;
    this.VisitChangedContainersDueToDivert();
    // Diverted location has valid content?
    if (!this.state.currentPointer.isNull) {
      return;
    }
    // Otherwise the diverted location had no valid content: drop through
    // and attempt a sequential increment instead.
  }
  let successfulPointerIncrement = this.IncrementContentPointer();
  // Ran out of content? Try to auto-exit from a function, or finish
  // evaluating the content of a thread.
  if (!successfulPointerIncrement) {
    let didPop = false;
    if (this.state.callStack.CanPop(PushPopType.Function)) {
      this.state.PopCallStack(PushPopType.Function);
      // Dropping off the end of a function that returned nothing: push
      // Void so expression evaluation has something to consume.
      if (this.state.inExpressionEvaluation) {
        this.state.PushEvaluationStack(new Void());
      }
      didPop = true;
    } else if (this.state.callStack.canPopThread) {
      this.state.callStack.PopThread();
      didPop = true;
    } else {
      this.state.TryExitFunctionEvaluationFromGame();
    }
    // Step past the point where we last called out.
    if (didPop && !this.state.currentPointer.isNull) {
      this.NextContent();
    }
  }
}
/**
 * Move the callstack's current pointer one step forward within its
 * container, climbing into ancestor containers when the end is reached.
 * Returns false (and sets the pointer to Null) when no next content exists.
 */
public IncrementContentPointer() {
  let successfulIncrement = true;
  let pointer = this.state.callStack.currentElement.currentPointer.copy();
  pointer.index++;
  if (pointer.container === null) {
    return throwNullException("pointer.container");
  }
  // Each time we step past the end of a container, fall out to its parent
  // and continue from the slot just after the container we were in.
  while (pointer.index >= pointer.container.content.length) {
    successfulIncrement = false;
    // Container nextAncestor = pointer.container.parent as Container;
    let nextAncestor = asOrNull(pointer.container.parent, Container);
    if (nextAncestor instanceof Container === false) {
      break;
    }
    // Locate the current container within its parent's content list.
    let indexInAncestor = nextAncestor!.content.indexOf(pointer.container);
    if (indexInAncestor == -1) {
      break;
    }
    pointer = new Pointer(nextAncestor, indexInAncestor);
    // Step to the next item in the outer container.
    pointer.index++;
    successfulIncrement = true;
    if (pointer.container === null) {
      return throwNullException("pointer.container");
    }
  }
  if (!successfulIncrement) pointer = Pointer.Null;
  this.state.callStack.currentElement.currentPointer = pointer.copy();
  return successfulIncrement;
}
/**
 * When the only available choices are invisible default choices, follow the
 * first one automatically. Returns true when such a choice was taken.
 */
public TryFollowDefaultInvisibleChoice() {
  let allChoices = this._state.currentChoices;
  // Only proceed when invisible defaults are the ONLY choices on offer.
  let invisibleChoices = allChoices.filter((c) => c.isInvisibleDefault);
  if (
    invisibleChoices.length == 0 ||
    allChoices.length > invisibleChoices.length
  )
    return false;
  let choice = invisibleChoices[0];
  if (choice.targetPath === null) {
    return throwNullException("choice.targetPath");
  }
  if (choice.threadAtGeneration === null) {
    return throwNullException("choice.threadAtGeneration");
  }
  this.state.callStack.currentThread = choice.threadAtGeneration;
  // NOTE(review): when a lookahead snapshot is held, the thread is forked —
  // presumably so the snapshot's copy stays unmodified; confirm against the
  // snapshot/restore logic in Continue().
  if (this._stateSnapshotAtLastNewline !== null) {
    this.state.callStack.currentThread = this.state.callStack.ForkThread();
  }
  // Second argument false: following the invisible default doesn't count
  // as a turn (ChoosePath's incrementingTurnIndex).
  this.ChoosePath(choice.targetPath, false);
  return true;
}
/**
 * Compute the next element index for a shuffle sequence. Pops the element
 * count and the sequence's visit count from the evaluation stack, then
 * shuffles deterministically: the PRNG is seeded from the sequence's path
 * hash + loop index + story seed, so identical state yields the same order.
 */
public NextSequenceShuffleIndex() {
  // var numElementsIntVal = state.PopEvaluationStack () as IntValue;
  let numElementsIntVal = asOrNull(this.state.PopEvaluationStack(), IntValue);
  if (!(numElementsIntVal instanceof IntValue)) {
    this.Error("expected number of elements in sequence for shuffle index");
    return 0;
  }
  let seqContainer = this.state.currentPointer.container;
  if (seqContainer === null) {
    return throwNullException("seqContainer");
  }
  // Originally a primitive type, but here, can be null.
  // TODO: Replace by default value?
  if (numElementsIntVal.value === null) {
    return throwNullException("numElementsIntVal.value");
  }
  let numElements = numElementsIntVal.value;
  // var seqCountVal = state.PopEvaluationStack () as IntValue;
  let seqCountVal = asOrThrows(this.state.PopEvaluationStack(), IntValue);
  let seqCount = seqCountVal.value;
  // Originally a primitive type, but here, can be null.
  // TODO: Replace by default value?
  if (seqCount === null) {
    return throwNullException("seqCount");
  }
  // Note: fractional in JS (no integer division); the combined seed is
  // floored below before seeding the PRNG.
  let loopIndex = seqCount / numElements;
  let iterationIndex = seqCount % numElements;
  // Hash the sequence's path so each sequence shuffles differently.
  let seqPathStr = seqContainer.path.toString();
  let sequenceHash = 0;
  for (let i = 0, l = seqPathStr.length; i < l; i++) {
    sequenceHash += seqPathStr.charCodeAt(i) || 0;
  }
  let randomSeed = sequenceHash + loopIndex + this.state.storySeed;
  let random = new PRNG(Math.floor(randomSeed));
  // Draw iterationIndex+1 elements without replacement; the last drawn
  // element is this turn's pick.
  let unpickedIndices = [];
  for (let i = 0; i < numElements; ++i) {
    unpickedIndices.push(i);
  }
  for (let i = 0; i <= iterationIndex; ++i) {
    let chosen = random.next() % unpickedIndices.length;
    let chosenIndex = unpickedIndices[chosen];
    unpickedIndices.splice(chosen, 1);
    if (i == iterationIndex) {
      return chosenIndex;
    }
  }
  throw new Error("Should never reach here");
}
/**
 * Throw a StoryException carrying `message`; never returns.
 * @param useEndLineNumber When true, error reporting uses the debug
 *        metadata's end line rather than its start line.
 */
public Error(message: string, useEndLineNumber = false): never {
  let exception = new StoryException(message);
  exception.useEndLineNumber = useEndLineNumber;
  throw exception;
}
// Report a non-fatal problem: routed through AddError with isWarning=true,
// so it is recorded in state without force-ending the story.
public Warning(message: string) {
  this.AddError(message, true);
}
/**
 * Record a runtime error or warning in the story state, prefixing the
 * message with the best location information available (debug metadata,
 * then current pointer, then nothing). Errors also force the story to end.
 */
public AddError(
  message: string,
  isWarning = false,
  useEndLineNumber = false
) {
  let dm = this.currentDebugMetadata;
  let errorTypeStr = isWarning ? "WARNING" : "ERROR";
  if (dm != null) {
    let lineNum = useEndLineNumber ? dm.endLineNumber : dm.startLineNumber;
    message = `RUNTIME ${errorTypeStr}: '${dm.fileName}' line ${lineNum}: ${message}`;
  } else if (!this.state.currentPointer.isNull) {
    message = `RUNTIME ${errorTypeStr}: (${this.state.currentPointer}): ${message}`;
  } else {
    message = `RUNTIME ${errorTypeStr}: ${message}`;
  }
  this.state.AddError(message, isWarning);
  // In a broken state don't need to know about any other errors.
  if (!isWarning) this.state.ForceEnd();
}
/**
 * Throw when `condition` is false, appending the current debug metadata
 * to the message ("Story assert" when no message is given).
 */
public Assert(condition: boolean, message: string | null = null) {
  if (condition) return;
  let text = message == null ? "Story assert" : message;
  throw new Error(text + " " + this.currentDebugMetadata);
}
/**
 * Best-effort debug metadata for the current story position: tries the
 * current pointer first, then callstack elements (newest first), then the
 * output stream (most recent first); null when nothing is available.
 */
get currentDebugMetadata(): DebugMetadata | null {
  let dm: DebugMetadata | null;
  // Try to get from the current path first.
  let pointer = this.state.currentPointer;
  if (!pointer.isNull && pointer.Resolve() !== null) {
    dm = pointer.Resolve()!.debugMetadata;
    if (dm !== null) {
      return dm;
    }
  }
  // Move up the callstack if possible.
  for (let i = this.state.callStack.elements.length - 1; i >= 0; --i) {
    pointer = this.state.callStack.elements[i].currentPointer;
    if (!pointer.isNull && pointer.Resolve() !== null) {
      dm = pointer.Resolve()!.debugMetadata;
      if (dm !== null) {
        return dm;
      }
    }
  }
  // Current/previous path may not be valid if we've just had an error, or
  // simply run out of content; as a last resort, try the output stream.
  for (let i = this.state.outputStream.length - 1; i >= 0; --i) {
    let outputObj = this.state.outputStream[i];
    dm = outputObj.debugMetadata;
    if (dm !== null) {
      return dm;
    }
  }
  return null;
}
/**
 * Root content container; while EvaluateExpression is running, content is
 * temporarily routed through its expression container instead.
 */
get mainContentContainer() {
  return this._temporaryEvaluationContainer
    ? this._temporaryEvaluationContainer
    : this._mainContentContainer;
}
/**
 * `_mainContentContainer` is almost guaranteed to be set in the
 * constructor, unless the json is malformed.
 */
private _mainContentContainer!: Container;
// Origin of the LIST definitions declared by the story, if any.
private _listDefinitions: ListDefinitionsOrigin | null = null;
// EXTERNAL function bindings, keyed by function name.
private _externals: Map<string, Story.ExternalFunctionDef>;
// Registered variable observers keyed by variable name; created lazily by
// ObserveVariable.
private _variableObservers: Map<
  string,
  Story.VariableObserver[]
> | null = null;
// Set by ValidateExternalBindings once the top-level walk has completed.
private _hasValidatedExternals: boolean = false;
// Set while EvaluateExpression runs; overrides mainContentContainer.
private _temporaryEvaluationContainer: Container | null = null;
/**
 * `state` is almost guaranteed to be set in the constructor, unless
 * using the compiler-specific constructor which will likely not be used in
 * the real world.
 */
private _state!: StoryState;
// True while a ContinueAsync() is in progress (checked by IfAsyncWeCant).
private _asyncContinueActive: boolean = false;
// Non-null while the engine is looking ahead past a newline (checked by
// CallExternalFunction and TryFollowDefaultInvisibleChoice).
private _stateSnapshotAtLastNewline: StoryState | null = null;
// Set when a non-lookahead-safe EXTERNAL call is hit during lookahead.
private _sawLookaheadUnsafeFunctionAfterNewline: boolean = false;
// NOTE(review): presumably tracks re-entrant Continue calls — usage is
// outside this chunk; confirm.
private _recursiveContinueCount: number = 0;
// NOTE(review): presumably true while an asynchronous save is in flight —
// usage is outside this chunk; confirm.
private _asyncSaving: boolean = false;
private _profiler: any | null = null; // TODO: Profiler
}
// Companion namespace holding types used by the Story class's public API.
export namespace Story {
  // How the output stream changed during a continue step.
  export enum OutputStateChange {
    NoChange = 0,
    ExtendedBeyondNewline = 1,
    NewlineRemoved = 2,
  }
  // Result of evaluating a story function as text: its return value plus
  // the text it emitted while running.
  export interface EvaluateFunctionTextOutput {
    returned: any;
    output: string;
  }
  // A bound external function together with whether it is safe to call
  // during lookahead.
  export interface ExternalFunctionDef {
    function: ExternalFunction;
    lookAheadSafe: boolean;
  }
  // Callback invoked when an observed variable changes.
  export type VariableObserver = (variableName: string, newValue: any) => void;
  // Signature for host-provided external functions.
  export type ExternalFunction = (...args: any) => any;
}
| |
<|file_name|>LoginActivity.java<|end_file_name|><|fim▁begin|>package no.westerdals.eksamen.app2;
import android.content.DialogInterface;
import android.content.Intent;
import android.support.annotation.NonNull;
import android.support.v7.app.AlertDialog;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.text.TextUtils;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Button;
import android.widget.EditText;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
import android.widget.ViewSwitcher;
import com.google.android.gms.tasks.OnCompleteListener;
import com.google.android.gms.tasks.Task;
import com.google.firebase.auth.AuthResult;
import com.google.firebase.auth.FirebaseAuth;
import com.google.firebase.auth.FirebaseUser;
import com.google.firebase.database.DatabaseReference;
import com.google.firebase.database.FirebaseDatabase;
import no.westerdals.eksamen.app2.Model.User;
public class LoginActivity extends AppCompatActivity implements View.OnClickListener {
private FirebaseAuth mAuth;
private DatabaseReference mDatabase;
EditText editTextUser;
EditText editTextPass;
Button loginButton;
TextView signUpLink;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_login);
editTextUser = (EditText) findViewById(R.id.email_address);
editTextPass = (EditText) findViewById(R.id.password_edit_text);
loginButton = (Button) findViewById(R.id.btn_login);
loginButton.setOnClickListener(this);
signUpLink = (TextView) findViewById(R.id.link_signup);
signUpLink.setOnClickListener(this);
mDatabase = FirebaseDatabase.getInstance().getReference();
mAuth = FirebaseAuth.getInstance();
}
@Override
public void onStart() {
super.onStart();
FirebaseUser currentUser = mAuth.getCurrentUser();
//opens main activity if already signed in
if (currentUser != null) {
Intent intent = new Intent(LoginActivity.this, MainActivity.class);
intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP);
LoginActivity.this.startActivity(intent);
finish();
}
}
private void signIn(String email, String password) {
if (!validateForm()) {
return;
}
mAuth.signInWithEmailAndPassword(email, password)
.addOnCompleteListener(this, new OnCompleteListener<AuthResult>() {
@Override
public void onComplete(@NonNull Task<AuthResult> task) {
if (task.isSuccessful()) {
//FirebaseUser user = mAuth.getCurrentUser();
Intent intent = new Intent(LoginActivity.this, MainActivity.class);
LoginActivity.this.startActivity(intent);
} else {
Toast.makeText(LoginActivity.this, "Authentication failed.",
Toast.LENGTH_SHORT).show();
}
}
});
}
/**
* Signup is only with username and password.
* Room number will be connected to user id in the reception.
*/
private void createAccount(String email, String password) {
mAuth.createUserWithEmailAndPassword(email, password)
.addOnCompleteListener(this, new OnCompleteListener<AuthResult>() {
@Override
public void onComplete(@NonNull Task<AuthResult> task) {
if (task.isSuccessful()) {
FirebaseUser user = mAuth.getCurrentUser();
Toast.makeText(LoginActivity.this, "User: " + user.getEmail() + " created",
Toast.LENGTH_SHORT).show();
registerUserDetails(user);
} else {
// If sign in fails, display a message to the user.
Toast.makeText(LoginActivity.this, "Authentication failed.",
Toast.LENGTH_SHORT).show();
}
}
});
}
private void registerUserDetails(FirebaseUser user) {
writeNewUser(user.getUid(), user.getEmail(), "");
}
private void writeNewUser(String userId, String email, String roomNumber) {
User user = new User(email, roomNumber);
mDatabase.child("users").child(userId).setValue(user);
}
//TODO: make better validation on email etc
private boolean validateForm() {
boolean validLogin = true;
String email = editTextUser.getText().toString();
String password = editTextPass.getText().toString();
if (TextUtils.isEmpty(email)) {
editTextUser.setError("Email address is required");
validLogin = false;
} else
editTextUser.setError(null);
if (TextUtils.isEmpty(password)) {
editTextPass.setError(" Password is required");
validLogin = false;
} else
editTextPass.setError(null);
return validLogin;
}
@Override
public void onClick(View v) {
if (v.getId() == R.id.btn_login) {
signIn(editTextUser.getText().toString(), editTextPass.getText().toString());
} else if (v.getId() == R.id.link_signup) {
//createAccount(editTextUser.getText().toString(), editTextPass.getText().toString());
signupForm();
}
}
public void signupForm() {
final AlertDialog.Builder signUpDialog = new AlertDialog.Builder(this);
LayoutInflater inflater = (LayoutInflater) getSystemService(LAYOUT_INFLATER_SERVICE);
View layout = inflater.inflate(R.layout.sign_up_form,(ViewGroup) findViewById(R.id.sign_up_form));
signUpDialog.setView(layout);
final EditText emailAddress = (EditText) layout.findViewById(R.id.email_address);
final EditText repeatEmailAddress = (EditText) layout.findViewById(R.id.repeat_email_address);
final EditText password = (EditText) layout.findViewById(R.id.password_edit_text);
final EditText repeatPassword = (EditText) layout.findViewById(R.id.repeat_password_edit_text);
Button button = (Button) layout.findViewById(R.id.btn_sign_up);
signUpDialog.setNegativeButton("Close", new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int which) {
}
});
signUpDialog.create();
final AlertDialog d = signUpDialog.show();
button.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
if(validateSignUpForm(password, repeatPassword, emailAddress, repeatEmailAddress)) {
createAccount(emailAddress.getText().toString(), password.getText().toString());
d.dismiss();
}
}
});
}
private boolean validateSignUpForm(EditText password, EditText repeatPassword, EditText emailAdress, EditText repeatEmailAdress){
boolean validated = true;
if (!TextUtils.equals(password.getText(), repeatPassword.getText())){
password.setError("Passwords not matching");
repeatPassword.setError("Passwords not matching");
validated = false;
} else if(TextUtils.isEmpty(password.getText())){
password.setError("Cannot be empty");
validated = false;
}else if(TextUtils.isEmpty(repeatPassword.getText())){
repeatPassword.setError("Cannot be empty");
validated = false;
} else {
password.setError(null);
repeatPassword.setError(null);
}<|fim▁hole|> emailAdress.setError("Email not matching");
repeatEmailAdress.setError("Email not matching");
validated = false;
} else if(TextUtils.isEmpty(emailAdress.getText())){
emailAdress.setError("Cannot be empty");
validated = false;
}else if(TextUtils.isEmpty(repeatEmailAdress.getText())){
repeatEmailAdress.setError("Cannot be empty");
validated = false;
} else {
emailAdress.setError(null);
repeatEmailAdress.setError(null);
}
return validated;
}
}<|fim▁end|>
|
if (!TextUtils.equals(emailAdress.getText(), repeatEmailAdress.getText())){
|
<|file_name|>request.rs<|end_file_name|><|fim▁begin|>//! The module that contains the request code.
use std::fmt::{Debug, Display, Formatter, Result as FmtResult};
use std::io::Error;
use std::str;
use std::sync::mpsc::channel;
use std::time::Duration;
use Method;
use curl::easy::{Easy, List};
use futures::{BoxFuture, failed, Future};
use response::Response;
use tokio_core::reactor::Handle;
use tokio_curl::Session;
use url::Url;
#[cfg(feature = "rustc-serialization")]
use rustc_serialize;
#[cfg(feature = "serde-serialization")]
use serde;
#[cfg(feature = "serde-serialization")]
use serde_json;
/// The default low byte rate threshold.
///
/// See [`Request::lowspeed_limit`](struct.Request.html#method.lowspeed_limit)
/// for more information.
pub const LOW_SPEED_LIMIT: u32 = 10;
/// The default low speed time threshold in seconds.
///
/// See [`Request::lowspeed_limit`](struct.Request.html#method.lowspeed_limit)
/// for more information.
pub const LOW_SPEED_TIME: u32 = 10;
/// The default redirect threshold for a single request.
///
/// cURL will follow this many redirects by default before aborting
/// the request. See [`Request::max_redirects`](struct.Request.html#method.max_redirects)
/// for more information.
pub const MAX_REDIRECTS: u32 = 10;
/// Represents an HTTP request.
///
/// While this can be used directly (and _must_ be for special HTTP verbs, it is
/// preferred to use the [`get`](fn.get.html), [`post`](fn.post.html), etc. functions
/// since they are shorter.
pub struct Request {
    /// Raw request body, if any.
    body: Option<Vec<u8>>,
    /// Whether cURL should follow 3xx redirects.
    follow_redirects: bool,
    /// Pre-existing cURL handle to reuse, if provided via `use_handle`.
    handle: Option<Easy>,
    /// Header name/value pairs to send.
    headers: Vec<(String, String)>,
    /// `(bytes, duration)` low-speed abort thresholds; `None` disables them.
    lowspeed_limits: Option<(u32, Duration)>,
    /// Maximum number of redirects to follow when redirects are enabled.
    max_redirects: u32,
    /// HTTP verb for this request.
    method: Method,
    /// URL query parameters to append before sending.
    params: Vec<(String, String)>,
    /// Overall request timeout; `None` means no hard timeout.
    timeout: Option<Duration>,
    /// Target URL.
    url: Url
}
impl Request {
/// Creates a new instance of `Request`.
pub fn new(url: &Url, method: Method) -> Self {
Request {
body: None,
follow_redirects: true,
handle: None,
headers: Vec::new(),
lowspeed_limits: Some((LOW_SPEED_LIMIT, Duration::from_secs(LOW_SPEED_TIME as u64))),
max_redirects: MAX_REDIRECTS,
method: method,
params: Vec::new(),
timeout: None,
url: url.clone()
}
}
/// Sets the body of the request as raw byte array.
pub fn body<B: Into<Vec<u8>>>(mut self, body: B) -> Self {
self.body = Some(body.into());
self
}
/// Sets the option whether to follow 3xx-redirects or not.
///
/// Defaults to `true`.
pub fn follow_redirects(mut self, follow: bool) -> Self {
self.follow_redirects = follow;
self
}
/// Adds an HTTP header to the request.
pub fn header(mut self, name: &str, value: &str) -> Self {
self.headers.push((name.to_owned(), value.to_owned()));
self
}
/// Sets the given request headers.
///
/// This overwrites all previously set headers.
pub fn headers(mut self, headers: Vec<(String, String)>) -> Self {
self.headers = headers;
self
}
/// Serializes the given object to JSON and uses that as the request body.
/// Also automatically sets the `Content-Type` to `application/json`.
///
/// ## Panics
/// Panics if serialization is not successful.
#[cfg(feature = "rustc-serialization")]
pub fn json<T: rustc_serialize::Encodable>(self, body: &T) -> Self {
self.set_json(rustc_serialize::json::encode(body).unwrap().into_bytes())
}
/// Serializes the given object to JSON and uses that as the request body.
/// Also automatically sets the `Content-Type` to `application/json`.
///
/// ## Panics
/// Panics if serialization is not successful.
#[cfg(feature = "serde-serialization")]
pub fn json<T: serde::Serialize>(self, body: &T) -> Self {
self.set_json(serde_json::to_vec(body).unwrap())
}
/// Sets the thresholds which, when reached, aborts a download due to too
/// low speeds.
///
/// Pass 0 for either parameter to disable lowspeed limiting.
///
/// ## Remarks
/// `bytes` sets the minimum average amount of bytes transferred in `per_duration`
/// time. If this number is not reached, cURL will abort the transfer because the transfer
/// speed is too low.
///
/// The values here default to [`LOW_SPEED_LIMIT`](constant.LOW_SPEED_LIMIT.html) and
/// [`LOW_SPEED_TIME`](constant.LOW_SPEED_TIME.html).
pub fn lowspeed_limit(mut self, bytes: u32, per_duration: Duration) -> Self {
self.lowspeed_limits = if bytes > 0 && per_duration > Duration::from_secs(0) {
Some((bytes, per_duration))
} else {
None
};
self
}
/// Sets the maximum amount of redirects cURL will follow when
/// [`Request::follow_redirects`](#method.follow_redirects) is
/// enabled.
pub fn max_redirects(mut self, max_redirects: u32) -> Self {
self.max_redirects = max_redirects;
self
}
/// Adds a URL parameter to the request.
pub fn param(mut self, name: &str, value: &str) -> Self {
self.params.push((name.to_owned(), value.to_owned()));
self
}
/// Sets the given request URL parameters.
///
/// This overwrites all previously set parameters.
pub fn params(mut self, params: Vec<(String, String)>) -> Self {
self.params = params;
self
}
/// Creates a new `Session` on the specified event loop to send the HTTP request through
/// and returns a future that fires off the request, parses the response and resolves to
/// a `Response`-struct on success.
///
/// ## Panics
/// Panics in case of native exceptions in cURL.
pub fn send(self, h: Handle) -> BoxFuture<Response, Error> {
self.send_with_session(&Session::new(h))
}
/// Uses the given `Session` to send the HTTP request through and returns a future that
/// fires off the request, parses the response and resolves to a `Response`-struct on success.
///
/// ## Panics
/// Panics in case of native exceptions in cURL.
pub fn send_with_session(mut self, session: &Session) -> BoxFuture<Response, Error> {
{
let mut query_pairs = self.url.query_pairs_mut();
for (key, value) in self.params {
query_pairs.append_pair(key.trim(), value.trim());
}
}
let headers = {
let mut list = List::new();
for (key, value) in self.headers {
list.append(&format!("{}: {}", key.trim(), value.trim())).expect("Failed to append header value to (native cURL) header list.");
}
list
};
let mut easy = self.handle.unwrap_or_else(|| Easy::new());
let (header_tx, header_rx) = channel();
let (body_tx, body_rx) = channel();
let config_res = {
// Make the borrow checker happy
let body = self.body;
let follow_redirects = self.follow_redirects;
let lowspeed_limits = self.lowspeed_limits;
let max_redirects = self.max_redirects;
let method = self.method;
let timeout = self.timeout;
let url = self.url;
let mut first_header = true;
// We cannot use try! here, since we're dealing with futures, not with Results
Ok(())
.and_then(|_| easy.accept_encoding(""))
.and_then(|_| easy.custom_request(method.as_ref()))
.and_then(|_| if follow_redirects {
easy.follow_location(true)
.and_then(|_| easy.max_redirections(max_redirects))
} else {
Ok(())
})
.and_then(|_| easy.header_function(move |header| {
match str::from_utf8(header) {
Ok(s) => {
let s = s.trim(); // Headers are \n-separated
if !first_header && s.len() > 0 { // First header is HTTP status line, don't want that
let _ = header_tx.send(s.to_owned());
}
first_header = false;
true
},
Err(_) => false
}
}))
.and_then(|_| easy.http_headers(headers))
.and_then(|_| if let Some((bytes, per_time)) = lowspeed_limits {
easy.low_speed_limit(bytes)
.and_then(|_| easy.low_speed_time(per_time))
} else {
Ok(())
})
.and_then(|_| if method == Method::Head {
easy.nobody(true)
} else {
Ok(())
})
.and_then(|_| if let Some(ref body) = body {
easy.post_fields_copy(body)
} else {
Ok(())
})
.and_then(|_| if let Some(timeout) = timeout {
easy.timeout(timeout)
} else {
Ok(())
})
.and_then(|_| easy.url(url.as_str()))
.and_then(|_| easy.write_function(move |data| {
let _ = body_tx.send(Vec::from(data));
Ok(data.len())
}))
};
match config_res {<|fim▁hole|> // we could shorten this code to two lines.
let body = {
let mut b = Vec::new();
while let Ok(item) = body_rx.try_recv() {
b.extend(item);
}
b
};
let headers = {
let mut h = Vec::new();
while let Ok(hdr) = header_rx.try_recv() {
h.push(hdr);
}
h
};
Response::new(ez, headers, body)
})
.boxed(),
Err(error) => failed(error.into()).boxed()
}
}
/// Set the maximum time the request is allowed to take.
///
/// Disabled by default in favor of [`lowspeed_limit`]
pub fn timeout(mut self, duration: Duration) -> Self {
self.timeout = Some(duration);
self
}
/// Uses the given cURL handle in the request process reusing its resources
/// and improving performance.
///
/// This is solely a way to improve performance, it is not necessary to call
/// this method prior to firing off the request. The easy handle will be created
/// automatically if necessary.
pub fn use_handle(mut self, handle: Easy) -> Self {
self.handle = Some(handle);
self
}
#[cfg(any(feature = "rustc-serialization", feature = "serde-serialization"))]
fn set_json(mut self, body: Vec<u8>) -> Self {
self.body = Some(body);
self.header("Content-Type", "application/json")
}
}
impl Debug for Request {
fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
let len = if let Some(ref body) = self.body {
body.len() as isize
} else {
-1isize
};
fmt.debug_struct(stringify!(Request))
.field("body_len", &len)
.field("follow_redirects", &self.follow_redirects)
.field("headers", &self.headers)
.field("method", &self.method)
.field("params", &self.params)
.field("reuses_handle", &self.handle.is_some())
.field("url", &self.url)
.finish()
}
}
impl Display for Request {
fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
write!(fmt, "{} {}", self.method, self.url)
}
}<|fim▁end|>
|
Ok(_) => session.perform(easy)
.map_err(|err| err.into_error())
.map(move |ez| {
// In an ideal world where receiver_try_iter is stable
|
<|file_name|>bubble-map.component.ts<|end_file_name|><|fim▁begin|>import { Component, OnDestroy } from '@angular/core';
import { HttpClient } from '@angular/common/http';
import { combineLatest } from 'rxjs';
import { takeWhile } from 'rxjs/operators';
import { NgxEchartsService } from 'ngx-echarts';
import { NbThemeService } from '@nebular/theme';
@Component({
selector: 'ngx-bubble-map',
styleUrls: ['./bubble-map.component.scss'],
template: `
<nb-card>
<nb-card-header>Bubble Maps</nb-card-header>
<nb-card-body>
<div echarts [options]="options" class="echarts"></div>
</nb-card-body>
</nb-card>
`,
})
export class BubbleMapComponent implements OnDestroy {
latlong: any = {};
mapData: any[];
max = -Infinity;
min = Infinity;
options: any;
bubbleTheme: any;
geoColors: any[];
private alive = true;
constructor(private theme: NbThemeService,
private http: HttpClient,
private es: NgxEchartsService) {
combineLatest([
this.http.get('assets/map/world.json'),
this.theme.getJsTheme(),
])
.pipe(takeWhile(() => this.alive))
.subscribe(([map, config]: [any, any]) => {
this.es.registerMap('world', map);
const colors = config.variables;
this.bubbleTheme = config.variables.bubbleMap;
this.geoColors = [colors.primary, colors.info, colors.success, colors.warning, colors.danger];
this.latlong = {
'AD': { 'latitude': 42.5, 'longitude': 1.5 },
'AE': { 'latitude': 24, 'longitude': 54 },
'AF': { 'latitude': 33, 'longitude': 65 },
'AG': { 'latitude': 17.05, 'longitude': -61.8 },
'AI': { 'latitude': 18.25, 'longitude': -63.1667 },
'AL': { 'latitude': 41, 'longitude': 20 },
'AM': { 'latitude': 40, 'longitude': 45 },
'AN': { 'latitude': 12.25, 'longitude': -68.75 },
'AO': { 'latitude': -12.5, 'longitude': 18.5 },
'AP': { 'latitude': 35, 'longitude': 105 },
'AQ': { 'latitude': -90, 'longitude': 0 },
'AR': { 'latitude': -34, 'longitude': -64 },
'AS': { 'latitude': -14.3333, 'longitude': -170 },
'AT': { 'latitude': 47.3333, 'longitude': 13.3333 },
'AU': { 'latitude': -27, 'longitude': 133 },
'AW': { 'latitude': 12.5, 'longitude': -69.9667 },
'AZ': { 'latitude': 40.5, 'longitude': 47.5 },
'BA': { 'latitude': 44, 'longitude': 18 },
'BB': { 'latitude': 13.1667, 'longitude': -59.5333 },
'BD': { 'latitude': 24, 'longitude': 90 },
'BE': { 'latitude': 50.8333, 'longitude': 4 },
'BF': { 'latitude': 13, 'longitude': -2 },
'BG': { 'latitude': 43, 'longitude': 25 },
'BH': { 'latitude': 26, 'longitude': 50.55 },
'BI': { 'latitude': -3.5, 'longitude': 30 },
'BJ': { 'latitude': 9.5, 'longitude': 2.25 },
'BM': { 'latitude': 32.3333, 'longitude': -64.75 },
'BN': { 'latitude': 4.5, 'longitude': 114.6667 },
'BO': { 'latitude': -17, 'longitude': -65 },
'BR': { 'latitude': -10, 'longitude': -55 },
'BS': { 'latitude': 24.25, 'longitude': -76 },
'BT': { 'latitude': 27.5, 'longitude': 90.5 },
'BV': { 'latitude': -54.4333, 'longitude': 3.4 },
'BW': { 'latitude': -22, 'longitude': 24 },
'BY': { 'latitude': 53, 'longitude': 28 },
'BZ': { 'latitude': 17.25, 'longitude': -88.75 },
'CA': { 'latitude': 54, 'longitude': -100 },
'CC': { 'latitude': -12.5, 'longitude': 96.8333 },
'CD': { 'latitude': 0, 'longitude': 25 },
'CF': { 'latitude': 7, 'longitude': 21 },
'CG': { 'latitude': -1, 'longitude': 15 },
'CH': { 'latitude': 47, 'longitude': 8 },
'CI': { 'latitude': 8, 'longitude': -5 },
'CK': { 'latitude': -21.2333, 'longitude': -159.7667 },
'CL': { 'latitude': -30, 'longitude': -71 },
'CM': { 'latitude': 6, 'longitude': 12 },
'CN': { 'latitude': 35, 'longitude': 105 },
'CO': { 'latitude': 4, 'longitude': -72 },
'CR': { 'latitude': 10, 'longitude': -84 },
'CU': { 'latitude': 21.5, 'longitude': -80 },
'CV': { 'latitude': 16, 'longitude': -24 },
'CX': { 'latitude': -10.5, 'longitude': 105.6667 },
'CY': { 'latitude': 35, 'longitude': 33 },
'CZ': { 'latitude': 49.75, 'longitude': 15.5 },
'DE': { 'latitude': 51, 'longitude': 9 },
'DJ': { 'latitude': 11.5, 'longitude': 43 },
'DK': { 'latitude': 56, 'longitude': 10 },
'DM': { 'latitude': 15.4167, 'longitude': -61.3333 },
'DO': { 'latitude': 19, 'longitude': -70.6667 },
'DZ': { 'latitude': 28, 'longitude': 3 },
'EC': { 'latitude': -2, 'longitude': -77.5 },
'EE': { 'latitude': 59, 'longitude': 26 },
'EG': { 'latitude': 27, 'longitude': 30 },
'EH': { 'latitude': 24.5, 'longitude': -13 },
'ER': { 'latitude': 15, 'longitude': 39 },
'ES': { 'latitude': 40, 'longitude': -4 },
'ET': { 'latitude': 8, 'longitude': 38 },
'EU': { 'latitude': 47, 'longitude': 8 },
'FI': { 'latitude': 62, 'longitude': 26 },
'FJ': { 'latitude': -18, 'longitude': 175 },
'FK': { 'latitude': -51.75, 'longitude': -59 },
'FM': { 'latitude': 6.9167, 'longitude': 158.25 },
'FO': { 'latitude': 62, 'longitude': -7 },
'FR': { 'latitude': 46, 'longitude': 2 },
'GA': { 'latitude': -1, 'longitude': 11.75 },
'GB': { 'latitude': 54, 'longitude': -2 },
'GD': { 'latitude': 12.1167, 'longitude': -61.6667 },
'GE': { 'latitude': 42, 'longitude': 43.5 },
'GF': { 'latitude': 4, 'longitude': -53 },
'GH': { 'latitude': 8, 'longitude': -2 },
'GI': { 'latitude': 36.1833, 'longitude': -5.3667 },
'GL': { 'latitude': 72, 'longitude': -40 },
'GM': { 'latitude': 13.4667, 'longitude': -16.5667 },
'GN': { 'latitude': 11, 'longitude': -10 },
'GP': { 'latitude': 16.25, 'longitude': -61.5833 },
'GQ': { 'latitude': 2, 'longitude': 10 },
'GR': { 'latitude': 39, 'longitude': 22 },
'GS': { 'latitude': -54.5, 'longitude': -37 },
'GT': { 'latitude': 15.5, 'longitude': -90.25 },
'GU': { 'latitude': 13.4667, 'longitude': 144.7833 },
'GW': { 'latitude': 12, 'longitude': -15 },
'GY': { 'latitude': 5, 'longitude': -59 },
'HK': { 'latitude': 22.25, 'longitude': 114.1667 },
'HM': { 'latitude': -53.1, 'longitude': 72.5167 },
'HN': { 'latitude': 15, 'longitude': -86.5 },
'HR': { 'latitude': 45.1667, 'longitude': 15.5 },
'HT': { 'latitude': 19, 'longitude': -72.4167 },
'HU': { 'latitude': 47, 'longitude': 20 },
'ID': { 'latitude': -5, 'longitude': 120 },
'IE': { 'latitude': 53, 'longitude': -8 },
'IL': { 'latitude': 31.5, 'longitude': 34.75 },
'IN': { 'latitude': 20, 'longitude': 77 },
'IO': { 'latitude': -6, 'longitude': 71.5 },
'IQ': { 'latitude': 33, 'longitude': 44 },
'IR': { 'latitude': 32, 'longitude': 53 },
'IS': { 'latitude': 65, 'longitude': -18 },
'IT': { 'latitude': 42.8333, 'longitude': 12.8333 },
'JM': { 'latitude': 18.25, 'longitude': -77.5 },
'JO': { 'latitude': 31, 'longitude': 36 },
'JP': { 'latitude': 36, 'longitude': 138 },
'KE': { 'latitude': 1, 'longitude': 38 },
'KG': { 'latitude': 41, 'longitude': 75 },
'KH': { 'latitude': 13, 'longitude': 105 },
'KI': { 'latitude': 1.4167, 'longitude': 173 },
'KM': { 'latitude': -12.1667, 'longitude': 44.25 },
'KN': { 'latitude': 17.3333, 'longitude': -62.75 },
'KP': { 'latitude': 40, 'longitude': 127 },
'KR': { 'latitude': 37, 'longitude': 127.5 },
'KW': { 'latitude': 29.3375, 'longitude': 47.6581 },
'KY': { 'latitude': 19.5, 'longitude': -80.5 },
'KZ': { 'latitude': 48, 'longitude': 68 },
'LA': { 'latitude': 18, 'longitude': 105 },
'LB': { 'latitude': 33.8333, 'longitude': 35.8333 },
'LC': { 'latitude': 13.8833, 'longitude': -61.1333 },
'LI': { 'latitude': 47.1667, 'longitude': 9.5333 },
'LK': { 'latitude': 7, 'longitude': 81 },
'LR': { 'latitude': 6.5, 'longitude': -9.5 },
'LS': { 'latitude': -29.5, 'longitude': 28.5 },
'LT': { 'latitude': 55, 'longitude': 24 },
'LU': { 'latitude': 49.75, 'longitude': 6 },
'LV': { 'latitude': 57, 'longitude': 25 },
'LY': { 'latitude': 25, 'longitude': 17 },
'MA': { 'latitude': 32, 'longitude': -5 },
'MC': { 'latitude': 43.7333, 'longitude': 7.4 },
'MD': { 'latitude': 47, 'longitude': 29 },
'ME': { 'latitude': 42.5, 'longitude': 19.4 },
'MG': { 'latitude': -20, 'longitude': 47 },
'MH': { 'latitude': 9, 'longitude': 168 },
'MK': { 'latitude': 41.8333, 'longitude': 22 },
'ML': { 'latitude': 17, 'longitude': -4 },
'MM': { 'latitude': 22, 'longitude': 98 },
'MN': { 'latitude': 46, 'longitude': 105 },
'MO': { 'latitude': 22.1667, 'longitude': 113.55 },
'MP': { 'latitude': 15.2, 'longitude': 145.75 },
'MQ': { 'latitude': 14.6667, 'longitude': -61 },
'MR': { 'latitude': 20, 'longitude': -12 },
'MS': { 'latitude': 16.75, 'longitude': -62.2 },
'MT': { 'latitude': 35.8333, 'longitude': 14.5833 },
'MU': { 'latitude': -20.2833, 'longitude': 57.55 },
'MV': { 'latitude': 3.25, 'longitude': 73 },
'MW': { 'latitude': -13.5, 'longitude': 34 },
'MX': { 'latitude': 23, 'longitude': -102 },
'MY': { 'latitude': 2.5, 'longitude': 112.5 },
'MZ': { 'latitude': -18.25, 'longitude': 35 },
'NA': { 'latitude': -22, 'longitude': 17 },
'NC': { 'latitude': -21.5, 'longitude': 165.5 },
'NE': { 'latitude': 16, 'longitude': 8 },
'NF': { 'latitude': -29.0333, 'longitude': 167.95 },
'NG': { 'latitude': 10, 'longitude': 8 },
'NI': { 'latitude': 13, 'longitude': -85 },
'NL': { 'latitude': 52.5, 'longitude': 5.75 },
'NO': { 'latitude': 62, 'longitude': 10 },
'NP': { 'latitude': 28, 'longitude': 84 },
'NR': { 'latitude': -0.5333, 'longitude': 166.9167 },
'NU': { 'latitude': -19.0333, 'longitude': -169.8667 },
'NZ': { 'latitude': -41, 'longitude': 174 },
'OM': { 'latitude': 21, 'longitude': 57 },
'PA': { 'latitude': 9, 'longitude': -80 },
'PE': { 'latitude': -10, 'longitude': -76 },
'PF': { 'latitude': -15, 'longitude': -140 },
'PG': { 'latitude': -6, 'longitude': 147 },
'PH': { 'latitude': 13, 'longitude': 122 },
'PK': { 'latitude': 30, 'longitude': 70 },
'PL': { 'latitude': 52, 'longitude': 20 },
'PM': { 'latitude': 46.8333, 'longitude': -56.3333 },
'PR': { 'latitude': 18.25, 'longitude': -66.5 },
'PS': { 'latitude': 32, 'longitude': 35.25 },
'PT': { 'latitude': 39.5, 'longitude': -8 },
'PW': { 'latitude': 7.5, 'longitude': 134.5 },
'PY': { 'latitude': -23, 'longitude': -58 },
'QA': { 'latitude': 25.5, 'longitude': 51.25 },
'RE': { 'latitude': -21.1, 'longitude': 55.6 },
'RO': { 'latitude': 46, 'longitude': 25 },
'RS': { 'latitude': 44, 'longitude': 21 },
'RU': { 'latitude': 60, 'longitude': 100 },
'RW': { 'latitude': -2, 'longitude': 30 },
'SA': { 'latitude': 25, 'longitude': 45 },
'SB': { 'latitude': -8, 'longitude': 159 },
'SC': { 'latitude': -4.5833, 'longitude': 55.6667 },
'SD': { 'latitude': 15, 'longitude': 30 },
'SE': { 'latitude': 62, 'longitude': 15 },
'SG': { 'latitude': 1.3667, 'longitude': 103.8 },
'SH': { 'latitude': -15.9333, 'longitude': -5.7 },
'SI': { 'latitude': 46, 'longitude': 15 },
'SJ': { 'latitude': 78, 'longitude': 20 },
'SK': { 'latitude': 48.6667, 'longitude': 19.5 },
'SL': { 'latitude': 8.5, 'longitude': -11.5 },
'SM': { 'latitude': 43.7667, 'longitude': 12.4167 },
'SN': { 'latitude': 14, 'longitude': -14 },
'SO': { 'latitude': 10, 'longitude': 49 },
'SR': { 'latitude': 4, 'longitude': -56 },
'ST': { 'latitude': 1, 'longitude': 7 },
'SV': { 'latitude': 13.8333, 'longitude': -88.9167 },
'SY': { 'latitude': 35, 'longitude': 38 },
'SZ': { 'latitude': -26.5, 'longitude': 31.5 },
'TC': { 'latitude': 21.75, 'longitude': -71.5833 },
'TD': { 'latitude': 15, 'longitude': 19 },
'TF': { 'latitude': -43, 'longitude': 67 },
'TG': { 'latitude': 8, 'longitude': 1.1667 },
'TH': { 'latitude': 15, 'longitude': 100 },
'TJ': { 'latitude': 39, 'longitude': 71 },
'TK': { 'latitude': -9, 'longitude': -172 },
'TM': { 'latitude': 40, 'longitude': 60 },
'TN': { 'latitude': 34, 'longitude': 9 },
'TO': { 'latitude': -20, 'longitude': -175 },
'TR': { 'latitude': 39, 'longitude': 35 },
'TT': { 'latitude': 11, 'longitude': -61 },
'TV': { 'latitude': -8, 'longitude': 178 },
'TW': { 'latitude': 23.5, 'longitude': 121 },
'TZ': { 'latitude': -6, 'longitude': 35 },
'UA': { 'latitude': 49, 'longitude': 32 },
'UG': { 'latitude': 1, 'longitude': 32 },
'UM': { 'latitude': 19.2833, 'longitude': 166.6 },
'US': { 'latitude': 38, 'longitude': -97 },
'UY': { 'latitude': -33, 'longitude': -56 },
'UZ': { 'latitude': 41, 'longitude': 64 },
'VA': { 'latitude': 41.9, 'longitude': 12.45 },
'VC': { 'latitude': 13.25, 'longitude': -61.2 },
'VE': { 'latitude': 8, 'longitude': -66 },
'VG': { 'latitude': 18.5, 'longitude': -64.5 },
'VI': { 'latitude': 18.3333, 'longitude': -64.8333 },
'VN': { 'latitude': 16, 'longitude': 106 },
'VU': { 'latitude': -16, 'longitude': 167 },
'WF': { 'latitude': -13.3, 'longitude': -176.2 },
'WS': { 'latitude': -13.5833, 'longitude': -172.3333 },
'YE': { 'latitude': 15, 'longitude': 48 },
'YT': { 'latitude': -12.8333, 'longitude': 45.1667 },
'ZA': { 'latitude': -29, 'longitude': 24 },
'ZM': { 'latitude': -15, 'longitude': 30 },
'ZW': { 'latitude': -20, 'longitude': 30 },
};
this.mapData = [
{ 'code': 'AF', 'name': 'Afghanistan', 'value': 32358260, 'color': this.getRandomGeoColor() },
{ 'code': 'AL', 'name': 'Albania', 'value': 3215988, 'color': this.getRandomGeoColor() },
{ 'code': 'DZ', 'name': 'Algeria', 'value': 35980193, 'color': this.getRandomGeoColor() },
{ 'code': 'AO', 'name': 'Angola', 'value': 19618432, 'color': this.getRandomGeoColor() },
{ 'code': 'AR', 'name': 'Argentina', 'value': 40764561, 'color': this.getRandomGeoColor() },
{ 'code': 'AM', 'name': 'Armenia', 'value': 3100236, 'color': this.getRandomGeoColor() },
{ 'code': 'AU', 'name': 'Australia', 'value': 22605732, 'color': this.getRandomGeoColor() },
{ 'code': 'AT', 'name': 'Austria', 'value': 8413429, 'color': this.getRandomGeoColor() },
{ 'code': 'AZ', 'name': 'Azerbaijan', 'value': 9306023, 'color': this.getRandomGeoColor() },
{ 'code': 'BH', 'name': 'Bahrain', 'value': 1323535, 'color': this.getRandomGeoColor() },
{ 'code': 'BD', 'name': 'Bangladesh', 'value': 150493658, 'color': this.getRandomGeoColor() },
{ 'code': 'BY', 'name': 'Belarus', 'value': 9559441, 'color': this.getRandomGeoColor() },
{ 'code': 'BE', 'name': 'Belgium', 'value': 10754056, 'color': this.getRandomGeoColor() },
{ 'code': 'BJ', 'name': 'Benin', 'value': 9099922, 'color': this.getRandomGeoColor() },
{ 'code': 'BT', 'name': 'Bhutan', 'value': 738267, 'color': this.getRandomGeoColor() },
{ 'code': 'BO', 'name': 'Bolivia', 'value': 10088108, 'color': this.getRandomGeoColor() },
{ 'code': 'BA', 'name': 'Bosnia and Herzegovina', 'value': 3752228, 'color': this.getRandomGeoColor() },
{ 'code': 'BW', 'name': 'Botswana', 'value': 2030738, 'color': this.getRandomGeoColor() },
{ 'code': 'BR', 'name': 'Brazil', 'value': 196655014, 'color': this.getRandomGeoColor() },
{ 'code': 'BN', 'name': 'Brunei', 'value': 405938, 'color': this.getRandomGeoColor() },
{ 'code': 'BG', 'name': 'Bulgaria', 'value': 7446135, 'color': this.getRandomGeoColor() },
{ 'code': 'BF', 'name': 'Burkina Faso', 'value': 16967845, 'color': this.getRandomGeoColor() },
{ 'code': 'BI', 'name': 'Burundi', 'value': 8575172, 'color': this.getRandomGeoColor() },
{ 'code': 'KH', 'name': 'Cambodia', 'value': 14305183, 'color': this.getRandomGeoColor() },
{ 'code': 'CM', 'name': 'Cameroon', 'value': 20030362, 'color': this.getRandomGeoColor() },
{ 'code': 'CA', 'name': 'Canada', 'value': 34349561, 'color': this.getRandomGeoColor() },
{ 'code': 'CV', 'name': 'Cape Verde', 'value': 500585, 'color': this.getRandomGeoColor() },
{ 'code': 'CF', 'name': 'Central African Rep.', 'value': 4486837, 'color': this.getRandomGeoColor() },
{ 'code': 'TD', 'name': 'Chad', 'value': 11525496, 'color': this.getRandomGeoColor() },
{ 'code': 'CL', 'name': 'Chile', 'value': 17269525, 'color': this.getRandomGeoColor() },
{ 'code': 'CN', 'name': 'China', 'value': 1347565324, 'color': this.getRandomGeoColor() },
{ 'code': 'CO', 'name': 'Colombia', 'value': 46927125, 'color': this.getRandomGeoColor() },
{ 'code': 'KM', 'name': 'Comoros', 'value': 753943, 'color': this.getRandomGeoColor() },
{ 'code': 'CD', 'name': 'Congo, Dem. Rep.', 'value': 67757577, 'color': this.getRandomGeoColor() },
{ 'code': 'CG', 'name': 'Congo, Rep.', 'value': 4139748, 'color': this.getRandomGeoColor() },
{ 'code': 'CR', 'name': 'Costa Rica', 'value': 4726575, 'color': this.getRandomGeoColor() },
{ 'code': 'CI', 'name': 'Cote d\'Ivoire', 'value': 20152894, 'color': this.getRandomGeoColor() },
{ 'code': 'HR', 'name': 'Croatia', 'value': 4395560, 'color': this.getRandomGeoColor() },
{ 'code': 'CU', 'name': 'Cuba', 'value': 11253665, 'color': this.getRandomGeoColor() },
{ 'code': 'CY', 'name': 'Cyprus', 'value': 1116564, 'color': this.getRandomGeoColor() },
{ 'code': 'CZ', 'name': 'Czech Rep.', 'value': 10534293, 'color': this.getRandomGeoColor() },
{ 'code': 'DK', 'name': 'Denmark', 'value': 5572594, 'color': this.getRandomGeoColor() },
{ 'code': 'DJ', 'name': 'Djibouti', 'value': 905564, 'color': this.getRandomGeoColor() },
{ 'code': 'DO', 'name': 'Dominican Rep.', 'value': 10056181, 'color': this.getRandomGeoColor() },
{ 'code': 'EC', 'name': 'Ecuador', 'value': 14666055, 'color': this.getRandomGeoColor() },
{ 'code': 'EG', 'name': 'Egypt', 'value': 82536770, 'color': this.getRandomGeoColor() },
{ 'code': 'SV', 'name': 'El Salvador', 'value': 6227491, 'color': this.getRandomGeoColor() },
{ 'code': 'GQ', 'name': 'Equatorial Guinea', 'value': 720213, 'color': this.getRandomGeoColor() },
{ 'code': 'ER', 'name': 'Eritrea', 'value': 5415280, 'color': this.getRandomGeoColor() },
{ 'code': 'EE', 'name': 'Estonia', 'value': 1340537, 'color': this.getRandomGeoColor() },
{ 'code': 'ET', 'name': 'Ethiopia', 'value': 84734262, 'color': this.getRandomGeoColor() },
{ 'code': 'FJ', 'name': 'Fiji', 'value': 868406, 'color': this.getRandomGeoColor() },
{ 'code': 'FI', 'name': 'Finland', 'value': 5384770, 'color': this.getRandomGeoColor() },
{ 'code': 'FR', 'name': 'France', 'value': 63125894, 'color': this.getRandomGeoColor() },
{ 'code': 'GA', 'name': 'Gabon', 'value': 1534262, 'color': this.getRandomGeoColor() },
{ 'code': 'GM', 'name': 'Gambia', 'value': 1776103, 'color': this.getRandomGeoColor() },
{ 'code': 'GE', 'name': 'Georgia', 'value': 4329026, 'color': this.getRandomGeoColor() },
{ 'code': 'DE', 'name': 'Germany', 'value': 82162512, 'color': this.getRandomGeoColor() },
{ 'code': 'GH', 'name': 'Ghana', 'value': 24965816, 'color': this.getRandomGeoColor() },
{ 'code': 'GR', 'name': 'Greece', 'value': 11390031, 'color': this.getRandomGeoColor() },
{ 'code': 'GT', 'name': 'Guatemala', 'value': 14757316, 'color': this.getRandomGeoColor() },
{ 'code': 'GN', 'name': 'Guinea', 'value': 10221808, 'color': this.getRandomGeoColor() },
{ 'code': 'GW', 'name': 'Guinea-Bissau', 'value': 1547061, 'color': this.getRandomGeoColor() },
{ 'code': 'GY', 'name': 'Guyana', 'value': 756040, 'color': this.getRandomGeoColor() },
{ 'code': 'HT', 'name': 'Haiti', 'value': 10123787, 'color': this.getRandomGeoColor() },
{ 'code': 'HN', 'name': 'Honduras', 'value': 7754687, 'color': this.getRandomGeoColor() },
{ 'code': 'HK', 'name': 'Hong Kong, China', 'value': 7122187, 'color': this.getRandomGeoColor() },
{ 'code': 'HU', 'name': 'Hungary', 'value': 9966116, 'color': this.getRandomGeoColor() },
{ 'code': 'IS', 'name': 'Iceland', 'value': 324366, 'color': this.getRandomGeoColor() },
{ 'code': 'IN', 'name': 'India', 'value': 1241491960, 'color': this.getRandomGeoColor() },
{ 'code': 'ID', 'name': 'Indonesia', 'value': 242325638, 'color': this.getRandomGeoColor() },
{ 'code': 'IR', 'name': 'Iran', 'value': 74798599, 'color': this.getRandomGeoColor() },
{ 'code': 'IQ', 'name': 'Iraq', 'value': 32664942, 'color': this.getRandomGeoColor() },
{ 'code': 'IE', 'name': 'Ireland', 'value': 4525802, 'color': this.getRandomGeoColor() },
{ 'code': 'IL', 'name': 'Israel', 'value': 7562194, 'color': this.getRandomGeoColor() },
{ 'code': 'IT', 'name': 'Italy', 'value': 60788694, 'color': this.getRandomGeoColor() },
{ 'code': 'JM', 'name': 'Jamaica', 'value': 2751273, 'color': this.getRandomGeoColor() },
{ 'code': 'JP', 'name': 'Japan', 'value': 126497241, 'color': this.getRandomGeoColor() },
{ 'code': 'JO', 'name': 'Jordan', 'value': 6330169, 'color': this.getRandomGeoColor() },
{ 'code': 'KZ', 'name': 'Kazakhstan', 'value': 16206750, 'color': this.getRandomGeoColor() },
{ 'code': 'KE', 'name': 'Kenya', 'value': 41609728, 'color': this.getRandomGeoColor() },
{ 'code': 'KP', 'name': 'Korea, Dem. Rep.', 'value': 24451285, 'color': this.getRandomGeoColor() },
{ 'code': 'KR', 'name': 'Korea, Rep.', 'value': 48391343, 'color': this.getRandomGeoColor() },
{ 'code': 'KW', 'name': 'Kuwait', 'value': 2818042, 'color': this.getRandomGeoColor() },
{ 'code': 'KG', 'name': 'Kyrgyzstan', 'value': 5392580, 'color': this.getRandomGeoColor() },
{ 'code': 'LA', 'name': 'Laos', 'value': 6288037, 'color': this.getRandomGeoColor() },
{ 'code': 'LV', 'name': 'Latvia', 'value': 2243142, 'color': this.getRandomGeoColor() },
{ 'code': 'LB', 'name': 'Lebanon', 'value': 4259405, 'color': this.getRandomGeoColor() },
{ 'code': 'LS', 'name': 'Lesotho', 'value': 2193843, 'color': this.getRandomGeoColor() },
{ 'code': 'LR', 'name': 'Liberia', 'value': 4128572, 'color': this.getRandomGeoColor() },
{ 'code': 'LY', 'name': 'Libya', 'value': 6422772, 'color': this.getRandomGeoColor() },
{ 'code': 'LT', 'name': 'Lithuania', 'value': 3307481, 'color': this.getRandomGeoColor() },
{ 'code': 'LU', 'name': 'Luxembourg', 'value': 515941, 'color': this.getRandomGeoColor() },
{ 'code': 'MK', 'name': 'Macedonia, FYR', 'value': 2063893, 'color': this.getRandomGeoColor() },
{ 'code': 'MG', 'name': 'Madagascar', 'value': 21315135, 'color': this.getRandomGeoColor() },
{ 'code': 'MW', 'name': 'Malawi', 'value': 15380888, 'color': this.getRandomGeoColor() },
{ 'code': 'MY', 'name': 'Malaysia', 'value': 28859154, 'color': this.getRandomGeoColor() },
{ 'code': 'ML', 'name': 'Mali', 'value': 15839538, 'color': this.getRandomGeoColor() },
{ 'code': 'MR', 'name': 'Mauritania', 'value': 3541540, 'color': this.getRandomGeoColor() },
{ 'code': 'MU', 'name': 'Mauritius', 'value': 1306593, 'color': this.getRandomGeoColor() },
{ 'code': 'MX', 'name': 'Mexico', 'value': 114793341, 'color': this.getRandomGeoColor() },
{ 'code': 'MD', 'name': 'Moldova', 'value': 3544864, 'color': this.getRandomGeoColor() },
{ 'code': 'MN', 'name': 'Mongolia', 'value': 2800114, 'color': this.getRandomGeoColor() },
{ 'code': 'ME', 'name': 'Montenegro', 'value': 632261, 'color': this.getRandomGeoColor() },
{ 'code': 'MA', 'name': 'Morocco', 'value': 32272974, 'color': this.getRandomGeoColor() },
{ 'code': 'MZ', 'name': 'Mozambique', 'value': 23929708, 'color': this.getRandomGeoColor() },
{ 'code': 'MM', 'name': 'Myanmar', 'value': 48336763, 'color': this.getRandomGeoColor() },
{ 'code': 'NA', 'name': 'Namibia', 'value': 2324004, 'color': this.getRandomGeoColor() },
{ 'code': 'NP', 'name': 'Nepal', 'value': 30485798, 'color': this.getRandomGeoColor() },
{ 'code': 'NL', 'name': 'Netherlands', 'value': 16664746, 'color': this.getRandomGeoColor() },
{ 'code': 'NZ', 'name': 'New Zealand', 'value': 4414509, 'color': this.getRandomGeoColor() },
{ 'code': 'NI', 'name': 'Nicaragua', 'value': 5869859, 'color': this.getRandomGeoColor() },
{ 'code': 'NE', 'name': 'Niger', 'value': 16068994, 'color': this.getRandomGeoColor() },
{ 'code': 'NG', 'name': 'Nigeria', 'value': 162470737, 'color': this.getRandomGeoColor() },
{ 'code': 'NO', 'name': 'Norway', 'value': 4924848, 'color': this.getRandomGeoColor() },
{ 'code': 'OM', 'name': 'Oman', 'value': 2846145, 'color': this.getRandomGeoColor() },
{ 'code': 'PK', 'name': 'Pakistan', 'value': 176745364, 'color': this.getRandomGeoColor() },
{ 'code': 'PA', 'name': 'Panama', 'value': 3571185, 'color': this.getRandomGeoColor() },
{ 'code': 'PG', 'name': 'Papua New Guinea', 'value': 7013829, 'color': this.getRandomGeoColor() },
{ 'code': 'PY', 'name': 'Paraguay', 'value': 6568290, 'color': this.getRandomGeoColor() },
{ 'code': 'PE', 'name': 'Peru', 'value': 29399817, 'color': this.getRandomGeoColor() },
{ 'code': 'PH', 'name': 'Philippines', 'value': 94852030, 'color': this.getRandomGeoColor() },
{ 'code': 'PL', 'name': 'Poland', 'value': 38298949, 'color': this.getRandomGeoColor() },
{ 'code': 'PT', 'name': 'Portugal', 'value': 10689663, 'color': this.getRandomGeoColor() },
<|fim▁hole|> { 'code': 'RO', 'name': 'Romania', 'value': 21436495, 'color': this.getRandomGeoColor() },
{ 'code': 'RU', 'name': 'Russia', 'value': 142835555, 'color': this.getRandomGeoColor() },
{ 'code': 'RW', 'name': 'Rwanda', 'value': 10942950, 'color': this.getRandomGeoColor() },
{ 'code': 'SA', 'name': 'Saudi Arabia', 'value': 28082541, 'color': this.getRandomGeoColor() },
{ 'code': 'SN', 'name': 'Senegal', 'value': 12767556, 'color': this.getRandomGeoColor() },
{ 'code': 'RS', 'name': 'Serbia', 'value': 9853969, 'color': this.getRandomGeoColor() },
{ 'code': 'SL', 'name': 'Sierra Leone', 'value': 5997486, 'color': this.getRandomGeoColor() },
{ 'code': 'SG', 'name': 'Singapore', 'value': 5187933, 'color': this.getRandomGeoColor() },
{ 'code': 'SK', 'name': 'Slovak Republic', 'value': 5471502, 'color': this.getRandomGeoColor() },
{ 'code': 'SI', 'name': 'Slovenia', 'value': 2035012, 'color': this.getRandomGeoColor() },
{ 'code': 'SB', 'name': 'Solomon Islands', 'value': 552267, 'color': this.getRandomGeoColor() },
{ 'code': 'SO', 'name': 'Somalia', 'value': 9556873, 'color': this.getRandomGeoColor() },
{ 'code': 'ZA', 'name': 'South Africa', 'value': 50459978, 'color': this.getRandomGeoColor() },
{ 'code': 'ES', 'name': 'Spain', 'value': 46454895, 'color': this.getRandomGeoColor() },
{ 'code': 'LK', 'name': 'Sri Lanka', 'value': 21045394, 'color': this.getRandomGeoColor() },
{ 'code': 'SD', 'name': 'Sudan', 'value': 34735288, 'color': this.getRandomGeoColor() },
{ 'code': 'SR', 'name': 'Suriname', 'value': 529419, 'color': this.getRandomGeoColor() },
{ 'code': 'SZ', 'name': 'Swaziland', 'value': 1203330, 'color': this.getRandomGeoColor() },
{ 'code': 'SE', 'name': 'Sweden', 'value': 9440747, 'color': this.getRandomGeoColor() },
{ 'code': 'CH', 'name': 'Switzerland', 'value': 7701690, 'color': this.getRandomGeoColor() },
{ 'code': 'SY', 'name': 'Syria', 'value': 20766037, 'color': this.getRandomGeoColor() },
{ 'code': 'TW', 'name': 'Taiwan', 'value': 23072000, 'color': this.getRandomGeoColor() },
{ 'code': 'TJ', 'name': 'Tajikistan', 'value': 6976958, 'color': this.getRandomGeoColor() },
{ 'code': 'TZ', 'name': 'Tanzania', 'value': 46218486, 'color': this.getRandomGeoColor() },
{ 'code': 'TH', 'name': 'Thailand', 'value': 69518555, 'color': this.getRandomGeoColor() },
{ 'code': 'TG', 'name': 'Togo', 'value': 6154813, 'color': this.getRandomGeoColor() },
{ 'code': 'TT', 'name': 'Trinidad and Tobago', 'value': 1346350, 'color': this.getRandomGeoColor() },
{ 'code': 'TN', 'name': 'Tunisia', 'value': 10594057, 'color': this.getRandomGeoColor() },
{ 'code': 'TR', 'name': 'Turkey', 'value': 73639596, 'color': this.getRandomGeoColor() },
{ 'code': 'TM', 'name': 'Turkmenistan', 'value': 5105301, 'color': this.getRandomGeoColor() },
{ 'code': 'UG', 'name': 'Uganda', 'value': 34509205, 'color': this.getRandomGeoColor() },
{ 'code': 'UA', 'name': 'Ukraine', 'value': 45190180, 'color': this.getRandomGeoColor() },
{ 'code': 'AE', 'name': 'United Arab Emirates', 'value': 7890924, 'color': this.getRandomGeoColor() },
{ 'code': 'GB', 'name': 'United Kingdom', 'value': 62417431, 'color': this.getRandomGeoColor() },
{ 'code': 'US', 'name': 'United States', 'value': 313085380, 'color': this.getRandomGeoColor() },
{ 'code': 'UY', 'name': 'Uruguay', 'value': 3380008, 'color': this.getRandomGeoColor() },
{ 'code': 'UZ', 'name': 'Uzbekistan', 'value': 27760267, 'color': this.getRandomGeoColor() },
{ 'code': 'VE', 'name': 'Venezuela', 'value': 29436891, 'color': this.getRandomGeoColor() },
{ 'code': 'PS', 'name': 'West Bank and Gaza', 'value': 4152369, 'color': this.getRandomGeoColor() },
{ 'code': 'VN', 'name': 'Vietnam', 'value': 88791996, 'color': this.getRandomGeoColor() },
{ 'code': 'YE', 'name': 'Yemen, Rep.', 'value': 24799880, 'color': this.getRandomGeoColor() },
{ 'code': 'ZM', 'name': 'Zambia', 'value': 13474959, 'color': this.getRandomGeoColor() },
{ 'code': 'ZW', 'name': 'Zimbabwe', 'value': 12754378, 'color': this.getRandomGeoColor() }];
this.mapData.forEach((itemOpt) => {
if (itemOpt.value > this.max) {
this.max = itemOpt.value;
}
if (itemOpt.value < this.min) {
this.min = itemOpt.value;
}
});
this.options = {
title: {
text: 'World Population (2011)',
left: 'center',
top: 'top',
textStyle: {
color: this.bubbleTheme.titleColor,
},
},
tooltip: {
trigger: 'item',
formatter: params => {
return `${params.name}: ${params.value[2]}`;
},
},
visualMap: {
show: false,
min: 0,
max: this.max,
inRange: {
symbolSize: [6, 60],
},
},
geo: {
name: 'World Population (2010)',
type: 'map',
map: 'world',
roam: true,
label: {
emphasis: {
show: false,
},
},
itemStyle: {
normal: {
areaColor: this.bubbleTheme.areaColor,
borderColor: this.bubbleTheme.areaBorderColor,
},
emphasis: {
areaColor: this.bubbleTheme.areaHoverColor,
},
},
zoom: 1.1,
},
series: [
{
type: 'scatter',
coordinateSystem: 'geo',
data: this.mapData.map(itemOpt => {
return {
name: itemOpt.name,
value: [
this.latlong[itemOpt.code].longitude,
this.latlong[itemOpt.code].latitude,
itemOpt.value,
],
itemStyle: {
normal: {
color: itemOpt.color,
},
},
};
}),
},
],
};
});
}
  /**
   * Angular lifecycle hook. Clears the `alive` flag so that any
   * subscription gated on it (presumably a takeWhile(() => this.alive)
   * on the theme stream — TODO confirm against the constructor, which
   * is outside this chunk) stops emitting after the component is gone.
   */
  ngOnDestroy() {
    this.alive = false;
  }
private getRandomGeoColor() {
const index = Math.round(Math.random() * this.geoColors.length);
return this.geoColors[index];
}
}<|fim▁end|>
|
{ 'code': 'PR', 'name': 'Puerto Rico', 'value': 3745526, 'color': this.getRandomGeoColor() },
{ 'code': 'QA', 'name': 'Qatar', 'value': 1870041, 'color': this.getRandomGeoColor() },
|
<|file_name|>VertexBufferObject.cpp<|end_file_name|><|fim▁begin|>#include <stdafx.h>
#include "VertexBufferObject.h"
// Acquires a fresh OpenGL buffer object name and stores it in _id.
// NOTE(review): requires a current GL context at construction time.
VertexBufferObject::VertexBufferObject() {
    glGenBuffers(1, &_id);
}
// RAII cleanup: releases the GL buffer object owned by this wrapper.
// Per the glDeleteBuffers contract, the buffer is also unbound if it
// is currently bound.
VertexBufferObject::~VertexBufferObject() {
    glDeleteBuffers(1, &_id);
}
<|fim▁hole|>}
// Copies `size` bytes from `data` into GPU storage with usage hint
// `type` (e.g. GL_STATIC_DRAW).
// NOTE(review): glBufferData operates on the buffer *currently bound*
// to GL_ARRAY_BUFFER, not on _id — callers must call Bind() first or
// another buffer's storage will be overwritten.
void VertexBufferObject::Upload(void *data, size_t size, int type) {
    glBufferData(GL_ARRAY_BUFFER, size, data, type);
}<|fim▁end|>
|
void VertexBufferObject::Bind() {
glBindBuffer(GL_ARRAY_BUFFER, _id);
|
<|file_name|>protected_function_result.cpp<|end_file_name|><|fim▁begin|>// sol2
// The MIT License (MIT)
// Copyright (c) 2013-2021 Rapptz, ThePhD and contributors
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
<|fim▁hole|>// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#include <sol/protected_function_result.hpp><|fim▁end|>
|
// The above copyright notice and this permission notice shall be included in all
|
<|file_name|>text.js<|end_file_name|><|fim▁begin|>import e3AnimatedChild from '../e3-animated-child';
export default e3AnimatedChild.extend({
shadowType: 'text',
enterState: {
x: 0,
y: 0,
text: ''
},<|fim▁hole|> y: null,
text: null
}
});<|fim▁end|>
|
activeState: {
x: null,
|
<|file_name|>ChFunction_Poly345.cpp<|end_file_name|><|fim▁begin|>// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: Alessandro Tasora, Radu Serban
// =============================================================================
#include "chrono/motion_functions/ChFunction_Poly345.h"
namespace chrono {
// Register into the object factory, to enable run-time dynamic creation and persistence
CH_FACTORY_REGISTER(ChFunction_Poly345)
// Construct a 3-4-5 polynomial ramp reaching displacement m_h over the
// interval [0, m_end]. The end time goes through Set_end(), which
// presumably validates it — confirm against the class header.
ChFunction_Poly345::ChFunction_Poly345(double m_h, double m_end) : h(m_h) {
    Set_end(m_end);
}
// Copy constructor: copies the two ramp parameters (height and end
// time) directly from the source object.
ChFunction_Poly345::ChFunction_Poly345(const ChFunction_Poly345& other) {
    h = other.h;
    end = other.end;
}
double ChFunction_Poly345::Get_y(double x) const {
double ret = 0;
if (x <= 0)
return 0;
if (x >= end)
return h;
double a = x / end;
ret = h * (10 * pow(a, 3) - 15 * pow(a, 4) + 6 * pow(a, 5));
return ret;<|fim▁hole|> if (x <= 0)
return 0;
if (x >= end)
return 0;
double a = x / end;
ret = h * (1 / end) * (30 * pow(a, 2) - 60 * pow(a, 3) + 30 * pow(a, 4));
return ret;
}
// Second derivative of the 3-4-5 ramp y(x) = h*(10a^3 - 15a^4 + 6a^5)
// with a = x/end:  y''(x) = h/end^2 * (60a - 180a^2 + 120a^3).
// Returns 0 outside [0, end], so acceleration starts and ends at zero
// (smooth motion profile).
double ChFunction_Poly345::Get_y_dxdx(double x) const {
    double ret = 0;
    if (x <= 0)
        return 0;
    if (x >= end)
        return 0;
    double a = x / end;
    ret = h * (1 / (end * end)) * (60 * a - 180 * pow(a, 2) + 120 * pow(a, 3));
    return ret;
}
// Serialize this function to an archive: class version tag, base-class
// state, then the two parameters (h, end) by name via CHNVP.
// Must stay symmetric with ArchiveIN below.
void ChFunction_Poly345::ArchiveOUT(ChArchiveOut& marchive) {
    // version number
    marchive.VersionWrite<ChFunction_Poly345>();
    // serialize parent class
    ChFunction::ArchiveOUT(marchive);
    // serialize all member data:
    marchive << CHNVP(h);
    marchive << CHNVP(end);
}
// Deserialize this function from an archive, reading fields in exactly
// the order ArchiveOUT wrote them (version, base class, h, end).
void ChFunction_Poly345::ArchiveIN(ChArchiveIn& marchive) {
    // version number
    /*int version =*/ marchive.VersionRead<ChFunction_Poly345>();
    // deserialize parent class
    ChFunction::ArchiveIN(marchive);
    // stream in all member data:
    marchive >> CHNVP(h);
    marchive >> CHNVP(end);
}
} // end namespace chrono<|fim▁end|>
|
}
double ChFunction_Poly345::Get_y_dx(double x) const {
double ret = 0;
|
<|file_name|>utils.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>use rpb::riak::RpbBucketProps;
use rpb::riak_dt::{DtFetchResp, DtValue, MapEntry, MapField};
use rpb::riak_kv::{RpbContent, RpbGetResp, RpbIndexReq_IndexQueryType, RpbIndexResp};
use rpb::riak_search::RpbSearchQueryResp;
use rpb::riak_yokozuna::RpbYokozunaIndex;
use secondary_index::IndexResp;
use yokozuna::{SearchQueryResp, YokozunaIndex};
/// Serializes a protobuf message into its wire-format byte form.
pub trait ProtobufBytes {
    fn write_to_bytes(self) -> Result<Vec<u8>, RiakErr>;
}
/// Internal hook to populate bucket properties from the protobuf
/// response type.
pub trait BucketPropsPrivate {
    fn set_props(&mut self, RpbBucketProps);
}
/// Internal conversion from the protobuf Yokozuna index type into the
/// public `YokozunaIndex`.
pub trait YokozunaIndexPrivate {
    fn new_from_rpb_yokozuna_index(RpbYokozunaIndex) -> YokozunaIndex;
}
/// Two-way conversion between `ObjectContent` and its protobuf form.
pub trait ObjectContentPrivate {
    fn into_rpb(self) -> RpbContent;
    fn new_from_rpb(RpbContent) -> ObjectContent;
}
/// Builds a fetch response from the protobuf GET reply.
pub trait FetchObjectRespPrivate {
    fn new_from_rpb(RpbGetResp) -> FetchObjectResp;
}
/// Builds a search response from the protobuf search reply.
pub trait SearchQueryRespPrivate {
    fn new_from_rpb(RpbSearchQueryResp) -> SearchQueryResp;
}
/// Converts a protobuf CRDT value into the public data-type value.
pub trait DataTypeValuePrivate {
    fn new_from_rpb(DtValue) -> DataTypeValue;
}
/// Converts a protobuf CRDT map entry into the public representation.
pub trait DataTypeMapEntryPrivate {
    fn new_from_rpb(MapEntry) -> DataTypeMapEntry;
}
/// Converts a protobuf CRDT map field into the public representation.
pub trait DataTypeMapFieldPrivate {
    fn new_from_rpb(MapField) -> DataTypeMapField;
}
/// Builds a data-type fetch response from the protobuf reply.
pub trait DataTypeFetchRespPrivate {
    fn new_from_rpb(DtFetchResp) -> DataTypeFetchResp;
}
/// Maps the public index query type onto the protobuf request enum.
pub trait IndexQueryTypePrivate {
    fn to_rpb(self) -> RpbIndexReq_IndexQueryType;
}
/// Builds a secondary-index response from the protobuf reply.
pub trait IndexRespPrivate {
    fn new_from_rpb(RpbIndexResp) -> IndexResp;
}
}<|fim▁end|>
|
// TODO redo the "Private" traits
use data_type::{DataTypeFetchResp, DataTypeMapEntry, DataTypeMapField, DataTypeValue};
use errors::RiakErr;
use object::{FetchObjectResp, ObjectContent};
|
<|file_name|>class-dtor.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct cat {
done : extern fn(uint),
meows : uint,
}
impl Drop for cat {
fn drop(&mut self) {
(self.done)(self.meows);<|fim▁hole|>}
/// Free-function constructor (pre-1.0 Rust idiom): builds a `cat` with
/// a zeroed meow counter and the supplied `done` callback, which the
/// `Drop` impl above invokes with the final count on destruction.
fn cat(done: extern fn(uint)) -> cat {
    cat {
        meows: 0_usize,
        done: done
    }
}
pub fn main() {}<|fim▁end|>
|
}
|
<|file_name|>connections.go<|end_file_name|><|fim▁begin|>package rabbithole
import (
"net/http"
"net/url"
)
// ConnectionInfo provides information about connection to a RabbitMQ node.
type ConnectionInfo struct {
// Connection name
Name string `json:"name"`
// Node the client is connected to
Node string `json:"node"`
// Number of open channels
Channels int `json:"channels"`
// Connection state
State string `json:"state"`
// Connection type, network (via AMQP client) or direct (via direct Erlang client)
Type string `json:"type"`
// Server port
Port Port `json:"port"`
// Client port
PeerPort Port `json:"peer_port"`
// Server host
Host string `json:"host"`
// Client host
PeerHost string `json:"peer_host"`
// Last connection blocking reason, if any
LastBlockedBy string `json:"last_blocked_by"`
// When connection was last blocked
LastBlockedAge string `json:"last_blocked_age"`
// True if connection uses TLS/SSL
UsesTLS bool `json:"ssl"`
// Client certificate subject
PeerCertSubject string `json:"peer_cert_subject"`
// Client certificate validity
PeerCertValidity string `json:"peer_cert_validity"`
// Client certificate issuer
PeerCertIssuer string `json:"peer_cert_issuer"`
// TLS/SSL protocol and version
SSLProtocol string `json:"ssl_protocol"`
// Key exchange mechanism
SSLKeyExchange string `json:"ssl_key_exchange"`
// SSL cipher suite used
SSLCipher string `json:"ssl_cipher"`
// SSL hash
SSLHash string `json:"ssl_hash"`
// Protocol, e.g. AMQP 0-9-1 or MQTT 3-1
Protocol string `json:"protocol"`
User string `json:"user"`
// Virtual host
Vhost string `json:"vhost"`
// Heartbeat timeout
Timeout int `json:"timeout"`
// Maximum frame size (AMQP 0-9-1)
FrameMax int `json:"frame_max"`
// A map of client properties (name, version, capabilities, etc)
ClientProperties Properties `json:"client_properties"`
// Octets received
RecvOct uint64 `json:"recv_oct"`
// Octets sent
SendOct uint64 `json:"send_oct"`
RecvCount uint64 `json:"recv_cnt"`
SendCount uint64 `json:"send_cnt"`
SendPending uint64 `json:"send_pend"`
// Ingress data rate
RecvOctDetails RateDetails `json:"recv_oct_details"`
// Egress data rate
SendOctDetails RateDetails `json:"send_oct_details"`
// Connection timestamp
ConnectedAt uint64 `json:"connected_at,omitempty"`
}
//
// GET /api/connections
//
// ListConnections returns a list of client connections to target node.
func (c *Client) ListConnections() (rec []ConnectionInfo, err error) {
	// Build the GET request for the connections collection endpoint.
	req, reqErr := newGETRequest(c, "connections")
	if reqErr != nil {
		return []ConnectionInfo{}, reqErr
	}
	// Execute the call and decode the JSON body straight into rec.
	if parseErr := executeAndParseRequest(c, req, &rec); parseErr != nil {
		return []ConnectionInfo{}, parseErr
	}
	return rec, nil
}
//
// GET /api/connections/{name}
//
// GetConnection retrieves information about a connection.
func (c *Client) GetConnection(name string) (rec *ConnectionInfo, err error) {
	// Connection names may contain characters that must be escaped in
	// the URL path.
	path := "connections/" + url.PathEscape(name)
	req, reqErr := newGETRequest(c, path)
	if reqErr != nil {
		return nil, reqErr
	}
	// Decode the JSON body into the (pointer) result.
	if parseErr := executeAndParseRequest(c, req, &rec); parseErr != nil {
		return nil, parseErr
	}
	return rec, nil
}
//
// DELETE /api/connections/{name}
//
// CloseConnection closes a connection.
func (c *Client) CloseConnection(name string) (res *http.Response, err error) {
req, err := newRequestWithBody(c, "DELETE", "connections/"+url.PathEscape(name), nil)
if err != nil {
return nil, err
}
if res, err = executeRequest(c, req); err != nil {<|fim▁hole|>}<|fim▁end|>
|
return nil, err
}
return res, nil
|
<|file_name|>default_handler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
# import json
# replace with simplejson
import simplejson as json
import os
import time
import logging
import traceback
import sys
from oslo_config import cfg
from yabgp.common import constants as bgp_cons
from yabgp.handler import BaseHandler
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
MSG_PROCESS_OPTS = [
cfg.BoolOpt('write_disk',
default=True,
help='Whether the BGP message is written to disk'),
cfg.StrOpt('write_dir',
default=os.path.join(os.environ.get('HOME') or '.', 'data/bgp/'),
help='The BGP messages storage path'),
cfg.IntOpt('write_msg_max_size',
default=500,
help='The Max size of one BGP message file, the unit is MB'),
cfg.BoolOpt('write_keepalive',
default=False,
help='Whether write keepalive message to disk')
]
CONF.register_opts(MSG_PROCESS_OPTS, group='message')
class DefaultHandler(BaseHandler):
    def __init__(self):
        """Set up per-peer bookkeeping for on-disk BGP message logging."""
        super(DefaultHandler, self).__init__()
        # Lower-cased peer address -> (message directory, open file object)
        '''
        {<peer>: (<path>, <current file>)}
        '''
        self.peer_files = {}
        # Lower-cased peer address -> next sequence number to stamp on a record
        '''
        {<peer>: <seq number>}
        '''
        self.msg_sequence = {}
    def init(self):
        """Open the message file for the configured peer when disk logging is enabled."""
        if CONF.message.write_disk:
            self.init_msg_file(CONF.bgp.running_config['remote_addr'].lower())
    def init_msg_file(self, peer_addr):
        """Create the per-peer message directory and open the current file.

        Builds ``<write_dir>/<peer_addr>/msg/``, resumes from the newest
        existing ``*.msg`` file (continuing its sequence numbering) or
        starts a fresh timestamp-named file, and registers the open file
        in ``self.peer_files`` / ``self.msg_sequence``.

        :param peer_addr: peer address used as the directory name
        """
        msg_file_path_for_peer = os.path.join(
            CONF.message.write_dir,
            peer_addr
        )
        if not os.path.exists(msg_file_path_for_peer):
            os.makedirs(msg_file_path_for_peer)
            LOG.info('Create dir %s for peer %s', msg_file_path_for_peer, peer_addr)
        LOG.info('BGP message file path is %s', msg_file_path_for_peer)
        if msg_file_path_for_peer and peer_addr not in self.peer_files:
            # the trailing '/' matters: other methods concatenate
            # msg_path + file name directly
            msg_path = msg_file_path_for_peer + '/msg/'
            if not os.path.exists(msg_path):
                os.makedirs(msg_path)
            # try get latest file and msg sequence if any
            last_msg_seq, msg_file_name = DefaultHandler.get_last_seq_and_file(msg_path)
            if not msg_file_name:
                # no previous file: name the new one after the current time
                msg_file_name = "%s.msg" % time.time()
            # store the message sequence
            self.msg_sequence[peer_addr] = last_msg_seq + 1
            msg_file = open(os.path.join(msg_path, msg_file_name), 'a')
            msg_file.flush()
            self.peer_files[peer_addr] = (msg_path, msg_file)
            LOG.info('BGP message file %s', msg_file_name)
            LOG.info('The last bgp message seq number is %s', last_msg_seq)
@staticmethod
def get_last_seq_and_file(msg_path):
"""
Get the last sequence number in the latest log file.
"""
LOG.info('get the last bgp message seq for this peer')
last_seq = 0
# first get the last file
file_list = os.listdir(msg_path)
if not file_list:
return last_seq, None
file_list.sort()
msg_file_name = file_list[-1]
try:
with open(msg_path + msg_file_name, 'r') as fh:
line = None
for line in fh:
pass
last = line
if line:
if last.startswith('['):
last_seq = eval(last)[1]
elif last.startswith('{'):
last_seq = json.loads(last)['seq']
except OSError:
LOG.error('Error when reading bgp message files')
except Exception as e:
LOG.debug(traceback.format_exc())
LOG.error(e)
sys.exit()
return last_seq, msg_file_name
    def write_msg(self, peer, timestamp, msg_type, msg):
        """
        write bgp message into local disk file

        Appends one JSON record per line to the peer's current message
        file and bumps the peer's sequence counter. No-op when the peer
        has no registered file (init_msg_file not called for it).

        :param peer: peer address (matched case-insensitively)
        :param timestamp: timestamp
        :param msg_type: message type (0,1,2,3,4,5,6)
        :param msg: message dict, merged into the record
        :return:
        """
        msg_path, msg_file = self.peer_files.get(peer.lower(), (None, None))
        if msg_path:
            msg_seq = self.msg_sequence[peer.lower()]
            msg_record = {
                't': timestamp,
                'seq': msg_seq,
                'type': msg_type
            }
            msg_record.update(msg)
            try:
                json.dump(msg_record, msg_file)
            except Exception as e:
                # best-effort: log the failure; the newline below still
                # terminates the (possibly partial) record line
                LOG.error(e)
                LOG.info('raw message %s', msg)
            msg_file.write('\n')
            self.msg_sequence[peer.lower()] += 1
            # flush + fsync so records survive a process crash
            msg_file.flush()
            os.fsync(msg_file.fileno())
def check_file_size(self, peer):
"""if the size of the msg file is bigger than 'max_msg_file_size',
then save as and re-open a new file.
"""
msg_path, cur_file = self.peer_files.get(peer.lower(), (None, None))
if msg_path:
if os.path.getsize(cur_file.name) >= CONF.message.write_msg_max_size:
cur_file.close()
msg_file_name = "%s.msg" % time.time()
LOG.info('Open a new message file %s', msg_file_name)
msg_file = open(os.path.join(msg_path + msg_file_name), 'a')
self.peer_files[peer.lower()] = (msg_path, msg_file)
return True
return False
    def on_update_error(self, peer, timestamp, msg):
        """Record a failed/malformed UPDATE.

        msg_type 6 is presumably this handler's 'update error' marker
        rather than a BGP wire type — confirm against
        yabgp.common.constants.
        """
        self.write_msg(
            peer=peer.factory.peer_addr,
            timestamp=timestamp,
            msg_type=6,
            msg={'msg': msg}
        )
    def update_received(self, peer, timestamp, msg):
        """Persist a received UPDATE, then rotate the file if it grew too big."""
        # write message to disk
        self.write_msg(
            peer=peer.factory.peer_addr,
            timestamp=timestamp,
            msg_type=bgp_cons.MSG_UPDATE,
            msg={"msg": msg}
        )
        self.check_file_size(peer.factory.peer_addr)
    def keepalive_received(self, peer, timestamp):
        """
        keepalive message default handler

        The first KEEPALIVE marks session establishment (hook left
        empty). The message itself is persisted only when the
        write_keepalive option is set, since keepalives are frequent
        and carry no payload.

        :param peer: protocol instance; its factory knows the peer address
        :param timestamp: receive time
        :return:
        """
        if peer.msg_recv_stat['Keepalives'] == 1:
            # do something with the connection establish event
            pass
        if CONF.message.write_keepalive:
            # write bgp message (msg_type 4 = KEEPALIVE)
            self.write_msg(
                peer=peer.factory.peer_addr,
                timestamp=timestamp,
                msg_type=4,
                msg={"msg": None}
            )
    def open_received(self, peer, timestamp, result):
        """Persist a decoded OPEN message (msg_type 1 = BGP OPEN;
        presumably equals bgp_cons.MSG_OPEN — confirm)."""
        # write bgp message
        self.write_msg(
            peer=peer.factory.peer_addr,
            timestamp=timestamp,
            msg_type=1,
            msg={"msg": result}
        )
def route_refresh_received(self, peer, msg, msg_type):
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=time.time(),
msg_type=msg_type,
msg={"msg": msg}
)<|fim▁hole|> self.write_msg(
peer=peer.factory.peer_addr,
timestamp=time.time(),
msg_type=3,
msg={"msg": msg}
)
def on_connection_lost(self, peer):
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=time.time(),
msg_type=bgp_cons.MSG_BGP_CLOSED,
msg={"msg": None}
)
def on_connection_failed(self, peer, msg):
self.write_msg(
peer=peer,
timestamp=time.time(),
msg_type=0,
msg={"msg": msg}
)
def on_established(self, peer, msg):
pass<|fim▁end|>
|
def notification_received(self, peer, msg):
|
<|file_name|>assoc.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::infer::InferCtxt;
use middle::traits::{self, FulfillmentContext, Normalized, MiscObligation,
SelectionContext, ObligationCause};
use middle::ty::{self, HasTypeFlags};
use middle::ty_fold::TypeFoldable;<|fim▁hole|>use syntax::codemap::Span;
/// Normalizes associated-type projections inside `value` and registers
/// every obligation produced by the normalization with `fulfillment_cx`
/// so it is proven later. Returns the normalized value.
///
/// `span` and `body_id` only feed the `ObligationCause`
/// (`MiscObligation`) used for error reporting.
pub fn normalize_associated_types_in<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
                                                typer: &(ty::ClosureTyper<'tcx>+'a),
                                                fulfillment_cx: &mut FulfillmentContext<'tcx>,
                                                span: Span,
                                                body_id: ast::NodeId,
                                                value: &T)
                                                -> T
    where T : TypeFoldable<'tcx> + HasTypeFlags
{
    debug!("normalize_associated_types_in(value={:?})", value);
    let mut selcx = SelectionContext::new(infcx, typer);
    let cause = ObligationCause::new(span, body_id, MiscObligation);
    let Normalized { value: result, obligations } = traits::normalize(&mut selcx, cause, value);
    debug!("normalize_associated_types_in: result={:?} predicates={:?}",
           result,
           obligations);
    for obligation in obligations {
        fulfillment_cx.register_predicate_obligation(infcx, obligation);
    }
    result
}
|
use syntax::ast;
|
<|file_name|>cloudsignaturebot.py<|end_file_name|><|fim▁begin|>"""This is the Cloud Signature Bot based on Time4Mind and Telegram
It allow to sign documents using a Telegram chat and a Time4Mind account
"""
import sys
import os
import yaml
import logging
import time
import datetime
import uuid
import urllib.request
import shutil
import re
import magic
import json
from threading import Thread
from queue import Queue
from time4mind import Time4Mind
from telegram.ext import Updater, CommandHandler
from telegram.ext import MessageHandler, Filters
from telegram import Bot
from flask import Flask, jsonify, abort, make_response, request
from pkboxsoap import PkBoxSOAP
# methods for a "poor man" data persistence based on a yaml file
def acl_load():
    """Load the ACL mapping (telegram user id -> user info) from cfg['acl'].

    Always returns a dict: an empty one when the file is missing,
    unreadable, or empty, so callers can safely do membership tests.
    """
    try:
        with open(cfg['acl'], 'r') as yml_file:
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted input; the file is written by acl_dump() via
            # yaml.dump(), so yaml.safe_load() would also work here.
            acl = yaml.load(yml_file)
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are not swallowed.
        logging.warning("failed to read acl file: " + str(cfg['acl']))
        acl = dict()
    # yaml.load() returns None for an empty file; normalize so callers
    # doing `user_id in acl` never hit a TypeError.
    return acl or dict()
def acl_update(user_info):
    """Merge `user_info` into the persisted ACL, keyed by its telegram id."""
    acl = acl_load()
    entry = acl.setdefault(user_info['id'], dict())
    for key, value in user_info.items():
        entry[key] = value
    acl_dump(acl)
def acl_dump(acl):
    """Serialize the ACL mapping to the YAML file configured in cfg['acl']."""
    try:
        with open(cfg['acl'], 'w+') as yml_file:
            yml_file.write(yaml.dump(acl))
    except:
        logging.critical("error writing acl file: " + str(cfg['acl']))
def acl_set_status(user_id, status):
    """Update the status field of a known user; log and bail out otherwise."""
    acl = acl_load()
    if user_id in acl:
        acl[user_id]['status'] = status
        acl_dump(acl)
    else:
        logging.error('user_id ' + str(user_id) + 'not found in acl file:' \
                      + str(cfg['acl']))
        return None
def acl_get_user_info(user_id):
    """Return the stored record for `user_id`, or None when unknown."""
    return acl_load().get(user_id)
# queue consumer
def process_queue(args):
    """Worker loop consuming messages enqueued by the Flask callbacks.

    ``args`` is a ``(queue, bot, acl_set_status)`` tuple. Two message types
    are handled:

    * ``"authorization"`` — mark the user authorized and notify them;
    * ``"signature"`` — sign the documents stored for the operation and
      send the results back over Telegram.

    Runs forever; intended to be launched via ``dispatcher.run_async()``.
    """
    (queue, bot, acl_set_status) = args
    while True:
        q_msg = queue.get()
        logging.info('queue.get() : ' + repr(q_msg))
        # auth transaction
        if q_msg['type'] == "authorization":
            transaction = q_msg['content']
            # Fix: ACL entries are keyed by telegram user id (see
            # acl_update), not chat id — the two only coincide in private
            # chats, which is why the previous chat_id-based call worked.
            acl_set_status(q_msg['user_id'], "authorized")
            message = 'You have been authorized. Now send me a file to sign!'
            try:
                bot.sendMessage(chat_id=q_msg['chat_id'], text=message)
            except:
                logging.warning('error sending auth confirmation for transaction '\
                                + '\ncontent: ' + str(transaction) \
                                + '\nbot: ' + str(bot) \
                                + '\nchat_id: ' + str(q_msg['chat_id']) \
                                + '\nuser_id: ' + str(q_msg['user_id']) )
            else:
                logging.info('authorized user: ' + str(q_msg['user_id']))
        # sign transaction
        elif q_msg['type'] == "signature":
            # retrieve the document list saved by sign_single_document()
            operation_uuid4 = q_msg['operation_uuid4']
            yml_pathname = cfg['storage'] + '/' + operation_uuid4 + '.yml'
            try:
                with open(yml_pathname, 'r') as yml_file:
                    docs = yaml.load(yml_file)
            except:
                # (reconstructed logging call — the file previously held
                # broken residue here)
                logging.warning('error retriving saved info for operation: '\
                                + operation_uuid4 \
                                + " from " + yml_pathname)
            else:
                logging.info("process_queue() operation " + operation_uuid4 \
                             + " retrived info from " + yml_pathname)
            # Build the OTP blob PkBox expects: session material from the
            # Time4Mind callback plus the ciphered PIN.
            transaction = q_msg['content']
            try:
                received_otp = json.loads(transaction['otp'])
            except Exception as inst:
                # NOTE(review): if this parse fails, received_otp is unbound
                # and the lines below raise NameError — consider `continue`.
                logging.debug(inst.args)
            sign_otp = dict()
            sign_otp['KeyPIN'] = received_otp['KeyPIN']
            sign_otp['SessionKey'] = received_otp['SessionKey']
            sign_otp['PIN'] = str(transaction['pin'])
            logging.debug("process_queue() sign_otp: " + str(json.dumps(sign_otp)) )
            # sign every stored document in place
            parent_dir = cfg['storage'] + '/' + str(q_msg['chat_id'])
            directory = parent_dir + '/' + operation_uuid4 + '/'
            for file_item in docs['list']:
                # retrieve the user's certificate alias
                user_info = acl_get_user_info(q_msg['user_id'])
                signer = user_info['cred']['alias']
                if 'domain' in cfg['pkbox'] and cfg['pkbox']['domain'] == "open":
                    signer = '[' + user_info['cred']['domain'] + ']_' + signer
                # PDFs get an embedded signature, everything else p7m
                pathname = directory + file_item['file_id']
                filetype = 'p7m'
                if re.match(r'PDF document.*', magic.from_file(pathname)):
                    filetype = 'pdf'
                logging.info("process_queue() operation " + operation_uuid4 \
                             + " signing file: " + pathname)
                result = sign_service.envelope(pathname, filetype, signer,
                                               str(transaction['pin']),
                                               str(json.dumps(sign_otp)))
                # record outcome: presence of 'new_name' marks success
                index = docs['list'].index(file_item)
                if result == 'ok':
                    if filetype == "pdf":
                        docs['list'][index]['new_name'] = \
                            'SIGNED_' + docs['list'][index]['file_name']
                    else:
                        docs['list'][index]['new_name'] = \
                            docs['list'][index]['file_name'] + '.p7m'
                    logging.info('user ' + str(q_msg['user_id']) \
                                 + ' signed documents in operation: ' \
                                 + operation_uuid4 )
                else:
                    docs['list'][index]['result'] = str(result)
                    logging.warning("envelope() returned " + str(result)
                                    + ' signing document for operation:'\
                                    + operation_uuid4)
                # TODO:
                # if pdfsign fail because protected with a password)
                # it should return a msg to request sign it as p7m
            # send outcome messages and the signed files back to the chat
            for file_item in docs['list']:
                pathname = directory + file_item['file_id']
                if not 'new_name' in file_item:
                    message = 'Error signing file: ' + file_item['file_name'] \
                              + " with result " + file_item['result']
                    bot.sendMessage(chat_id=q_msg['chat_id'], text=message)
                elif not os.path.exists(pathname):
                    logging.warning("not found " + pathname)
                    message = 'Error reading signed file: ' + file_item['new_name']
                    bot.sendMessage(chat_id=q_msg['chat_id'], text=message)
                else:
                    # NOTE(review): the file handle passed to sendDocument is
                    # never explicitly closed.
                    bot.sendDocument( chat_id=q_msg['chat_id'],
                                      document=open(pathname, 'rb'),
                                      filename=file_item['new_name'])
                    os.remove(pathname)
            # remove the per-operation yaml file and directory
            os.remove(yml_pathname)
            os.rmdir(directory)
            # try to remove the per-chat dir too; fails harmlessly if not empty
            try:
                os.rmdir(parent_dir)
            except:
                pass
        # Fix: use the queue passed in args rather than the module-global `q`
        # (same object at the current call site, but the global was fragile).
        queue.task_done()
# flask webserver to handle callback
app = Flask(__name__)
# function to start webserver as a thread
def flask_thread():
    """Run the callback webserver; bind address comes from the config,
    defaulting to localhost only."""
    listenaddr = cfg['webserver'].get('listenaddr', '127.0.0.1')
    app.run(host=listenaddr, debug=True, use_reloader=False)
@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 body instead of Flask's default HTML page."""
    payload = jsonify({'error': 'Not found'})
    return make_response(payload, 404)
@app.route('/api/v1.0/authorize/<int:chat_id>/<int:user_id>', methods=['POST'])
def get_authorization(chat_id, user_id):
    """Time4Mind callback for /link authorization transactions.

    Approved transactions (approved == 1) are forwarded to the worker
    queue; refusals are ignored. Always answers 200 once the request
    passes basic validation.
    """
    if not request.json or not user_id or not chat_id:
        logging.debug(request)
        abort(400)
    try:
        for transaction in request.json:
            if transaction['approved'] == 1:
                q.put({'user_id': user_id,
                       'chat_id': chat_id,
                       'type': "authorization",
                       'content': transaction})
    except:
        logging.error("failed processing transaction callback")
    return jsonify({'authorization': 'received'}), 200
@app.route('/api/v1.0/sign/<int:chat_id>/<int:user_id>/<string:operation_uuid4>', methods=['POST'])
def get_signature(chat_id, user_id, operation_uuid4):
    """Time4Mind callback for signature transactions.

    Approved transactions are queued for the worker together with the
    operation id so it can locate the stored documents.
    """
    if not request.json or not operation_uuid4 or not chat_id or not user_id:
        logging.debug(request)
        abort(400)
    try:
        logging.debug(request)
        for transaction in request.json:
            if transaction['approved'] == 1:
                q.put({'chat_id': chat_id,
                       'user_id': user_id,
                       'operation_uuid4': operation_uuid4,
                       'type': "signature",
                       'content': transaction})
    except:
        logging.error("failed processing signature transaction callback")
    return jsonify({'authorization': 'received'}), 200
"""
Example of a auth transaction APPROVED:
{
'pin': '',
'result': [],
'applicationId': None,
'transactionId': '954593fc-3162-4077-9731-af8aab27dda5',
'approved': 1,
'otp': 'E77337B8CC3FD9C5DB805B123259149BC9313A169D9319157187D91205214CFC',
'antiFraud': '[]'
}
Example of a sign transaction APPROVED:
{
'result': ['@pin'],
'pin': 'xI1lhMbAiALTCZ3I71bQIQ==',
'otp': '{
"SessionKey":"Z4NnyTUgUePgSNSAgPiiysY2yIB+lSZg1xXUArOK1zJq11JqqCJ3plTGysynjeu1uhHSM\\/4SvaBHqDjL6NIjmustOITo2dOf3DVzTyk3RIjCh9XWANNWFhgaMMmWI6B8NBA\\/tQ6+bztTt4PJ3OJwwdAI0u\\/EuDZLSCvdcUfohyg=",
"KeyPIN":"BNcuQZWbdcpZeMESzTPfKA==",
"Ciphered":true
}',
'applicationId': None,
'approved': 1,
'docSignatureResult': '[]',
'transactionId': 'd6d76bdc-23ab-473d-b9c8-a9632c147656',
'antiFraud': '[]'
}
Example of a transaction REFUSED:
{
'approved': 2,
'applicationId': None,
'transactionId': '8f52c58f-9f69-44e9-b716-d7dc1c69a6b4'
}
"""
############################
# define telegram functions
def start(bot, update):
    """Handle /start: explain the prerequisites and the /link flow."""
    welcome = ("To use this bot you should have:\n"
               "* an e-sign certficate\n"
               "* the Valid mobile app installed with the e-sign OTP enabled\n\n"
               "Link your Valid account with the command /link followed "
               "by the username (usually the email)\n\n"
               "An authorization request will be sent to your Valid mobile app.")
    bot.sendMessage(chat_id=update.message.chat_id, text=welcome)
def status(bot, update):
    """Handle /status: report the caller's authorization state.

    Silently does nothing when the user has never issued /link.
    """
    user_info = acl_get_user_info(update.message.from_user.id)
    if not user_info:
        return
    state = user_info['status']
    if state == "authorized":
        text = "You are already authorized to use Valid account *" \
               + str(user_info['time4mind_account']) + '*'
    elif state == "waiting authorization":
        text = "I'm waiting your authorization from Valid app\n" + str(user_info)
    else:
        text = "You are not yet authorized"
    bot.sendMessage(chat_id=update.message.chat_id, text=text, parse_mode="Markdown")
def link(bot, update, args):
    """Handle /link <account>: request Time4Mind authorization for the caller.

    Builds the user record, looks up the account's mobile credentials and,
    if found, sends an authorization push to the user's Valid app. The
    record is persisted with status 'waiting authorization'; the Flask
    callback flips it to 'authorized' later.
    """
    # check arguments
    if len(args) != 1:
        text = 'Please, pass me only one string without spaces'
        bot.sendMessage(chat_id=update.message.chat_id, text=text)
        return
    # build telegram user data structure
    user_info = dict()
    user_info['id'] = update.message.from_user.id
    user_info['time4mind_account'] = args[0]
    user_info['first_name'] = update.message.from_user.first_name
    user_info['last_name'] = update.message.from_user.last_name
    user_info['username'] = update.message.from_user.username
    user_info['chat_id'] = update.message.chat_id
    user_info['status'] = 'waiting authorization'
    # last_name is optional on Telegram, so build display_name accordingly
    if user_info['last_name']:
        user_info['display_name'] = user_info['first_name'] + ' ' + user_info['last_name']
    else:
        user_info['display_name'] = user_info['first_name']
    logging.info("/link command received from user: " + user_info['time4mind_account'])
    # look for credentials
    cred = time4mind.getMobileActiveCredentials(user_info['time4mind_account'])
    if len(cred) < 1:
        logging.warning("/link command did not found valid credentials for user: " + user_info['time4mind_account'])
        message = 'Error sending an authorization request to this account'
        bot.sendMessage(chat_id=update.message.chat_id, text=message)
    else:
        # TODO: choice if credentials > 1
        user_info['cred'] = cred[0]
        # send request; the route is the callback URL Time4Mind will POST to
        route = '/api/v1.0/authorize/' + str(user_info['chat_id']) \
                + '/' + str(user_info['id'])
        try:
            user_info['last_transaction'] = time4mind.authorize(user_info,route)
            # save user data
            acl_update(user_info)
            # message user
            message = 'I sent an authorization request to your Valid app'
            bot.sendMessage(chat_id=update.message.chat_id, text=message)
        except:
            logging.warning("failed to request account usage authorization")
def sign_single_document(bot, update):
    """Handle a document upload: store it and trigger a Valid sign request.

    For an authorized user this saves the document metadata to a per-
    operation YAML file, asks Time4Mind for a signature authorization,
    and downloads the file from Telegram into the per-chat storage dir
    where process_queue() will later sign it.
    """
    user_info = acl_get_user_info(update.message.from_user.id)
    chat_id = update.message.chat_id
    operation_uuid4 = str(uuid.uuid4())
    logging.info("sign_single_document() operation " + operation_uuid4 \
                 + " for user " + str(user_info))
    if not user_info or 'status' not in user_info:
        text = "You are not yet authorized"
    elif user_info['status'] == "waiting authorization":
        text = "Sorry but I'm still waiting your authorization from Valid app\n" \
               + str(user_info)
    elif user_info['status'] == "authorized":
        doc_info = update.message.document.__dict__
        file_id = doc_info['file_id']
        file_info = bot.getFile(file_id)
        doc_info['file_path'] = file_info['file_path']
        # href is what the Valid app shows as a link to the document
        doc_info['href'] = cfg['webserver']['endpoint'] + '/api/v1.0/file/' \
                           + operation_uuid4 + '/' + file_id
        docs = { 'operation_uuid4': operation_uuid4,
                 'list': [ doc_info ] }
        # save operation data to yaml for process_queue() to pick up
        directory = cfg['storage']
        if not os.path.exists(directory):
            try:
                os.makedirs(directory)
            except:
                logging.critical("error makedirs: " + str(directory))
        yml_pathname = directory + '/' + operation_uuid4 + '.yml'
        try:
            with open(yml_pathname, 'w+') as yml_file:
                yml_file.write(yaml.dump(docs))
        except:
            logging.critical("error writing yml file: " + str(yml_pathname))
        else:
            logging.info("sign_single_document() operation " + operation_uuid4 \
                         + " saved docs to " + yml_pathname)
        # request to sign
        signMobileRequest(user_info, docs)
        text = "Request to sign sent to your Valid app"
        # download the file from Telegram into the per-chat operation dir.
        # NOTE(review): this happens after the sign request is sent; it
        # presumably always wins the race against the user approving the
        # transaction — confirm.
        directory = cfg['storage'] + '/' + str(chat_id) + '/' + operation_uuid4 + '/'
        if not os.path.exists(directory):
            os.makedirs(directory)
        with urllib.request.urlopen(doc_info['file_path']) as response, \
             open(directory + doc_info['file_id'], 'wb') as out_file:
            shutil.copyfileobj(response, out_file)
    else:
        # Fix: previously any other status value left `text` unbound and the
        # sendMessage below raised UnboundLocalError.
        text = "Your account is in an unexpected state, please /link it again"
    bot.sendMessage(chat_id=update.message.chat_id, text=text, parse_mode="Markdown")
def signMobileRequest(user_info,docs):
    """Send a Time4Mind signature push for the documents in `docs`.

    Builds an HTML-ish message listing each document link, computes the
    callback route for get_signature(), and stores the resulting
    transaction id back into the user's ACL record.
    """
    title = 'Signature Request'
    sender = '@CloudSignature_Bot'
    message = 'Documents to sign:'
    # one <li> link per document, rendered by the Valid mobile app
    for file_item in docs['list']:
        message += '<li><a href=\"' + file_item['href'] + '\">' \
                   + file_item['file_name'] + '</a></li>'
    # callback URL Time4Mind will POST the transaction outcome to
    route = '/api/v1.0/sign/' \
            + str(user_info['chat_id']) + '/' \
            + str(user_info['id']) + '/' \
            + str(docs['operation_uuid4'])
    try:
        user_info['last_transaction'] = time4mind.signMobile(
            user_info['cred']['otpId'],
            user_info['cred']['otpProvider'],
            title,sender,message,
            user_info['cred']['label'],route)
    except:
        logging.warning("failed to request signature authorization")
    else:
        logging.info("signMobileRequest() sent to user: " + str(user_info['id']) \
                     + " - operation: " + str(docs['operation_uuid4']) \
                     + " - transaction: " + str(user_info['last_transaction']) )
        # persist the transaction id alongside the rest of the user record
        try:
            acl_update(user_info)
        except:
            logging.warning("failed to save transaction data")
def unknown_cmd(bot, update):
    """Fallback handler for any command the bot does not recognize."""
    reply = "Sorry, I didn't understand that command."
    bot.sendMessage(chat_id=update.message.chat_id, text=reply)
def filter_any(msg):
    """Logging-only message filter.

    Logs every incoming update (text or document) and always returns
    False, so the handler attached to it never actually fires.
    """
    logging.debug('Received message_id: ' + str(msg.message_id))
    if msg.text:
        logging.debug('text: ' + msg.text)
    elif msg.document:
        logging.debug('document: ' + msg.document.file_name)
    return False
###############
# Main section
# read configuration and setup time4mind class
# Config path is the first CLI argument. NOTE(review): yaml.load without an
# explicit Loader is unsafe on untrusted input; the config is operator-
# supplied, but yaml.safe_load would be stricter.
with open(sys.argv[1], 'r') as yml_file: cfg = yaml.load(yml_file)
time4mind = Time4Mind(cfg)
# setup logger
logging.basicConfig(level=logging.DEBUG,
                    filename=cfg['logfile'],
                    format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                    datefmt='%Y-%m-%d %H:%M')
# setup telegram updater and dispatchers
updater = Updater(token=cfg['bot']['token'])
dispatcher = updater.dispatcher
# begin telegram commands
# trace messages: filter_any always returns False so this callback never
# runs; it is still given the proper (bot, update) arity (the previous
# zero-argument lambda would have raised TypeError if ever invoked).
trace_handler = MessageHandler(filter_any, lambda bot, update: True)
dispatcher.add_handler(trace_handler)
# start command
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
# link command
link_handler = CommandHandler('link', link, pass_args=True)
dispatcher.add_handler(link_handler)
# status command
status_handler = CommandHandler('status', status)
dispatcher.add_handler(status_handler)
# sign document filter
sign_handler = MessageHandler(Filters.document, sign_single_document)
dispatcher.add_handler(sign_handler)
# unknown commands
unknown_handler = MessageHandler(Filters.command, unknown_cmd)
dispatcher.add_handler(unknown_handler)
# end telegram commands
# queue shared between the flask callback endpoints and the worker
q = Queue(maxsize=100)
bot = Bot(cfg['bot']['token'])
dispatcher.run_async(process_queue, (q, bot, acl_set_status))
# setup pkbox handler to sign
sign_service = PkBoxSOAP(cfg['pkbox'])
# run updater and webserver as threads
webserver_thread = Thread(target=flask_thread, name='webserver')
webserver_thread.start()
updater_thread = Thread(target=updater.start_polling, name='updater')
updater_thread.start()
|
+ operation_uuid4 \
|
<|file_name|>gemm_int8.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"Example code to perform int8 GEMM"
import logging
import sys
import numpy as np
import tvm<|fim▁hole|>
# Set to False to skip autotuning and use the pretuned schedule index below.
DO_TUNING = True
# Index into the config space used when DO_TUNING is False.
PRETUNED_INDEX = 75333
# dp4a tensor intrinsic operating on "local"-scoped buffers.
intrin_dp4a = dp4a("local", "local", "local")
@autotvm.template
def gemm_int8(n, m, l):
    """AutoTVM template for int8 GEMM on CUDA.

    Computes C[n, m] = sum_k A[n, k] * B[m, k] with int8 inputs and int32
    accumulation, tensorized onto the dp4a intrinsic. Returns the schedule
    and the argument buffers [A, B, C].
    """
    A = te.placeholder((n, l), name="A", dtype="int8")
    B = te.placeholder((m, l), name="B", dtype="int8")
    k = te.reduce_axis((0, l), name="k")
    C = te.compute(
        (n, m),
        lambda i, j: te.sum(A[i, k].astype("int32") * B[j, k].astype("int32"), axis=k),
        name="C",
    )
    cfg = autotvm.get_config()
    s = te.create_schedule(C.op)
    y, x = C.op.axis
    # Stage inputs through shared then local memory; accumulate C locally.
    AA = s.cache_read(A, "shared", [C])
    BB = s.cache_read(B, "shared", [C])
    AL = s.cache_read(AA, "local", [C])
    BL = s.cache_read(BB, "local", [C])
    CC = s.cache_write(C, "local")
    k = CC.op.reduce_axis[0]
    # Innermost k factor must be 4 so the dp4a intrinsic can be applied.
    cfg.define_split(
        "tile_k",
        cfg.axis(k),
        num_outputs=3,
        filter=lambda entity: entity.size[2] == 4 and entity.size[0] * 2 >= entity.size[1],
    )
    ko, kt, ki = cfg["tile_k"].apply(s, CC, k)
    s[CC].tensorize(ki, intrin_dp4a)
    block_x = te.thread_axis("blockIdx.x")
    block_y = te.thread_axis("blockIdx.y")
    thread_x = te.thread_axis("threadIdx.x")
    thread_y = te.thread_axis("threadIdx.y")
    # Prune the spatial tiling space to reasonable block shapes.
    def block_size_filter(entity):
        return (
            entity.size[0] * 2 >= entity.size[1] * 2
            and entity.size[1] <= 16
            and entity.size[3] <= 4
        )
    cfg.define_split("tile_y", cfg.axis(y), num_outputs=4, filter=block_size_filter)
    cfg.define_split("tile_x", cfg.axis(x), num_outputs=4, filter=block_size_filter)
    by, tyz, ty, yi = cfg["tile_y"].apply(s, C, y)
    bx, txz, tx, xi = cfg["tile_x"].apply(s, C, x)
    s[C].bind(by, block_y)
    s[C].bind(bx, block_x)
    s[C].bind(tyz, te.thread_axis("vthread"))
    s[C].bind(txz, te.thread_axis("vthread"))
    s[C].bind(ty, thread_y)
    s[C].bind(tx, thread_x)
    s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi)
    s[CC].compute_at(s[C], tx)
    yo, xo = CC.op.axis
    s[CC].reorder(ko, kt, yo, xo, ki)
    s[CC].unroll(kt)
    # Local (register) stages: vectorize 4-wide loads and double-buffer.
    for stage in [AL, BL]:
        s[stage].compute_at(s[CC], kt)
        _, xi = s[stage].split(stage.op.axis[1], factor=4)
        s[stage].vectorize(xi)
        s[stage].double_buffer()
    # Shared-memory stages: align storage (tunable) and cooperatively load.
    cfg.define_knob("storage_align", [16, 48])
    for stage in [AA, BB]:
        s[stage].storage_align(s[stage].op.axis[0], cfg["storage_align"].val, 0)
        s[stage].compute_at(s[CC], ko)
        fused = s[stage].fuse(*s[stage].op.axis)
        ty, tx = s[stage].split(fused, nparts=cfg["tile_y"].size[2])
        tx, xi = s[stage].split(tx, nparts=cfg["tile_x"].size[2])
        _, xi = s[stage].split(xi, factor=16)
        s[stage].bind(ty, thread_y)
        s[stage].bind(tx, thread_x)
        s[stage].vectorize(xi)
    cfg.define_knob("auto_unroll_max_step", [512, 1500])
    s[C].pragma(by, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[C].pragma(by, "unroll_explicit", False)
    # 2 flops (mul + add) per element of the reduction.
    cfg.add_flop(n * m * l * 2)
    return s, [A, B, C]
if __name__ == "__main__":
    # Square int8 GEMM of size 2048 on CUDA device 0.
    N = 2048
    n = m = l = N
    logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
    task = autotvm.task.create(gemm_int8, args=(n, m, l), target="cuda")
    print(task.config_space)
    measure_option = autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4),
    )
    log_name = "gemm_int8.log"
    if DO_TUNING:
        # Search the schedule space and keep the best entry from the log.
        tuner = autotvm.tuner.XGBTuner(task)
        tuner.tune(
            n_trial=1000,
            measure_option=measure_option,
            callbacks=[autotvm.callback.log_to_file(log_name)],
        )
        dispatch_context = autotvm.apply_history_best(log_name)
        best_config = dispatch_context.query(task.target, task.workload)
        print("\nBest config:")
        print(best_config)
    else:
        # Skip tuning and apply a known-good config by index.
        config = task.config_space.get(PRETUNED_INDEX)
        dispatch_context = autotvm.task.ApplyConfig(config)
        print("Using pretuned config:")
        print(config)
    with dispatch_context:
        with tvm.target.Target("cuda"):
            s, arg_bufs = gemm_int8(n, m, l)
            f = tvm.build(s, arg_bufs, "cuda", name="gemm_int8")
    dev = tvm.device("cuda", 0)
    a_np = np.random.randint(size=(n, l), low=-128, high=127, dtype="int8")
    b_np = np.random.randint(size=(m, l), low=-128, high=127, dtype="int8")
    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(b_np, dev)
    c = tvm.nd.array(np.zeros((n, m), dtype="int32"), dev)
    f(a, b, c)
    # Verify against a NumPy reference in int32.
    tvm.testing.assert_allclose(
        c.numpy(), np.dot(a_np.astype("int32"), b_np.T.astype("int32")), rtol=1e-5
    )
    num_ops = 2 * l * m * n
    num_runs = 1000
    timer_f = f.time_evaluator(f.entry_name, dev, number=num_runs)
    t = timer_f(a, b, c).mean
    GOPS = num_ops / (t * 1e3) / 1e6
    print("average time cost of %d runs = %g ms, %g GOPS." % (num_runs, t * 1e3, GOPS))
|
from tvm import te
from tvm import autotvm
from tvm.topi.cuda.tensor_intrin import dp4a
|
<|file_name|>AbstractHelper.js<|end_file_name|><|fim▁begin|>/*
This file is part of Ext JS 4.2
Copyright (c) 2011-2013 Sencha Inc
Contact: http://www.sencha.com/contact
GNU General Public License Usage
This file may be used under the terms of the GNU General Public License version 3.0 as
published by the Free Software Foundation and appearing in the file LICENSE included in the
packaging of this file.
Please review the following information to ensure the GNU General Public License version 3.0
requirements will be met: http://www.gnu.org/copyleft/gpl.html.
If you are unsure which license is appropriate for your use, please contact the sales department
at http://www.sencha.com/contact.
Build date: 2013-05-16 14:36:50 (f9be68accb407158ba2b1be2c226a6ce1f649314)
*/
// @tag dom,core
// @require AbstractQuery.js
/**
* Abstract base class for {@link Ext.dom.Helper}.
* @private
*/
Ext.define('Ext.dom.AbstractHelper', {
    // Tags that are void in HTML and must be self-closed (no children).
    emptyTags : /^(?:br|frame|hr|img|input|link|meta|range|spacer|wbr|area|param|col)$/i,
    // Spec keys that configure markup generation and are not attributes.
    confRe : /^(?:tag|children|cn|html|tpl|tplData)$/i,
    endRe : /end/i,
    // Separator between style names/values in a style string.
    styleSepRe: /\s*(?::|;)\s*/,
    // Since cls & for are reserved words, we need to transform them
    attributeTransform: { cls : 'class', htmlFor : 'for' },
    // Cache of generated closing tags, keyed by tag name (see generateMarkup).
    closeTags: {},
    // Converts camelCase style names to css-dash form, memoized per input.
    decamelizeName : (function () {
        var camelCaseRe = /([a-z])([A-Z])/g,
            cache = {};
        function decamel (match, p1, p2) {
            return p1 + '-' + p2.toLowerCase();
        }
        return function (s) {
            return cache[s] || (cache[s] = s.replace(camelCaseRe, decamel));
        };
    }()),
    // Recursively renders a DomHelper spec (string, array or config object)
    // into the `buffer` array of markup fragments; returns the buffer.
    generateMarkup: function(spec, buffer) {
        var me = this,
            specType = typeof spec,
            attr, val, tag, i, closeTags;
        if (specType == "string" || specType == "number") {
            buffer.push(spec);
        } else if (Ext.isArray(spec)) {
            // Array spec: render each child in order.
            for (i = 0; i < spec.length; i++) {
                if (spec[i]) {
                    me.generateMarkup(spec[i], buffer);
                }
            }
        } else {
            tag = spec.tag || 'div';
            buffer.push('<', tag);
            for (attr in spec) {
                if (spec.hasOwnProperty(attr)) {
                    val = spec[attr];
                    // skip config keys (tag/children/cn/html/tpl/tplData)
                    if (!me.confRe.test(attr)) {
                        if (typeof val == "object") {
                            // object attribute value => inline style object
                            buffer.push(' ', attr, '="');
                            me.generateStyles(val, buffer).push('"');
                        } else {
                            buffer.push(' ', me.attributeTransform[attr] || attr, '="', val, '"');
                        }
                    }
                }
            }
            // Now either just close the tag or try to add children and close the tag.
            if (me.emptyTags.test(tag)) {
                buffer.push('/>');
            } else {
                buffer.push('>');
                // Apply the tpl html, and cn specifications
                if ((val = spec.tpl)) {
                    val.applyOut(spec.tplData, buffer);
                }
                if ((val = spec.html)) {
                    buffer.push(val);
                }
                if ((val = spec.cn || spec.children)) {
                    me.generateMarkup(val, buffer);
                }
                // we generate a lot of close tags, so cache them rather than push 3 parts
                closeTags = me.closeTags;
                buffer.push(closeTags[tag] || (closeTags[tag] = '</' + tag + '>'));
            }
        }
        return buffer;
    },
    /**
     * Converts the styles from the given object to text. The styles are CSS style names
     * with their associated value.
     *
     * The basic form of this method returns a string:
     *
     *      var s = Ext.DomHelper.generateStyles({
     *          backgroundColor: 'red'
     *      });
     *
     *      // s = 'background-color:red;'
     *
     * Alternatively, this method can append to an output array.
     *
     *      var buf = [];
     *
     *      ...
     *
     *      Ext.DomHelper.generateStyles({
     *          backgroundColor: 'red'
     *      }, buf);
     *
     * In this case, the style text is pushed on to the array and the array is returned.
     *
     * @param {Object} styles The object describing the styles.
     * @param {String[]} [buffer] The output buffer.
     * @return {String/String[]} If buffer is passed, it is returned. Otherwise the style
     * string is returned.
     */
    generateStyles: function (styles, buffer) {
        var a = buffer || [],
            name;
        for (name in styles) {
            if (styles.hasOwnProperty(name)) {
                a.push(this.decamelizeName(name), ':', styles[name], ';');
            }
        }
        // When no buffer was supplied, join the fragments into a string.
        return buffer || a.join('');
    },
    /**
     * Returns the markup for the passed Element(s) config.
     * @param {Object} spec The DOM object spec (and children)
     * @return {String}
     */
    markup: function(spec) {
        if (typeof spec == "string") {
            return spec;
        }
        var buf = this.generateMarkup(spec, []);
        return buf.join('');
    },
    /**
     * Applies a style specification to an element.
     * @param {String/HTMLElement} el The element to apply styles to
     * @param {String/Object/Function} styles A style specification string e.g. 'width:100px', or object in the form {width:'100px'}, or
     * a function which returns such a specification.
     */
    applyStyles: function(el, styles) {
        if (styles) {
            var i = 0,
                len;
            el = Ext.fly(el, '_applyStyles');
            if (typeof styles == 'function') {
                styles = styles.call();
            }
            if (typeof styles == 'string') {
                // Split 'name:value;name:value' into alternating name/value tokens.
                styles = Ext.util.Format.trim(styles).split(this.styleSepRe);
                for (len = styles.length; i < len;) {
                    el.setStyle(styles[i++], styles[i++]);
                }
            } else if (Ext.isObject(styles)) {
                el.setStyle(styles);
            }
        }
    },
    /**
     * Inserts an HTML fragment into the DOM.
     * @param {String} where Where to insert the html in relation to el - beforeBegin, afterBegin, beforeEnd, afterEnd.
     *
     * For example take the following HTML: `<div>Contents</div>`
     *
     * Using different `where` values inserts element to the following places:
     *
     * - beforeBegin: `<HERE><div>Contents</div>`
     * - afterBegin: `<div><HERE>Contents</div>`
     * - beforeEnd: `<div>Contents<HERE></div>`
     * - afterEnd: `<div>Contents</div><HERE>`
     *
     * @param {HTMLElement/TextNode} el The context element
     * @param {String} html The HTML fragment
     * @return {HTMLElement} The new node
     */
    insertHtml: function(where, el, html) {
        var hash = {},
            setStart,
            range,
            frag,
            rangeEl;
        where = where.toLowerCase();
        // add these here because they are used in both branches of the condition.
        hash['beforebegin'] = ['BeforeBegin', 'previousSibling'];
        hash['afterend'] = ['AfterEnd', 'nextSibling'];
        range = el.ownerDocument.createRange();
        // "…end" positions anchor the range after el, the rest before it.
        setStart = 'setStart' + (this.endRe.test(where) ? 'After' : 'Before');
        if (hash[where]) {
            // beforebegin/afterend: insert as a sibling of el.
            range[setStart](el);
            frag = range.createContextualFragment(html);
            el.parentNode.insertBefore(frag, where == 'beforebegin' ? el : el.nextSibling);
            return el[(where == 'beforebegin' ? 'previous' : 'next') + 'Sibling'];
        }
        else {
            // afterbegin/beforeend: insert inside el, at the start or end.
            rangeEl = (where == 'afterbegin' ? 'first' : 'last') + 'Child';
            if (el.firstChild) {
                range[setStart](el[rangeEl]);
                frag = range.createContextualFragment(html);
                if (where == 'afterbegin') {
                    el.insertBefore(frag, el.firstChild);
                }
                else {
                    el.appendChild(frag);
                }
            }
            else {
                el.innerHTML = html;
            }
            return el[rangeEl];
        }
        // NOTE(review): unreachable — both branches above return; kept for
        // parity with the original Ext source where other code paths existed.
        throw 'Illegal insertion point -> "' + where + '"';
    },
    /**
     * Creates new DOM element(s) and inserts them before el.
     * @param {String/HTMLElement/Ext.Element} el The context element
     * @param {Object/String} o The DOM object spec (and children) or raw HTML blob
     * @param {Boolean} [returnElement] true to return a Ext.Element
     * @return {HTMLElement/Ext.Element} The new node
     */
    insertBefore: function(el, o, returnElement) {
        return this.doInsert(el, o, returnElement, 'beforebegin');
    },
    /**
     * Creates new DOM element(s) and inserts them after el.
     * @param {String/HTMLElement/Ext.Element} el The context element
     * @param {Object} o The DOM object spec (and children)
     * @param {Boolean} [returnElement] true to return a Ext.Element
     * @return {HTMLElement/Ext.Element} The new node
     */
    insertAfter: function(el, o, returnElement) {
        return this.doInsert(el, o, returnElement, 'afterend', 'nextSibling');
    },
    /**
     * Creates new DOM element(s) and inserts them as the first child of el.
     * @param {String/HTMLElement/Ext.Element} el The context element
     * @param {Object/String} o The DOM object spec (and children) or raw HTML blob
     * @param {Boolean} [returnElement] true to return a Ext.Element
     * @return {HTMLElement/Ext.Element} The new node
     */
    insertFirst: function(el, o, returnElement) {
        return this.doInsert(el, o, returnElement, 'afterbegin', 'firstChild');
    },
    /**
     * Creates new DOM element(s) and appends them to el.
     * @param {String/HTMLElement/Ext.Element} el The context element
     * @param {Object/String} o The DOM object spec (and children) or raw HTML blob
     * @param {Boolean} [returnElement] true to return a Ext.Element
     * @return {HTMLElement/Ext.Element} The new node
     */
    append: function(el, o, returnElement) {
        return this.doInsert(el, o, returnElement, 'beforeend', '', true);
    },
    /**
     * Creates new DOM element(s) and overwrites the contents of el with them.
     * @param {String/HTMLElement/Ext.Element} el The context element
     * @param {Object/String} o The DOM object spec (and children) or raw HTML blob
     * @param {Boolean} [returnElement] true to return a Ext.Element
     * @return {HTMLElement/Ext.Element} The new node
     */
    overwrite: function(el, o, returnElement) {
        el = Ext.getDom(el);
        el.innerHTML = this.markup(o);
        return returnElement ? Ext.get(el.firstChild) : el.firstChild;
    },
    // Shared implementation behind the insert*/append methods: renders the
    // spec to markup and inserts it at `pos`. The `sibling`/`append` args are
    // unused here; subclasses/overrides rely on the signature.
    doInsert: function(el, o, returnElement, pos, sibling, append) {
        var newNode = this.insertHtml(pos, Ext.getDom(el), this.markup(o));
        return returnElement ? Ext.get(newNode, true) : newNode;
    }
});
|
* @return {HTMLElement} The new node
|
<|file_name|>crypto_util.py<|end_file_name|><|fim▁begin|>"""Let's Encrypt client crypto utility functions.
.. todo:: Make the transition to use PSS rather than PKCS1_v1_5 when the server
is capable of handling the signatures.
"""
import logging
import os
import OpenSSL
import zope.component
from acme import crypto_util as acme_crypto_util
from acme import jose
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
logger = logging.getLogger(__name__)
# High level functions
def init_save_key(key_size, key_dir, keyname="key-letsencrypt.pem"):
    """Initializes and saves a privkey.

    Inits key and saves it in PEM format on the filesystem.

    .. note:: keyname is the attempted filename, it may be different if a file
        already exists at the path.

    :param int key_size: RSA key size in bits
    :param str key_dir: Key save directory.
    :param str keyname: Filename of key

    :returns: Key
    :rtype: :class:`letsencrypt.le_util.Key`

    :raises ValueError: If unable to generate the key given key_size.

    """
    try:
        key_pem = make_key(key_size)
    except ValueError as err:
        logger.exception(err)
        # Bare raise preserves the original traceback.
        raise
    config = zope.component.getUtility(interfaces.IConfig)
    # Save file
    le_util.make_or_verify_dir(key_dir, 0o700, os.geteuid(),
                               config.strict_permissions)
    key_f, key_path = le_util.unique_file(
        os.path.join(key_dir, keyname), 0o600)
    # Context manager ensures the file is closed even if the write fails
    # (previously an exception left the handle open).
    with key_f:
        key_f.write(key_pem)
    logger.info("Generating key (%d bits): %s", key_size, key_path)
    return le_util.Key(key_path, key_pem)
def init_save_csr(privkey, names, path, csrname="csr-letsencrypt.pem"):
"""Initialize a CSR with the given private key.
:param privkey: Key to include in the CSR
:type privkey: :class:`letsencrypt.le_util.Key`
:param set names: `str` names to include in the CSR
:param str path: Certificate save directory.
:returns: CSR
:rtype: :class:`letsencrypt.le_util.CSR`
"""
csr_pem, csr_der = make_csr(privkey.pem, names)
config = zope.component.getUtility(interfaces.IConfig)
# Save CSR
le_util.make_or_verify_dir(path, 0o755, os.geteuid(),
config.strict_permissions)
csr_f, csr_filename = le_util.unique_file(
os.path.join(path, csrname), 0o644)
csr_f.write(csr_pem)
csr_f.close()
logger.info("Creating CSR: %s", csr_filename)
return le_util.CSR(csr_filename, csr_der, "der")
# Lower level functions
def make_csr(key_str, domains):
"""Generate a CSR.
:param str key_str: PEM-encoded RSA key.
:param list domains: Domains included in the certificate.
.. todo:: Detect duplicates in `domains`? Using a set doesn't
preserve order...
:returns: new CSR in PEM and DER form containing all domains
:rtype: tuple
"""
assert domains, "Must provide one or more hostnames for the CSR."
pkey = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, key_str)
req = OpenSSL.crypto.X509Req()
req.get_subject().CN = domains[0]
# TODO: what to put into req.get_subject()?
# TODO: put SAN if len(domains) > 1
req.add_extensions([
OpenSSL.crypto.X509Extension(
"subjectAltName",
critical=False,
value=", ".join("DNS:%s" % d for d in domains)
),
])
req.set_pubkey(pkey)
req.sign(pkey, "sha256")<|fim▁hole|> return tuple(OpenSSL.crypto.dump_certificate_request(method, req)
for method in (OpenSSL.crypto.FILETYPE_PEM,
OpenSSL.crypto.FILETYPE_ASN1))
# WARNING: the csr and private key file are possible attack vectors for TOCTOU
# We should either...
# A. Do more checks to verify that the CSR is trusted/valid
# B. Audit the parsing code for vulnerabilities
def valid_csr(csr):
"""Validate CSR.
Check if `csr` is a valid CSR for the given domains.
:param str csr: CSR in PEM.
:returns: Validity of CSR.
:rtype: bool
"""
try:
req = OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_PEM, csr)
return req.verify(req.get_pubkey())
except OpenSSL.crypto.Error as error:
logger.debug(error, exc_info=True)
return False
def csr_matches_pubkey(csr, privkey):
"""Does private key correspond to the subject public key in the CSR?
:param str csr: CSR in PEM.
:param str privkey: Private key file contents (PEM)
:returns: Correspondence of private key to CSR subject public key.
:rtype: bool
"""
req = OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_PEM, csr)
pkey = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, privkey)
try:
return req.verify(pkey)
except OpenSSL.crypto.Error as error:
logger.debug(error, exc_info=True)
return False
def make_key(bits):
"""Generate PEM encoded RSA key.
:param int bits: Number of bits, at least 1024.
:returns: new RSA key in PEM form with specified number of bits
:rtype: str
"""
assert bits >= 1024 # XXX
key = OpenSSL.crypto.PKey()
key.generate_key(OpenSSL.crypto.TYPE_RSA, bits)
return OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
def valid_privkey(privkey):
"""Is valid RSA private key?
:param str privkey: Private key file contents in PEM
:returns: Validity of private key.
:rtype: bool
"""
try:
return OpenSSL.crypto.load_privatekey(
OpenSSL.crypto.FILETYPE_PEM, privkey).check()
except (TypeError, OpenSSL.crypto.Error):
return False
def pyopenssl_load_certificate(data):
"""Load PEM/DER certificate.
:raises errors.Error:
"""
openssl_errors = []
for file_type in (OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1):
try:
return OpenSSL.crypto.load_certificate(file_type, data), file_type
except OpenSSL.crypto.Error as error: # TODO: other errors?
openssl_errors.append(error)
raise errors.Error("Unable to load: {0}".format(",".join(
str(error) for error in openssl_errors)))
def _get_sans_from_cert_or_req(cert_or_req_str, load_func,
typ=OpenSSL.crypto.FILETYPE_PEM):
try:
cert_or_req = load_func(typ, cert_or_req_str)
except OpenSSL.crypto.Error as error:
logger.exception(error)
raise
# pylint: disable=protected-access
return acme_crypto_util._pyopenssl_cert_or_req_san(cert_or_req)
def get_sans_from_cert(cert, typ=OpenSSL.crypto.FILETYPE_PEM):
"""Get a list of Subject Alternative Names from a certificate.
:param str cert: Certificate (encoded).
:param typ: `OpenSSL.crypto.FILETYPE_PEM` or `OpenSSL.crypto.FILETYPE_ASN1`
:returns: A list of Subject Alternative Names.
:rtype: list
"""
return _get_sans_from_cert_or_req(
cert, OpenSSL.crypto.load_certificate, typ)
def get_sans_from_csr(csr, typ=OpenSSL.crypto.FILETYPE_PEM):
"""Get a list of Subject Alternative Names from a CSR.
:param str csr: CSR (encoded).
:param typ: `OpenSSL.crypto.FILETYPE_PEM` or `OpenSSL.crypto.FILETYPE_ASN1`
:returns: A list of Subject Alternative Names.
:rtype: list
"""
return _get_sans_from_cert_or_req(
csr, OpenSSL.crypto.load_certificate_request, typ)
def dump_pyopenssl_chain(chain, filetype=OpenSSL.crypto.FILETYPE_PEM):
"""Dump certificate chain into a bundle.
:param list chain: List of `OpenSSL.crypto.X509` (or wrapped in
`acme.jose.ComparableX509`).
"""
# XXX: returns empty string when no chain is available, which
# shuts up RenewableCert, but might not be the best solution...
def _dump_cert(cert):
if isinstance(cert, jose.ComparableX509):
# pylint: disable=protected-access
cert = cert._wrapped
return OpenSSL.crypto.dump_certificate(filetype, cert)
# assumes that OpenSSL.crypto.dump_certificate includes ending
# newline character
return "".join(_dump_cert(cert) for cert in chain)<|fim▁end|>
| |
<|file_name|>clean_mac_info_plist.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "KombatKoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"<|fim▁end|>
|
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the KombatKoin-Qt.app contains the right plist (including the right version)
# fix made because of serval bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
|
<|file_name|>config.ts<|end_file_name|><|fim▁begin|>export default function($stateProvider: angular.ui.IStateProvider, $urlRouterProvider: angular.ui.IUrlRouterProvider) {
'ngInject';
$stateProvider.state('app.home', {
url: "/home",
views: {<|fim▁hole|> }
}
});
$urlRouterProvider.otherwise("/home");
}<|fim▁end|>
|
'content@app': {
template: require("./index.html"),
controller: "HomeController as homeCtrl"
|
<|file_name|>connect.test.js<|end_file_name|><|fim▁begin|>//@ sourceMappingURL=connect.test.map
// Generated by CoffeeScript 1.6.1
(function() {
var assert, async, wongo,
__hasProp = {}.hasOwnProperty;
assert = require('assert');
async = require('async');
wongo = require('../lib/wongo');
describe('Wongo.connect()', function() {
it('should connect to the database', function(done) {<|fim▁hole|> var _type, _types;
_types = (function() {
var _ref, _results;
_ref = wongo.schemas;
_results = [];
for (_type in _ref) {
if (!__hasProp.call(_ref, _type)) continue;
_results.push(_type);
}
return _results;
})();
return async.each(_types, function(_type, nextInLoop) {
return wongo.clear(_type, nextInLoop);
}, done);
});
});
}).call(this);<|fim▁end|>
|
wongo.connect(process.env.DB_URL);
return done();
});
return it('should clear every registered schema', function(done) {
|
<|file_name|>validate.go<|end_file_name|><|fim▁begin|>// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"github.com/GoogleCloudPlatform/config-validator/pkg/api/validator"
"github.com/GoogleCloudPlatform/terraform-validator/converters/google"
"github.com/GoogleCloudPlatform/terraform-validator/tfgcv"
"github.com/golang/protobuf/jsonpb"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"go.uber.org/zap"
)
const validateDesc = `
Validate that a terraform plan conforms to a Constraint Framework
policy library written to expect Google CAI (Cloud Asset Inventory) data.
Unsupported terraform resources (see: "terraform-validate list-supported-resources")
are skipped.
Policy violations will result in an exit code of 2.
Example:
terraform-validator validate ./example/terraform.tfplan \
--project my-project \
--ancestry organization/my-org/folder/my-folder \
--policy-path ./path/to/my/gcv/policies
`
type validateOptions struct {
project string
ancestry string
offline bool
policyPath string
outputJSON bool
dryRun bool
rootOptions *rootOptions
readPlannedAssets tfgcv.ReadPlannedAssetsFunc
validateAssets tfgcv.ValidateAssetsFunc
}
func newValidateCmd(rootOptions *rootOptions) *cobra.Command {
o := &validateOptions{
rootOptions: rootOptions,
readPlannedAssets: tfgcv.ReadPlannedAssets,
validateAssets: tfgcv.ValidateAssets,
}
cmd := &cobra.Command{
Use: "validate TFPLAN_JSON --policy-path=/path/to/policy/library",
Short: "Validate that a terraform plan conforms to Constraint Framework policies",
Long: validateDesc,
PreRunE: func(c *cobra.Command, args []string) error {
return o.validateArgs(args)
},
RunE: func(c *cobra.Command, args []string) error {
if o.dryRun {
return nil
}
return o.run(args[0])
},
}
cmd.Flags().StringVar(&o.policyPath, "policy-path", "", "Path to directory containing validation policies")
cmd.MarkFlagRequired("policy-path")
cmd.Flags().StringVar(&o.project, "project", "", "Provider project override (override the default project configuration assigned to the google terraform provider when validating resources)")
cmd.Flags().StringVar(&o.ancestry, "ancestry", "", "Override the ancestry location of the project when validating resources")
cmd.Flags().BoolVar(&o.offline, "offline", false, "Do not make network requests")
cmd.Flags().BoolVar(&o.outputJSON, "output-json", false, "Print violations as JSON")
cmd.Flags().BoolVar(&o.dryRun, "dry-run", false, "Only parse & validate args")
cmd.Flags().MarkHidden("dry-run")
return cmd
}
func (o *validateOptions) validateArgs(args []string) error {
if len(args) != 1 {
return errors.New("missing required argument TFPLAN_JSON")
}
if o.offline && o.ancestry == "" {
return errors.New("please set ancestry via --ancestry in offline mode")
}
return nil
}
func (o *validateOptions) run(plan string) error {
ctx := context.Background()
content, err := ioutil.ReadFile(plan)
if err != nil {
return fmt.Errorf("unable to read file %s", plan)
}
// if input file is not Asset, try convert
var assets []google.Asset
if err := json.Unmarshal(content, &assets); err != nil {
var err error
assets, err = o.readPlannedAssets(ctx, plan, o.project, o.ancestry, o.offline, false, o.rootOptions.errorLogger)
if err != nil {
if errors.Cause(err) == tfgcv.ErrParsingProviderProject {
return errors.New("unable to parse provider project, please use --project flag")
}
return errors.Wrap(err, "converting tfplan to CAI assets")
}
}
violations, err := o.validateAssets(ctx, assets, o.policyPath)
if err != nil {
return errors.Wrap(err, "validating")
}
if o.rootOptions.useStructuredLogging {
msg := "No violations found"
if len(violations) > 0 {
msg = "Violations found"
}
o.rootOptions.outputLogger.Info(
msg,
zap.Any("resource_body", violations),
)
if len(violations) > 0 {
return errViolations
}
return nil
}
// Legacy behavior
if len(violations) > 0 {
if o.outputJSON {
marshaller := &jsonpb.Marshaler{}
auditResult := &validator.AuditResponse{}
auditResult.Violations = violations
if err := marshaller.Marshal(os.Stdout, auditResult); err != nil {
return errors.Wrap(err, "marshalling violations to json")
}<|fim▁hole|> for _, v := range violations {
fmt.Printf("Constraint %v on resource %v: %v\n\n",
v.Constraint,
v.Resource,
v.Message,
)
}
}
return errViolations
}
if !o.outputJSON {
fmt.Println("No violations found.")
}
return nil
}<|fim▁end|>
|
} else {
fmt.Print("Found Violations:\n\n")
|
<|file_name|>SimpleQueue.java<|end_file_name|><|fim▁begin|>package ru.job4j.list;
/**
* Created on 24.07.17.
* Simple queue realization.<|fim▁hole|>public class SimpleQueue<E> extends SimpleLinkedList<E> {
/**
* Link to the first element.
*/
private Node<E> first = null;
/**
* Link to the last element.
*/
private Node<E> last = null;
/**
* Size.
*/
private int size = 0;
/**
* Removes first element from list, and returns it.
* @return - first element, or null if size == 0.
*/
public E poll() {
E temp = this.first.item;
if (this.size > 0) {
this.first = this.first.next;
this.size--;
}
return temp;
}
/**
* Removes first element from list, and returns it.
* @return - first element, or null if size == 0.
*/
public E remove() {
return this.poll();
}
/**
* Returns first element from the list, without deleting.
* @return first element from the list.
*/
public E peek() {
return first.item;
}
/**
* adding element to the end of the list.
* @param e - element to add.
*/
public void offer(E e) {
Node<E> l = last;
Node<E> newNode = new Node<>(e, l, null);
if (l == null) {
first = newNode;
last = newNode;
} else {
l.next = newNode;
last = l.next;
}
size++;
}
/**
* Private class to store elements in list.
* @param <E> - class to store.
*/
private class Node<E> {
/**
* main element.
*/
private E item;
/**
* Link to previous item.
*/
private Node<E> previous;
/**
* link ot next item.
*/
private Node<E> next;
/**
* Main constructor.
* @param item - main item.
* @param previous - link to previous item.
* @param next - link to next item.
*/
Node(E item, Node<E> previous, Node<E> next) {
this.item = item;
this.previous = previous;
this.next = next;
}
}
}<|fim▁end|>
|
* @author Wamdue
* @version 1.0
* @param <E> - class to store.
*/
|
<|file_name|>closest_points_shape_shape.rs<|end_file_name|><|fim▁begin|>use na::RealField;
use crate::math::{Isometry, Point};
use crate::query::{self, ClosestPoints};
use crate::shape::{Ball, Plane, Segment, Shape};
/// Computes the pair of closest points between two shapes.
///
/// Returns `None` if the objects are separated by a distance greater than `max_dist`.
pub fn closest_points<N: RealField>(
m1: &Isometry<N>,
g1: &dyn Shape<N>,
m2: &Isometry<N>,
g2: &dyn Shape<N>,
max_dist: N,
) -> ClosestPoints<N> {
if let (Some(b1), Some(b2)) = (g1.as_shape::<Ball<N>>(), g2.as_shape::<Ball<N>>()) {
let p1 = Point::from(m1.translation.vector);
let p2 = Point::from(m2.translation.vector);
query::closest_points_ball_ball(&p1, b1, &p2, b2, max_dist)
} else if let (Some(s1), Some(s2)) = (g1.as_shape::<Segment<N>>(), g2.as_shape::<Segment<N>>())
{
query::closest_points_segment_segment(m1, s1, m2, s2, max_dist)
} else if let (Some(p1), Some(s2)) = (g1.as_shape::<Plane<N>>(), g2.as_support_map()) {<|fim▁hole|> } else if let (Some(s1), Some(s2)) = (g1.as_support_map(), g2.as_support_map()) {
query::closest_points_support_map_support_map(m1, s1, m2, s2, max_dist)
} else if let Some(c1) = g1.as_composite_shape() {
query::closest_points_composite_shape_shape(m1, c1, m2, g2, max_dist)
} else if let Some(c2) = g2.as_composite_shape() {
query::closest_points_shape_composite_shape(m1, g1, m2, c2, max_dist)
} else {
panic!("No algorithm known to compute a contact point between the given pair of shapes.")
}
}<|fim▁end|>
|
query::closest_points_plane_support_map(m1, p1, m2, s2, max_dist)
} else if let (Some(s1), Some(p2)) = (g1.as_support_map(), g2.as_shape::<Plane<N>>()) {
query::closest_points_support_map_plane(m1, s1, m2, p2, max_dist)
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|>import props from './props';
import './view.html';
class NoteClab {
beforeRegister() {<|fim▁hole|> this.is = 'note-clab';
this.properties = props;
}
computeClasses(type) {
var arr = ['input-note'];
if (type != undefined) arr.push(type);
return arr.join(' ');
}
}
Polymer(NoteClab);<|fim▁end|>
| |
<|file_name|>solution.py<|end_file_name|><|fim▁begin|>#!/bin/python3
import sys<|fim▁hole|>
x1, v1, x2, v2 = map(int, input().strip().split(' '))
willLand = (
v1 != v2
and (x1 - x2) % (v2 - v1) == 0
and (x1 - x2) // (v2 - v1) >= 0)
print(('NO', 'YES')[willLand])<|fim▁end|>
| |
<|file_name|>engine.cpp<|end_file_name|><|fim▁begin|>/*
+------------------------------------------------------------------------+
| Phalcon Framework |
+------------------------------------------------------------------------+<|fim▁hole|> | with this package in the file docs/LICENSE.txt. |
| |
| If you did not receive a copy of the license and are unable to |
| obtain it through the world-wide-web, please send an email |
| to [email protected] so we can send you a copy immediately. |
+------------------------------------------------------------------------+
| Authors: Andres Gutierrez <[email protected]> |
| Eduar Carvajal <[email protected]> |
+------------------------------------------------------------------------+
*/
#include "mvc/view/engine.h"
#include "mvc/view/engineinterface.h"
#include "di/injectable.h"
#include "kernel/main.h"
#include "kernel/memory.h"
#include "kernel/object.h"
#include "kernel/fcall.h"
/**
* Phalcon\Mvc\View\Engine
*
* All the template engine adapters must inherit this class. This provides
* basic interfacing between the engine and the Phalcon\Mvc\View component.
*/
zend_class_entry *phalcon_mvc_view_engine_ce;
PHP_METHOD(Phalcon_Mvc_View_Engine, __construct);
PHP_METHOD(Phalcon_Mvc_View_Engine, getContent);
PHP_METHOD(Phalcon_Mvc_View_Engine, partial);
PHP_METHOD(Phalcon_Mvc_View_Engine, getView);
ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_mvc_view_engine___construct, 0, 0, 1)
ZEND_ARG_INFO(0, view)
ZEND_ARG_INFO(0, dependencyInjector)
ZEND_END_ARG_INFO()
static const zend_function_entry phalcon_mvc_view_engine_method_entry[] = {
PHP_ME(Phalcon_Mvc_View_Engine, __construct, arginfo_phalcon_mvc_view_engine___construct, ZEND_ACC_PUBLIC|ZEND_ACC_CTOR)
PHP_ME(Phalcon_Mvc_View_Engine, getContent, NULL, ZEND_ACC_PUBLIC)
PHP_ME(Phalcon_Mvc_View_Engine, partial, arginfo_phalcon_mvc_view_engineinterface_partial, ZEND_ACC_PUBLIC)
PHP_ME(Phalcon_Mvc_View_Engine, getView, NULL, ZEND_ACC_PUBLIC)
PHP_FE_END
};
/**
* Phalcon\Mvc\View\Engine initializer
*/
PHALCON_INIT_CLASS(Phalcon_Mvc_View_Engine){
PHALCON_REGISTER_CLASS_EX(Phalcon\\Mvc\\View, Engine, mvc_view_engine, phalcon_di_injectable_ce, phalcon_mvc_view_engine_method_entry, ZEND_ACC_EXPLICIT_ABSTRACT_CLASS);
zend_declare_property_null(phalcon_mvc_view_engine_ce, SL("_view"), ZEND_ACC_PROTECTED TSRMLS_CC);
zend_class_implements(phalcon_mvc_view_engine_ce TSRMLS_CC, 1, phalcon_mvc_view_engineinterface_ce);
return SUCCESS;
}
/**
* Phalcon\Mvc\View\Engine constructor
*
* @param Phalcon\Mvc\ViewInterface $view
* @param Phalcon\DiInterface $dependencyInjector
*/
PHP_METHOD(Phalcon_Mvc_View_Engine, __construct){
zval *view, *dependency_injector = NULL;
phalcon_fetch_params(0, 1, 1, &view, &dependency_injector);
if (!dependency_injector) {
dependency_injector = PHALCON_GLOBAL(z_null);
}
phalcon_update_property_this(this_ptr, SL("_view"), view TSRMLS_CC);
phalcon_update_property_this(this_ptr, SL("_dependencyInjector"), dependency_injector TSRMLS_CC);
}
/**
* Returns cached ouput on another view stage
*
* @return array
*/
PHP_METHOD(Phalcon_Mvc_View_Engine, getContent)
{
zval *view = phalcon_fetch_nproperty_this(this_ptr, SL("_view"), PH_NOISY TSRMLS_CC);
PHALCON_RETURN_CALL_METHODW(view, "getcontent");
}
/**
* Renders a partial inside another view
*
* @param string $partialPath
* @param array $params
* @return string
*/
PHP_METHOD(Phalcon_Mvc_View_Engine, partial){
zval *partial_path, *params = NULL, *view;
phalcon_fetch_params(0, 1, 1, &partial_path, ¶ms);
if (!params) {
params = PHALCON_GLOBAL(z_null);
}
view = phalcon_fetch_nproperty_this(this_ptr, SL("_view"), PH_NOISY TSRMLS_CC);
PHALCON_RETURN_CALL_METHODW(view, "partial", partial_path, params);
}
/**
* Returns the view component related to the adapter
*
* @return Phalcon\Mvc\ViewInterface
*/
PHP_METHOD(Phalcon_Mvc_View_Engine, getView){
RETURN_MEMBER(this_ptr, "_view");
}<|fim▁end|>
|
| Copyright (c) 2011-2014 Phalcon Team (http://www.phalconphp.com) |
+------------------------------------------------------------------------+
| This source file is subject to the New BSD License that is bundled |
|
<|file_name|>0004_auto_20190610_2250.py<|end_file_name|><|fim▁begin|># Generated by Django 2.2.1 on 2019-06-10 19:50
from django.db import migrations
class Migration(migrations.Migration):
<|fim▁hole|> ('registration', '0003_limbomember_mother_tongue'),
]
operations = [
migrations.RenameModel(
old_name='LimboMember',
new_name='Applicant',
),
]<|fim▁end|>
|
dependencies = [
|
<|file_name|>vmware_guest_serial_port.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Anusha Hegde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_guest_serial_port
short_description: Manage serial ports on an existing VM
version_added: "2.10"
description:
- "This module can be used to manage serial ports on an existing VM"
options:
name:
description:
- Name of the virtual machine.
- This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
type: str
uuid:
description:
- UUID of the instance to manage the serial ports, this is VMware's unique identifier.
- This is a required parameter, if parameter C(name) or C(moid) is not supplied.
type: str
moid:
description:
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
- This is required if C(name) or C(uuid) is not supplied.
type: str
use_instance_uuid:
description:
- Whether to use the VMware instance UUID rather than the BIOS UUID.
default: no
type: bool
backings:
type: list
description:
- A list of backings for serial ports.
- 'C(backing_type) (str): is required to add or reconfigure or remove an existing serial port.'
- 'Valid attributes are:'
- ' - C(backing_type) (str): Backing type is required for the serial ports to be added or reconfigured or removed.'
- ' - C(state) (str): is required to identify whether we are adding, modifying or removing the serial port.
- choices:
- C(present): modify an existing serial port. C(backing_type) is required to determine the port.
The first matching C(backing_type) and either of C(service_uri) or C(pipe_name) or C(device_name) or C(file_path) will be modified.
If there is only one device with a backing type, the secondary details are not needed.
We will match the last such device with the given backing type.
- C(absent): remove an existing serial port. C(backing_type) is required to determine the port.
The first matching C(backing_type) and either of C(service_uri) or C(pipe_name) or C(device_name) or C(file_path) will be removed.
If there is only one device with a backing type, the secondary details are not needed.
We will match the last such device with the given backing type.'
- ' - C(yield_on_poll) (bool): Enables CPU yield behavior. Default value is true.'
- ' - C(direction) (str): Required when I(backing_type=network).
The direction of the connection.
- choices:
- client
- server'
- ' - C(service_uri) (str): Required when I(backing_type=network).
Identifies the local host or a system on the network, depending on the value of I(direction).
If you use the virtual machine as a server, the URI identifies the host on which the virtual machine runs.
In this case, the host name part of the URI should be empty, or it should specify the address of the local host.
If you use the virtual machine as a client, the URI identifies the remote system on the network.'
- ' - C(endpoint) (str): Required when I(backing_type=pipe).
When you use serial port pipe backing to connect a virtual machine to another process, you must define the endpoints.'
- ' - C(no_rx_loss) (bool): Required when I(backing_type=pipe).
Enables optimized data transfer over the pipe.
- choices:
- client
- server'
- ' - C(pipe_name) (str): Required when I(backing_type=pipe).'
- ' - C(device_name) (str): Required when I(backing_type=device).'
- ' - C(file_path) (str): Required when I(backing_type=file).
File path for the host file used in this backing. Fully qualified path is required, like <datastore_name>/<file_name>'
extends_documentation_fragment:
- vmware.documentation
author:
- Anusha Hegde (@anusha94)
'''
EXAMPLES = '''
# Create serial ports
- name: Create multiple serial ports with Backing type - network, pipe, device and file
vmware_guest_serial_port:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
name: "test_vm1"
backings:
- type: 'network'
direction: 'client'
service_uri: 'tcp://6000'
yield_on_poll: True
- type: 'pipe'
pipe_name: 'serial_pipe'
endpoint: 'client'
- type: 'device'
device_name: '/dev/char/serial/uart0'
- type: 'file'
file_path: '[datastore1]/file1'
yield_on_poll: True
register: create_multiple_ports
# Modify existing serial port
- name: Modify Network backing type
vmware_guest_serial_port:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
name: '{{ name }}'
backings:
- type: 'network'
state: 'present'
direction: 'server'
service_uri: 'tcp://6000'
delegate_to: localhost
# Remove serial port
- name: Remove pipe backing type
vmware_guest_serial_port:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
name: '{{ name }}'
backings:
- type: 'pipe'
state: 'absent'
delegate_to: localhost
'''
RETURN = r'''
serial_port_data:
description: metadata about the virtual machine's serial ports after managing them
returned: always
type: dict
sample: [
{
"backing_type": "network",
"direction": "client",
"service_uri": "tcp://6000"
},
{
"backing_type": "pipe",
"direction": "server",
"pipe_name": "serial pipe"
},
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task
from ansible.module_utils._text import to_native
try:
from pyVmomi import vim
except ImportError:
pass
class PyVmomiHelper(PyVmomi):
""" This class is a helper to create easily VMware Spec for PyVmomiHelper """
def __init__(self, module):
super(PyVmomiHelper, self).__init__(module)
self.change_applied = False # a change was applied meaning at least one task succeeded
self.config_spec = vim.vm.ConfigSpec()
self.config_spec.deviceChange = []
self.serial_ports = []
def check_vm_state(self, vm_obj):
"""
To add serial port, the VM must be in powered off state
Input:
- vm: Virtual Machine
Output:
- True if vm is in poweredOff state
- module fails otherwise
"""
if vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOff:
return True
else:
self.module.fail_json(msg="A serial device cannot be added to a VM in the current state(" + vm_obj.runtime.powerState + ")."
+ "Please use the vmware_guest_powerstate module to power off the VM")
def get_serial_port_config_spec(self, vm_obj):
"""
Variables changed:
- self.config_spec
- self.change_applied
"""
# create serial config spec for adding, editing, removing
for backing in self.params.get('backings'):
backing_keys = backing.keys()
serial_port = get_serial_port(vm_obj, backing)
if serial_port is None and 'state' not in backing_keys:
# if serial port is None and state is not mentioned
# create a new serial port
serial_port_spec = self.create_serial_port(backing)
serial_port_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
self.serial_ports.append(serial_port_spec)
self.change_applied = True
else:
if serial_port is not None and 'state' in backing_keys:
serial_spec = vim.vm.device.VirtualDeviceSpec()
serial_spec.device = serial_port
if backing['state'].lower() == 'present':
# modify existing serial port
serial_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
serial_spec.device.backing = self.get_backing_info(serial_port, backing, backing['type'])
self.change_applied = True
self.config_spec.deviceChange.append(serial_spec)
elif backing['state'].lower() == 'absent':
# remove serial port
serial_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
self.change_applied = True
self.config_spec.deviceChange.append(serial_spec)
else:
self.module.fail_json(msg='Unable to find the specified serial port: %s' % backing)
def reconfigure_vm_serial_port(self, vm_obj):
"""
Reconfigure vm with new or modified serial port config spec
"""
self.get_serial_port_config_spec(vm_obj)
try:
# configure create tasks first
if self.serial_ports:
for serial_port in self.serial_ports:
# each type of serial port is of config_spec.device = vim.vm.device.VirtualSerialPort() object type
# because serial ports differ in the backing types and config_spec.device has to be unique,
# we are creating a new spec for every create port configuration
spec = vim.vm.ConfigSpec()
spec.deviceChange.append(serial_port)
task = vm_obj.ReconfigVM_Task(spec=spec)
wait_for_task(task)
task = vm_obj.ReconfigVM_Task(spec=self.config_spec)
wait_for_task(task)
except vim.fault.InvalidDatastorePath as e:
self.module.fail_json(msg="Failed to configure serial port on given virtual machine due to invalid path: %s" % to_native(e.msg))
except vim.fault.RestrictedVersion as e:
self.module.fail_json(msg="Failed to reconfigure virtual machine due to product versioning restrictions: %s" % to_native(e.msg))
if task.info.state == 'error':
results = {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg}
else:
serial_port_info = get_serial_port_info(vm_obj)
results = {'changed': self.change_applied, 'failed': False, 'serial_port_info': serial_port_info}<|fim▁hole|>
return results
def set_network_backing(self, serial_port, backing_info):
"""
Set the networking backing params
"""
required_params = ['service_uri', 'direction']
if set(required_params).issubset(backing_info.keys()):
backing = serial_port.URIBackingInfo()
backing.serviceURI = backing_info['service_uri']
backing.direction = backing_info['direction']
else:
self.module.fail_json(msg="Failed to create a new serial port of network backing type due to insufficient parameters."
+ "The required parameters are service_uri and direction")
return backing
def set_pipe_backing(self, serial_port, backing_info):
"""
Set the pipe backing params
"""
required_params = ['pipe_name', 'endpoint']
if set(required_params).issubset(backing_info.keys()):
backing = serial_port.PipeBackingInfo()
backing.pipeName = backing_info['pipe_name']
backing.endpoint = backing_info['endpoint']
else:
self.module.fail_json(msg="Failed to create a new serial port of pipe backing type due to insufficient parameters."
+ "The required parameters are pipe_name and endpoint")
# since no_rx_loss is an optional argument, so check if the key is present
if 'no_rx_loss' in backing_info.keys() and backing_info['no_rx_loss']:
backing.noRxLoss = backing_info['no_rx_loss']
return backing
def set_device_backing(self, serial_port, backing_info):
    """Build a host-device backing object for a serial port.

    Fails the module when 'device_name' is missing from backing_info;
    fail_json exits, so 'backing' is always bound at the final return.
    """
    if 'device_name' in backing_info:
        backing = serial_port.DeviceBackingInfo()
        backing.deviceName = backing_info['device_name']
    else:
        self.module.fail_json(msg="Failed to create a new serial port of device backing type due to insufficient parameters."
                              + "The required parameters are device_name")
    return backing
def set_file_backing(self, serial_port, backing_info):
    """Build a file backing object for a serial port.

    Requires 'file_path' in backing_info; fails the module otherwise.
    """
    required_params = ['file_path']
    if set(required_params).issubset(backing_info.keys()):
        backing = serial_port.FileBackingInfo()
        backing.fileName = backing_info['file_path']
    else:
        # fail_json exits the module, so 'backing' is always bound below.
        self.module.fail_json(msg="Failed to create a new serial port of file backing type due to insufficient parameters."
                              + "The required parameters are file_path")
    return backing
def get_backing_info(self, serial_port, backing, backing_type):
    """Dispatch to the backing builder matching backing_type.

    backing_type must be one of 'network', 'pipe', 'device' or 'file'.
    Fails the module for any other value. (The original code returned the
    string "Invalid Backing Info" from switcher.get() and then called it,
    which raised "'str' object is not callable" instead of a clean error.)
    """
    switcher = {
        "network": self.set_network_backing,
        "pipe": self.set_pipe_backing,
        "device": self.set_device_backing,
        "file": self.set_file_backing
    }
    backing_func = switcher.get(backing_type)
    if backing_func is None:
        self.module.fail_json(msg="Invalid backing type '%s'. Valid backing types are: %s"
                              % (backing_type, ", ".join(sorted(switcher))))
    return backing_func(serial_port, backing)
def create_serial_port(self, backing):
    """Build a VirtualDeviceSpec describing a brand-new serial port.

    'backing' is one entry of the module's 'backings' list; its 'type' key
    selects which backing builder is used. yieldOnPoll defaults to True
    when 'yield_on_poll' is not supplied.
    """
    serial_spec = vim.vm.device.VirtualDeviceSpec()
    serial_port = vim.vm.device.VirtualSerialPort()
    # yield CPU while polling unless the user explicitly turned it off
    serial_port.yieldOnPoll = backing['yield_on_poll'] if 'yield_on_poll' in backing.keys() else True
    serial_port.backing = self.get_backing_info(serial_port, backing, backing['type'])
    serial_spec.device = serial_port
    return serial_spec
def get_serial_port(vm_obj, backing):
    """Find an existing serial port on the VM matching the given backing.

    Matches first on backing class (network/pipe/device/file) and then, when
    the corresponding identifying key is present in 'backing', on that value
    (serviceURI, pipeName, deviceName or fileName). Returns None when no
    port matches.
    """
    serial_port = None
    backing_type_mapping = {
        'network': vim.vm.device.VirtualSerialPort.URIBackingInfo,
        'pipe': vim.vm.device.VirtualSerialPort.PipeBackingInfo,
        'device': vim.vm.device.VirtualSerialPort.DeviceBackingInfo,
        'file': vim.vm.device.VirtualSerialPort.FileBackingInfo
    }
    valid_params = backing.keys()
    for device in vm_obj.config.hardware.device:
        if isinstance(device, vim.vm.device.VirtualSerialPort):
            if isinstance(device.backing, backing_type_mapping[backing['type']]):
                if 'service_uri' in valid_params:
                    # network backing type
                    if device.backing.serviceURI == backing['service_uri']:
                        serial_port = device
                        break
                elif 'pipe_name' in valid_params:
                    # named pipe backing type
                    if device.backing.pipeName == backing['pipe_name']:
                        serial_port = device
                        break
                elif 'device_name' in valid_params:
                    # physical serial device backing type
                    if device.backing.deviceName == backing['device_name']:
                        serial_port = device
                        break
                elif 'file_path' in valid_params:
                    # file backing type
                    if device.backing.fileName == backing['file_path']:
                        serial_port = device
                        break
                # if there is a backing of only one type, user need not provide secondary details like service_uri, pipe_name, device_name or file_path
                # we will match the serial port with backing type only
                # in this case, the last matching serial port will be returned
                # NOTE(review): this assignment also runs when an identifying
                # key WAS provided but did not match the device above —
                # confirm that recording such a device is intended.
                serial_port = device
    return serial_port
def get_serial_port_info(vm_obj):
    """Collect a summary dict for every serial port on the VM.

    Returns a list of dicts keyed by backing type ('network', 'pipe',
    'device' or 'file'); returns [] when vm_obj is None. Ports with an
    unrecognized backing class are skipped.
    """
    serial_port_info = []
    if vm_obj is None:
        return serial_port_info
    for port in vm_obj.config.hardware.device:
        backing = dict()
        if isinstance(port, vim.vm.device.VirtualSerialPort):
            if isinstance(port.backing, vim.vm.device.VirtualSerialPort.URIBackingInfo):
                backing['backing_type'] = 'network'
                backing['direction'] = port.backing.direction
                backing['service_uri'] = port.backing.serviceURI
            elif isinstance(port.backing, vim.vm.device.VirtualSerialPort.PipeBackingInfo):
                backing['backing_type'] = 'pipe'
                backing['pipe_name'] = port.backing.pipeName
                backing['endpoint'] = port.backing.endpoint
                backing['no_rx_loss'] = port.backing.noRxLoss
            elif isinstance(port.backing, vim.vm.device.VirtualSerialPort.DeviceBackingInfo):
                backing['backing_type'] = 'device'
                backing['device_name'] = port.backing.deviceName
            elif isinstance(port.backing, vim.vm.device.VirtualSerialPort.FileBackingInfo):
                backing['backing_type'] = 'file'
                backing['file_path'] = port.backing.fileName
            else:
                # unknown backing class: skip this port entirely
                continue
            serial_port_info.append(backing)
    return serial_port_info
def main():
    """Entry point: validate arguments, locate the VM and reconfigure its
    serial ports, then exit via exit_json/fail_json."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        backings=dict(type='list', default=[])
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['name', 'uuid', 'moid']
        ],
        mutually_exclusive=[
            ['name', 'uuid', 'moid']
        ],
    )
    result = {'failed': False, 'changed': False}
    pyv = PyVmomiHelper(module)
    # Check if the VM exists before continuing
    vm_obj = pyv.get_vm()
    if vm_obj:
        proceed = pyv.check_vm_state(vm_obj)
        if proceed:
            result = pyv.reconfigure_vm_serial_port(vm_obj)
    else:
        # We are unable to find the virtual machine the user specified:
        # bail out with whichever identifier was supplied.
        # Fixed: previously read module.params.get('vm_id'), but no 'vm_id'
        # option exists in argument_spec — the managed-object option is 'moid'.
        vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid'))
        module.fail_json(msg="Unable to manage serial ports for non-existing"
                             " virtual machine '%s'." % vm_id)
    if result['failed']:
        module.fail_json(**result)
    else:
        module.exit_json(**result)
if __name__ == '__main__':
main()<|fim▁end|>
| |
<|file_name|>api.py<|end_file_name|><|fim▁begin|># coding: utf-8
import wolframalpha
from .exceptions import APIError
from .clients import turingclient, baiduclient
from .utils import get_mac_address, get_audio_info
class Wolfram(object):
    """A client for querying the Wolfram|Alpha API.

    Attributes:
        key: The API key string obtained from https://www.wolframalpha.com.
    """
    def __init__(self, key):
        self.key = key

    def ask_wolfram(self, question):
        # Query Wolfram|Alpha and return the answer pod's text as an
        # ASCII-only byte string; raises APIError when no usable answer.
        client = wolframalpha.Client(self.key)
        res = client.query(question)
        if len(res.pods) > 0:
            # NOTE(review): pods[1] assumes pods[0] is the input
            # interpretation and pods[1] the result. A response with exactly
            # one pod raises IndexError here — confirm whether the guard
            # should be len(res.pods) > 1.
            pod = res.pods[1]
            if pod.text:
                texts = pod.text
            else:
                raise APIError('Wolfram API failed.')
            # drop non-ASCII characters to avoid downstream encoding errors
            texts = texts.encode('ascii', 'ignore')
            return texts
        else:
            raise APIError('Wolfram API failed.')
class TuringRobot(object):
"""A client for request Turing Robot.
<|fim▁hole|> def __init__(self, key):
self.key = key
def ask_turing(self, question):
params = {
'key': self.key,
'info': question
}
ret = turingclient.query_turing(params)
code = ret.get('code')
if code == 100000:
return ret['text'].encode('utf-8')
else:
raise APIError('Cannot handle this ret code: %s' % code)
class BaiduVoice(object):
    """A client for the Baidu Voice service (speech recognition and TTS).

    (Fixed: the previous docstring said "Turing Robot" — a copy/paste error.)

    Attributes:
        token: The token string got from https://openapi.baidu.com/oauth/2.0/token.
        cuid: Unique identification of user, default is MAC address.
    """
    def __init__(self, token):
        self.token = token
        self.cuid = get_mac_address()

    def asr(self, file_, format_='wav',
            cuid=None, ptc=1, lan='zh'):
        """Constructs and sends an Automatic Speech Recognition request.

        Args:
            file_: the open file with methods write(), close(), tell(), seek()
                set through the __init__() method.
            format_: (optional) the audio format, default is 'wav'
            cuid: (optional) Unique identification of user, default is MAC address.
            ptc: (optional) nbest results, the number of results.
            lan: (optional) language, default is 'zh'.

        Returns:
            A list of recognition results.

        Raises:
            ValueError
            RecognitionError
            VerifyError
            APIError
            QuotaError
        """
        if format_ != 'wav':
            raise ValueError('Unsupported audio format')
        params = {
            'format': format_,
            'token': self.token,
            'cuid': cuid or self.cuid,
            'ptc': ptc,
            'lan': lan
        }
        # Call directly: the previous "try/except Exception, e: raise e"
        # wrapper used Python-2-only syntax and merely re-raised the
        # exception, discarding the original traceback.
        audio_info = get_audio_info(file_)
        params['len'], params['rate'] = audio_info['nframes'], audio_info['framerate']
        return baiduclient.asr(audio_info['content'], params)

    def tts(self, tex, lan='zh', ctp=1,
            cuid=None, spd=5, pit=5, vol=5, per=0):
        """Constructs and sends an Text To Speech request.

        Args:
            tex: The text for conversion.
            lan: (optional) language, default is 'zh'.
            ctp: (optional) Client type, default is 1.
            cuid: (optional) Unique identification of user, default is MAC address.
            spd: (optional) speed, range 0-9, default is 5.
            pit: (optional) pitch, range 0-9, default is 5.
            vol: (optional) volume, range 0-9, default is 5.
            per: (optional) voice of male or female, default is 0 for female voice.

        Returns:
            A binary string of MP3 format audio.

        Raises:
            ValueError
            VerifyError
            APIError
        """
        params = {
            'tex': tex,
            'lan': lan,
            'tok': self.token,
            'ctp': ctp,
            'cuid': cuid or self.cuid,
            'spd': spd,
            'pit': pit,
            'vol': vol,
            'per': per
        }
        return baiduclient.tts(params)

    @staticmethod
    def get_baidu_token(api_key, secret_key):
        """Get Baidu Voice Service token by api key and secret.

        Functions of other args of the response are not confirmed, so the
        whole response dict is returned; access the token via
        ret['access_token'].
        """
        params = {
            'grant_type': 'client_credentials',
            'client_id': api_key,
            'client_secret': secret_key
        }
        return baiduclient.get_token(params)
|
Attributes:
key: The key string got from http://www.tuling123.com.
"""
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|>/*
* GET home page.
*/
exports.index = function(req, res){
res.render('index', { title: 'DSA' });
};<|fim▁hole|><|fim▁end|>
|
exports.game = function (req,res){
res.render('game',{title: req.params.id })
};
|
<|file_name|>memory_bundle_cache_test.cc<|end_file_name|><|fim▁begin|>/*
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and<|fim▁hole|>#include "Firestore/core/src/local/memory_persistence.h"
#include "Firestore/core/test/unit/local/bundle_cache_test.h"
#include "Firestore/core/test/unit/local/persistence_testing.h"
namespace firebase {
namespace firestore {
namespace local {
namespace {
std::unique_ptr<Persistence> PersistenceFactory() {
return MemoryPersistenceWithEagerGcForTesting();
}
} // namespace
INSTANTIATE_TEST_SUITE_P(MemoryBundleCacheTest,
BundleCacheTest,
testing::Values(PersistenceFactory));
} // namespace local
} // namespace firestore
} // namespace firebase<|fim▁end|>
|
* limitations under the License.
*/
|
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.views import generic
from regressiontests.generic_views.models import Artist, Author, Book, Page
from regressiontests.generic_views.forms import AuthorForm
class CustomTemplateView(generic.TemplateView):
template_name = 'generic_views/about.html'
def get_context_data(self, **kwargs):
return {
'params': kwargs,
'key': 'value'
}
class ObjectDetail(generic.DetailView):
template_name = 'generic_views/detail.html'
def get_object(self):
return {'foo': 'bar'}
class ArtistDetail(generic.DetailView):
queryset = Artist.objects.all()
class AuthorDetail(generic.DetailView):
queryset = Author.objects.all()
class PageDetail(generic.DetailView):
queryset = Page.objects.all()
template_name_field = 'template'
class DictList(generic.ListView):
    """A ListView that doesn't use a model."""
    # Fixed: the second entry was {'last': 'Yoko', 'last': 'Ono'} — a
    # duplicate 'last' key that silently dropped 'Yoko'.
    queryset = [
        {'first': 'John', 'last': 'Lennon'},
        {'first': 'Yoko', 'last': 'Ono'}
    ]
    template_name = 'generic_views/list.html'
class AuthorList(generic.ListView):
queryset = Author.objects.all()
class ArtistCreate(generic.CreateView):
model = Artist
class NaiveAuthorCreate(generic.CreateView):
queryset = Author.objects.all()
class AuthorCreate(generic.CreateView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorCreate(generic.CreateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])<|fim▁hole|>class AuthorCreateRestricted(AuthorCreate):
post = method_decorator(login_required)(AuthorCreate.post)
class ArtistUpdate(generic.UpdateView):
model = Artist
class NaiveAuthorUpdate(generic.UpdateView):
queryset = Author.objects.all()
class AuthorUpdate(generic.UpdateView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorUpdate(generic.UpdateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class NaiveAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
class AuthorDelete(generic.DeleteView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
template_name = 'generic_views/confirm_delete.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('authors_list')
class BookConfig(object):
queryset = Book.objects.all()
date_field = 'pubdate'
class BookArchive(BookConfig, generic.ArchiveIndexView):
pass
class BookYearArchive(BookConfig, generic.YearArchiveView):
pass
class BookMonthArchive(BookConfig, generic.MonthArchiveView):
pass
class BookWeekArchive(BookConfig, generic.WeekArchiveView):
pass
class BookDayArchive(BookConfig, generic.DayArchiveView):
pass
class BookTodayArchive(BookConfig, generic.TodayArchiveView):
pass
class BookDetail(BookConfig, generic.DateDetailView):
pass<|fim▁end|>
| |
<|file_name|>zsysnum_linux_amd64.go<|end_file_name|><|fim▁begin|>// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include -m64 /tmp/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build amd64,linux
package unix
const (
SYS_READ = 0
SYS_WRITE = 1
SYS_OPEN = 2
SYS_CLOSE = 3
SYS_STAT = 4
SYS_FSTAT = 5
SYS_LSTAT = 6
SYS_POLL = 7
SYS_LSEEK = 8
SYS_MMAP = 9
SYS_MPROTECT = 10
SYS_MUNMAP = 11
SYS_BRK = 12
SYS_RT_SIGACTION = 13
SYS_RT_SIGPROCMASK = 14
SYS_RT_SIGRETURN = 15
SYS_IOCTL = 16
SYS_PREAD64 = 17<|fim▁hole|> SYS_WRITEV = 20
SYS_ACCESS = 21
SYS_PIPE = 22
SYS_SELECT = 23
SYS_SCHED_YIELD = 24
SYS_MREMAP = 25
SYS_MSYNC = 26
SYS_MINCORE = 27
SYS_MADVISE = 28
SYS_SHMGET = 29
SYS_SHMAT = 30
SYS_SHMCTL = 31
SYS_DUP = 32
SYS_DUP2 = 33
SYS_PAUSE = 34
SYS_NANOSLEEP = 35
SYS_GETITIMER = 36
SYS_ALARM = 37
SYS_SETITIMER = 38
SYS_GETPID = 39
SYS_SENDFILE = 40
SYS_SOCKET = 41
SYS_CONNECT = 42
SYS_ACCEPT = 43
SYS_SENDTO = 44
SYS_RECVFROM = 45
SYS_SENDMSG = 46
SYS_RECVMSG = 47
SYS_SHUTDOWN = 48
SYS_BIND = 49
SYS_LISTEN = 50
SYS_GETSOCKNAME = 51
SYS_GETPEERNAME = 52
SYS_SOCKETPAIR = 53
SYS_SETSOCKOPT = 54
SYS_GETSOCKOPT = 55
SYS_CLONE = 56
SYS_FORK = 57
SYS_VFORK = 58
SYS_EXECVE = 59
SYS_EXIT = 60
SYS_WAIT4 = 61
SYS_KILL = 62
SYS_UNAME = 63
SYS_SEMGET = 64
SYS_SEMOP = 65
SYS_SEMCTL = 66
SYS_SHMDT = 67
SYS_MSGGET = 68
SYS_MSGSND = 69
SYS_MSGRCV = 70
SYS_MSGCTL = 71
SYS_FCNTL = 72
SYS_FLOCK = 73
SYS_FSYNC = 74
SYS_FDATASYNC = 75
SYS_TRUNCATE = 76
SYS_FTRUNCATE = 77
SYS_GETDENTS = 78
SYS_GETCWD = 79
SYS_CHDIR = 80
SYS_FCHDIR = 81
SYS_RENAME = 82
SYS_MKDIR = 83
SYS_RMDIR = 84
SYS_CREAT = 85
SYS_LINK = 86
SYS_UNLINK = 87
SYS_SYMLINK = 88
SYS_READLINK = 89
SYS_CHMOD = 90
SYS_FCHMOD = 91
SYS_CHOWN = 92
SYS_FCHOWN = 93
SYS_LCHOWN = 94
SYS_UMASK = 95
SYS_GETTIMEOFDAY = 96
SYS_GETRLIMIT = 97
SYS_GETRUSAGE = 98
SYS_SYSINFO = 99
SYS_TIMES = 100
SYS_PTRACE = 101
SYS_GETUID = 102
SYS_SYSLOG = 103
SYS_GETGID = 104
SYS_SETUID = 105
SYS_SETGID = 106
SYS_GETEUID = 107
SYS_GETEGID = 108
SYS_SETPGID = 109
SYS_GETPPID = 110
SYS_GETPGRP = 111
SYS_SETSID = 112
SYS_SETREUID = 113
SYS_SETREGID = 114
SYS_GETGROUPS = 115
SYS_SETGROUPS = 116
SYS_SETRESUID = 117
SYS_GETRESUID = 118
SYS_SETRESGID = 119
SYS_GETRESGID = 120
SYS_GETPGID = 121
SYS_SETFSUID = 122
SYS_SETFSGID = 123
SYS_GETSID = 124
SYS_CAPGET = 125
SYS_CAPSET = 126
SYS_RT_SIGPENDING = 127
SYS_RT_SIGTIMEDWAIT = 128
SYS_RT_SIGQUEUEINFO = 129
SYS_RT_SIGSUSPEND = 130
SYS_SIGALTSTACK = 131
SYS_UTIME = 132
SYS_MKNOD = 133
SYS_USELIB = 134
SYS_PERSONALITY = 135
SYS_USTAT = 136
SYS_STATFS = 137
SYS_FSTATFS = 138
SYS_SYSFS = 139
SYS_GETPRIORITY = 140
SYS_SETPRIORITY = 141
SYS_SCHED_SETPARAM = 142
SYS_SCHED_GETPARAM = 143
SYS_SCHED_SETSCHEDULER = 144
SYS_SCHED_GETSCHEDULER = 145
SYS_SCHED_GET_PRIORITY_MAX = 146
SYS_SCHED_GET_PRIORITY_MIN = 147
SYS_SCHED_RR_GET_INTERVAL = 148
SYS_MLOCK = 149
SYS_MUNLOCK = 150
SYS_MLOCKALL = 151
SYS_MUNLOCKALL = 152
SYS_VHANGUP = 153
SYS_MODIFY_LDT = 154
SYS_PIVOT_ROOT = 155
SYS__SYSCTL = 156
SYS_PRCTL = 157
SYS_ARCH_PRCTL = 158
SYS_ADJTIMEX = 159
SYS_SETRLIMIT = 160
SYS_CHROOT = 161
SYS_SYNC = 162
SYS_ACCT = 163
SYS_SETTIMEOFDAY = 164
SYS_MOUNT = 165
SYS_UMOUNT2 = 166
SYS_SWAPON = 167
SYS_SWAPOFF = 168
SYS_REBOOT = 169
SYS_SETHOSTNAME = 170
SYS_SETDOMAINNAME = 171
SYS_IOPL = 172
SYS_IOPERM = 173
SYS_CREATE_MODULE = 174
SYS_INIT_MODULE = 175
SYS_DELETE_MODULE = 176
SYS_GET_KERNEL_SYMS = 177
SYS_QUERY_MODULE = 178
SYS_QUOTACTL = 179
SYS_NFSSERVCTL = 180
SYS_GETPMSG = 181
SYS_PUTPMSG = 182
SYS_AFS_SYSCALL = 183
SYS_TUXCALL = 184
SYS_SECURITY = 185
SYS_GETTID = 186
SYS_READAHEAD = 187
SYS_SETXATTR = 188
SYS_LSETXATTR = 189
SYS_FSETXATTR = 190
SYS_GETXATTR = 191
SYS_LGETXATTR = 192
SYS_FGETXATTR = 193
SYS_LISTXATTR = 194
SYS_LLISTXATTR = 195
SYS_FLISTXATTR = 196
SYS_REMOVEXATTR = 197
SYS_LREMOVEXATTR = 198
SYS_FREMOVEXATTR = 199
SYS_TKILL = 200
SYS_TIME = 201
SYS_FUTEX = 202
SYS_SCHED_SETAFFINITY = 203
SYS_SCHED_GETAFFINITY = 204
SYS_SET_THREAD_AREA = 205
SYS_IO_SETUP = 206
SYS_IO_DESTROY = 207
SYS_IO_GETEVENTS = 208
SYS_IO_SUBMIT = 209
SYS_IO_CANCEL = 210
SYS_GET_THREAD_AREA = 211
SYS_LOOKUP_DCOOKIE = 212
SYS_EPOLL_CREATE = 213
SYS_EPOLL_CTL_OLD = 214
SYS_EPOLL_WAIT_OLD = 215
SYS_REMAP_FILE_PAGES = 216
SYS_GETDENTS64 = 217
SYS_SET_TID_ADDRESS = 218
SYS_RESTART_SYSCALL = 219
SYS_SEMTIMEDOP = 220
SYS_FADVISE64 = 221
SYS_TIMER_CREATE = 222
SYS_TIMER_SETTIME = 223
SYS_TIMER_GETTIME = 224
SYS_TIMER_GETOVERRUN = 225
SYS_TIMER_DELETE = 226
SYS_CLOCK_SETTIME = 227
SYS_CLOCK_GETTIME = 228
SYS_CLOCK_GETRES = 229
SYS_CLOCK_NANOSLEEP = 230
SYS_EXIT_GROUP = 231
SYS_EPOLL_WAIT = 232
SYS_EPOLL_CTL = 233
SYS_TGKILL = 234
SYS_UTIMES = 235
SYS_VSERVER = 236
SYS_MBIND = 237
SYS_SET_MEMPOLICY = 238
SYS_GET_MEMPOLICY = 239
SYS_MQ_OPEN = 240
SYS_MQ_UNLINK = 241
SYS_MQ_TIMEDSEND = 242
SYS_MQ_TIMEDRECEIVE = 243
SYS_MQ_NOTIFY = 244
SYS_MQ_GETSETATTR = 245
SYS_KEXEC_LOAD = 246
SYS_WAITID = 247
SYS_ADD_KEY = 248
SYS_REQUEST_KEY = 249
SYS_KEYCTL = 250
SYS_IOPRIO_SET = 251
SYS_IOPRIO_GET = 252
SYS_INOTIFY_INIT = 253
SYS_INOTIFY_ADD_WATCH = 254
SYS_INOTIFY_RM_WATCH = 255
SYS_MIGRATE_PAGES = 256
SYS_OPENAT = 257
SYS_MKDIRAT = 258
SYS_MKNODAT = 259
SYS_FCHOWNAT = 260
SYS_FUTIMESAT = 261
SYS_NEWFSTATAT = 262
SYS_UNLINKAT = 263
SYS_RENAMEAT = 264
SYS_LINKAT = 265
SYS_SYMLINKAT = 266
SYS_READLINKAT = 267
SYS_FCHMODAT = 268
SYS_FACCESSAT = 269
SYS_PSELECT6 = 270
SYS_PPOLL = 271
SYS_UNSHARE = 272
SYS_SET_ROBUST_LIST = 273
SYS_GET_ROBUST_LIST = 274
SYS_SPLICE = 275
SYS_TEE = 276
SYS_SYNC_FILE_RANGE = 277
SYS_VMSPLICE = 278
SYS_MOVE_PAGES = 279
SYS_UTIMENSAT = 280
SYS_EPOLL_PWAIT = 281
SYS_SIGNALFD = 282
SYS_TIMERFD_CREATE = 283
SYS_EVENTFD = 284
SYS_FALLOCATE = 285
SYS_TIMERFD_SETTIME = 286
SYS_TIMERFD_GETTIME = 287
SYS_ACCEPT4 = 288
SYS_SIGNALFD4 = 289
SYS_EVENTFD2 = 290
SYS_EPOLL_CREATE1 = 291
SYS_DUP3 = 292
SYS_PIPE2 = 293
SYS_INOTIFY_INIT1 = 294
SYS_PREADV = 295
SYS_PWRITEV = 296
SYS_RT_TGSIGQUEUEINFO = 297
SYS_PERF_EVENT_OPEN = 298
SYS_RECVMMSG = 299
SYS_FANOTIFY_INIT = 300
SYS_FANOTIFY_MARK = 301
SYS_PRLIMIT64 = 302
SYS_NAME_TO_HANDLE_AT = 303
SYS_OPEN_BY_HANDLE_AT = 304
SYS_CLOCK_ADJTIME = 305
SYS_SYNCFS = 306
SYS_SENDMMSG = 307
SYS_SETNS = 308
SYS_GETCPU = 309
SYS_PROCESS_VM_READV = 310
SYS_PROCESS_VM_WRITEV = 311
SYS_KCMP = 312
SYS_FINIT_MODULE = 313
SYS_SCHED_SETATTR = 314
SYS_SCHED_GETATTR = 315
SYS_RENAMEAT2 = 316
SYS_SECCOMP = 317
SYS_GETRANDOM = 318
SYS_MEMFD_CREATE = 319
SYS_KEXEC_FILE_LOAD = 320
SYS_BPF = 321
SYS_EXECVEAT = 322
SYS_USERFAULTFD = 323
SYS_MEMBARRIER = 324
SYS_MLOCK2 = 325
SYS_COPY_FILE_RANGE = 326
SYS_PREADV2 = 327
SYS_PWRITEV2 = 328
SYS_PKEY_MPROTECT = 329
SYS_PKEY_ALLOC = 330
SYS_PKEY_FREE = 331
SYS_STATX = 332
SYS_IO_PGETEVENTS = 333
SYS_RSEQ = 334
)<|fim▁end|>
|
SYS_PWRITE64 = 18
SYS_READV = 19
|
<|file_name|>bundle.go<|end_file_name|><|fim▁begin|>package clusterconf
import (
"encoding/json"
"fmt"
"math/rand"
"net/url"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/cerana/cerana/acomm"
"github.com/cerana/cerana/pkg/errors"
)
const bundlesPrefix string = "bundles"
// BundleDatasetType is the type of dataset to be used in a bundle.
type BundleDatasetType int
// Valid bundle dataset types
const (
RWZFS = iota
TempZFS
RAMDisk
)
// Bundle is information about a bundle of services.
type Bundle struct {
c *ClusterConf
ID uint64 `json:"id"`
Datasets map[string]BundleDataset `json:"datasets"`
Services map[string]BundleService `json:"services"`
Redundancy uint64 `json:"redundancy"`
Ports BundlePorts `json:"ports"`
// ModIndex should be treated as opaque, but passed back on updates.
ModIndex uint64 `json:"modIndex"`
}
// BundlePorts is a map of port numbers to port information.
type BundlePorts map[int]BundlePort
// MarshalJSON marshals BundlePorts into a JSON map, converting int keys to
// strings (JSON object keys must be strings).
func (p BundlePorts) MarshalJSON() ([]byte, error) {
	stringKeyed := make(map[string]BundlePort, len(p))
	for number, info := range p {
		stringKeyed[strconv.Itoa(number)] = info
	}
	data, err := json.Marshal(stringKeyed)
	return data, errors.Wrap(err)
}
// UnmarshalJSON unmarshals JSON into a BundlePorts, converting string keys to
// ints.
//
// Fixed: the receiver must be a pointer. The previous value receiver assigned
// a fresh map to its local copy (p = make(BundlePorts)) and filled that, so
// the decoded ports were silently discarded and callers always observed an
// empty map.
func (p *BundlePorts) UnmarshalJSON(data []byte) error {
	ports := make(map[string]BundlePort)
	if err := json.Unmarshal(data, &ports); err != nil {
		return errors.Wrapv(err, map[string]interface{}{"json": string(data)})
	}
	decoded := make(BundlePorts, len(ports))
	for port, value := range ports {
		portI, err := strconv.Atoi(port)
		if err != nil {
			return errors.Wrapv(err, map[string]interface{}{"port": port})
		}
		decoded[portI] = value
	}
	*p = decoded
	return nil
}
// BundleDataset is configuration for a dataset associated with a bundle.
type BundleDataset struct {
	Name string            `json:"name"`
	ID   string            `json:"id"`
	Type BundleDatasetType `json:"type"`
	// Fixed: the tag was `json:"type"`, duplicating Type's tag. encoding/json
	// suppresses both fields on such a conflict, so neither Type nor Quota
	// round-tripped. Data written with the old (broken) tag never contained
	// either value, so changing the tag is safe — confirm against stored KV
	// data before deploying.
	Quota uint64 `json:"quota"`
}
// overlayOn overlays this bundle-level dataset config on top of the matching
// base cluster Dataset: unset (zero) fields are filled from the base. The ids
// must agree or an error is returned. The receiver is a value, so the caller's
// copy is never mutated.
func (d BundleDataset) overlayOn(base *Dataset) (BundleDataset, error) {
	if d.ID != base.ID {
		return d, errors.Newv("dataset ids do not match", map[string]interface{}{
			"bundleDatasetID": d.ID,
			"datasetID":       base.ID,
		})
	}
	// overlay data: only Quota is inherited from the base today
	if d.Quota <= 0 {
		d.Quota = base.Quota
	}
	return d, nil
}
// BundleService is configuration overrides for a service of a bundle and
// associated bundles.
type BundleService struct {
ServiceConf
Datasets map[string]ServiceDataset `json:"datasets"`
}
func (s BundleService) overlayOn(base *Service) (BundleService, error) {
if s.ID != base.ID {
return s, errors.Newv("service ids do not match", map[string]interface{}{
"bundleServiceID": s.ID,
"serviceID": base.ID,
})
}
// maps are pointers, so need to be duplicated separately.
result := s
result.Datasets = make(map[string]ServiceDataset)
for k, v := range s.Datasets {
result.Datasets[k] = v
}
result.HealthChecks = make(map[string]HealthCheck)
for k, v := range s.HealthChecks {
result.HealthChecks[k] = v
}
result.Env = make(map[string]string)
for k, v := range s.Env {
result.Env[k] = v
}
// overlay data
if result.Dataset == "" {
result.Dataset = base.Dataset
}
if result.Limits.CPU <= 0 {
result.Limits.CPU = base.Limits.CPU
}
if result.Limits.Memory <= 0 {
result.Limits.Memory = base.Limits.Memory
}
if result.Limits.Processes <= 0 {
result.Limits.Processes = base.Limits.Processes
}
for id, hc := range base.HealthChecks {
_, ok := result.HealthChecks[id]
if !ok {
result.HealthChecks[id] = hc
continue
}
}
for key, val := range base.Env {
_, ok := result.Env[key]
if !ok {
result.Env[key] = val
}
}
if result.Cmd == nil {
result.Cmd = base.Cmd
}
return result, nil
}
// ServiceDataset is configuration for mounting a dataset for a bundle service.
type ServiceDataset struct {
Name string `json:"name"`
MountPoint string `json:"mountPoint"`
ReadOnly bool `json:"readOnly"`
}
// BundlePort is configuration for a port associated with a bundle.
type BundlePort struct {
Port int `json:"port"`
Public bool `json:"public"`
ConnectedBundles []string `json:"connectedBundles"`
ExternalPort int `json:"externalPort"`
}
// DeleteBundleArgs are args for bundle delete task.
type DeleteBundleArgs struct {
ID uint64 `json:"id"`
}
// GetBundleArgs are args for retrieving a bundle.
type GetBundleArgs struct {
ID uint64 `json:"id"`
CombinedOverlay bool `json:"overlay"`
}
// ListBundleArgs are args for retrieving a bundle list.
type ListBundleArgs struct {
CombinedOverlay bool `json:"overlay"`
}
// BundlePayload can be used for task args or result when a bundle object needs
// to be sent.
type BundlePayload struct {
Bundle *Bundle `json:"bundle"`
}
// BundleListResult is the result from listing bundles.
type BundleListResult struct {
Bundles []*Bundle `json:"bundles"`
}
// GetBundle retrieves a single bundle by id, optionally (args.CombinedOverlay)
// with the base dataset and service configurations overlaid onto it.
func (c *ClusterConf) GetBundle(req *acomm.Request) (interface{}, *url.URL, error) {
	var args GetBundleArgs
	if err := req.UnmarshalArgs(&args); err != nil {
		return nil, nil, err
	}
	// 0 is the uint64 zero value, so it doubles as "id not supplied".
	if args.ID == 0 {
		return nil, nil, errors.Newv("missing arg: id", map[string]interface{}{"args": args})
	}
	bundle, err := c.getBundle(args.ID)
	if err != nil {
		return nil, nil, err
	}
	if args.CombinedOverlay {
		bundle, err = bundle.combinedOverlay()
		if err != nil {
			return nil, nil, err
		}
	}
	return &BundlePayload{bundle}, nil, nil
}
// ListBundles retrieves a list of all bundles.
func (c *ClusterConf) ListBundles(req *acomm.Request) (interface{}, *url.URL, error) {
var args ListBundleArgs
if err := req.UnmarshalArgs(&args); err != nil {
return nil, nil, err
}
keys, err := c.kvKeys(bundlesPrefix)
if err != nil {
return nil, nil, err
}
// extract and deduplicate the bundle ids
ids := make(map[uint64]bool)
keyFormat := filepath.Join(bundlesPrefix, "%d")
for _, key := range keys {
var id uint64
_, err := fmt.Sscanf(key, keyFormat, &id)
if err != nil {
return nil, nil, errors.Newv("failed to extract valid bundle id", map[string]interface{}{"key": key, "keyFormat": keyFormat})
}
ids[id] = true
}
var wg sync.WaitGroup
bundleChan := make(chan *Bundle, len(ids))
errChan := make(chan error, len(ids))
for id := range ids {
wg.Add(1)
go func(id uint64) {
defer wg.Done()
bundle, err := c.getBundle(id)
if err != nil {
errChan <- err
return
}
if args.CombinedOverlay {
bundle, err = bundle.combinedOverlay()
if err != nil {
errChan <- err
return
}
}
bundleChan <- bundle
}(id)
}
wg.Wait()
close(bundleChan)
close(errChan)
if len(errChan) > 0 {
err := <-errChan
return nil, nil, err
}
bundles := make([]*Bundle, 0, len(bundleChan))
for bundle := range bundleChan {
bundles = append(bundles, bundle)
}
return &BundleListResult{
Bundles: bundles,
}, nil, nil
}
// UpdateBundle creates or updates a bundle config. When updating, a Get should
// first be performed and the modified Bundle passed back (ModIndex is used for
// optimistic concurrency).
func (c *ClusterConf) UpdateBundle(req *acomm.Request) (interface{}, *url.URL, error) {
	var args BundlePayload
	if err := req.UnmarshalArgs(&args); err != nil {
		return nil, nil, err
	}
	if args.Bundle == nil {
		return nil, nil, errors.Newv("missing arg: bundle", map[string]interface{}{"args": args})
	}
	args.Bundle.c = c
	if args.Bundle.ID == 0 {
		// New bundle: assign a random id.
		// NOTE(review): rand.Int63 can in principle return 0, which would be
		// treated as "unset" on a later update — confirm this is acceptable.
		rand.Seed(time.Now().UnixNano())
		args.Bundle.ID = uint64(rand.Int63())
	}
	if err := args.Bundle.update(); err != nil {
		return nil, nil, err
	}
	return &BundlePayload{args.Bundle}, nil, nil
}
// DeleteBundle deletes a bundle config. Deleting a bundle that does not exist
// is treated as success, making the operation idempotent.
func (c *ClusterConf) DeleteBundle(req *acomm.Request) (interface{}, *url.URL, error) {
	var args DeleteBundleArgs
	if err := req.UnmarshalArgs(&args); err != nil {
		return nil, nil, err
	}
	if args.ID == 0 {
		return nil, nil, errors.Newv("missing arg: id", map[string]interface{}{"args": args})
	}
	bundle, err := c.getBundle(args.ID)
	if err != nil {
		// Not-found is detected by error-string matching; the message is
		// produced in (*Bundle).reload.
		if strings.Contains(err.Error(), "bundle config not found") {
			return nil, nil, nil
		}
		return nil, nil, err
	}
	return nil, nil, bundle.delete()
}
// getBundle loads the bundle with the given id from the KV store, returning
// an error (including "bundle config not found") when it cannot be loaded.
func (c *ClusterConf) getBundle(id uint64) (*Bundle, error) {
	bundle := &Bundle{
		c: c,
		ID: id,
	}
	if err := bundle.reload(); err != nil {
		return nil, err
	}
	return bundle, nil
}
func (b *Bundle) reload() error {
var err error
key := path.Join(bundlesPrefix, strconv.FormatUint(b.ID, 10), "config")
value, err := b.c.kvGet(key)
if err != nil {
if strings.Contains(err.Error(), "key not found") {
err = errors.Newv("bundle config not found", map[string]interface{}{"bundleID": b.ID})
}
return err
}
if err = json.Unmarshal(value.Data, &b); err != nil {
return errors.Wrapv(err, map[string]interface{}{"json": string(value.Data)})
}
b.ModIndex = value.Index<|fim▁hole|>
return nil
}
// delete removes the bundle's entire KV subtree, guarded by ModIndex
// (compare-and-swap semantics in the KV layer).
func (b *Bundle) delete() error {
	key := path.Join(bundlesPrefix, strconv.FormatUint(b.ID, 10))
	return errors.Wrapv(b.c.kvDelete(key, b.ModIndex), map[string]interface{}{"bundleID": b.ID})
}
// update saves the core bundle config, guarded by ModIndex (compare-and-swap),
// and records the new index on success so subsequent saves don't conflict.
func (b *Bundle) update() error {
	key := path.Join(bundlesPrefix, strconv.FormatUint(b.ID, 10), "config")
	index, err := b.c.kvUpdate(key, b, b.ModIndex)
	if err != nil {
		return errors.Wrapv(err, map[string]interface{}{"bundleID": b.ID})
	}
	b.ModIndex = index
	return nil
}
// combinedOverlay will create a new *Bundle object containing the base configurations of datasets and services with the bundle values overlayed on top.
// Note: Attempting to save a combined overlay bundle will result in an error.
func (b *Bundle) combinedOverlay() (*Bundle, error) {
var wg sync.WaitGroup
errorChan := make(chan error, len(b.Datasets)+len(b.Services))
defer close(errorChan)
// duplicate bundle using json to also duplicate the map field values
var result Bundle
tmp, err := json.Marshal(b)
if err != nil {
return nil, errors.Wrapv(err, map[string]interface{}{"bundle": b}, "failed to marshal bundle")
}
if err := json.Unmarshal(tmp, &result); err != nil {
return nil, errors.Wrapv(err, map[string]interface{}{"bundle": b, "json": string(tmp)}, "failed to unmarshal bundle")
}
result.Datasets = make(map[string]BundleDataset)
for k, v := range b.Datasets {
result.Datasets[k] = v
}
result.Services = make(map[string]BundleService)
for k, v := range b.Services {
result.Services[k] = v
}
result.Ports = make(BundlePorts)
for k, v := range b.Ports {
result.Ports[k] = v
}
for i, d := range b.Datasets {
wg.Add(1)
go func(id string, bd BundleDataset) {
defer wg.Done()
dataset, err := b.c.getDataset(id)
if err != nil {
errorChan <- err
return
}
combined, err := bd.overlayOn(dataset)
if err != nil {
errorChan <- err
return
}
result.Datasets[id] = combined
}(i, d)
}
for i, s := range b.Services {
wg.Add(1)
go func(id string, bs BundleService) {
defer wg.Done()
service, err := b.c.getService(id)
if err != nil {
errorChan <- err
return
}
combined, err := bs.overlayOn(service)
if err != nil {
errorChan <- err
return
}
result.Services[id] = combined
}(i, s)
}
wg.Wait()
if len(errorChan) == 0 {
return &result, nil
}
errs := make([]error, len(errorChan))
Loop:
for {
select {
case err := <-errorChan:
errs = append(errs, err)
default:
break Loop
}
}
return nil, errors.Newv("bundle overlay failed", map[string]interface{}{"bundleID": b.ID, "errors": errs})
}<|fim▁end|>
| |
<|file_name|>memoize.js<|end_file_name|><|fim▁begin|>'use strict';
function Memoized() {}
// Create a memoized wrapper around fn (sync or async, callback-last style).
// Outcomes (err, data) are cached per key and replayed on subsequent calls.
const memoize = (
  fn // function, sync or async; last argument must be a callback(err, data)
  // Returns: function, memoized
) => {
  const cache = new Map();
  const memoized = function(...args) {
    const callback = args.pop();
    // NOTE(review): only the first argument is used as the cache key, so
    // calls differing in later arguments share one cache entry — confirm.
    const key = args[0];
    const record = cache.get(key);
    if (record) {
      // cache hit: replay the recorded outcome without invoking fn
      callback(record.err, record.data);
      return;
    }
    fn(...args, (err, data) => {
      memoized.add(key, err, data);
      memoized.emit('memoize', key, err, data);
      callback(err, data);
    });
  };
  // Instance state copied onto the function object. timeout/limit/maxSize/
  // maxCount are declared but not enforced anywhere in this file.
  const fields = {
    cache,
    timeout: 0,
    limit: 0,
    size: 0,
    maxSize: 0,
    maxCount: 0,
    events: {
      timeout: null,
      memoize: null,
      overflow: null,
      add: null,
      del: null,
      clear: null
    }
  };
  // Give the function the Memoized prototype so add/del/get/emit work on it.
  Object.setPrototypeOf(memoized, Memoized.prototype);
  return Object.assign(memoized, fields);
};
// Drop every cached record, emitting the 'clear' event first.
Memoized.prototype.clear = function() {
  this.emit('clear');
  this.cache.clear();
};
// Store an outcome (err, data) under key, emitting 'add'. Chainable.
Memoized.prototype.add = function (key, err, data) {
  this.emit('add', err, data);
  const record = { err, data };
  this.cache.set(key, record);
  return this;
};
// Remove the cached record for key, emitting 'del'. Chainable.
Memoized.prototype.del = function(key) {
  this.emit('del', key);
  this.cache.delete(key);
  return this;
};
// Replay the cached outcome for key into callback(err, data). Chainable.
// NOTE(review): an absent key leaves record undefined and this throws
// TypeError on record.err — confirm whether a guard is intended.
Memoized.prototype.get = function(key, callback) {
  const record = this.cache.get(key);
  callback(record.err, record.data);
  return this;
};
Memoized.prototype.on = function(
eventName, // string
listener // function, handler
// on('memoize', function(err, data))<|fim▁hole|> if (eventName in this.events) {
this.events[eventName] = listener;
}
};
Memoized.prototype.emit = function(
// Emit Collector events
eventName, // string
...args // rest arguments
) {
const event = this.events[eventName];
if (event) event(...args);
};
module.exports = {
memoize,
};<|fim▁end|>
|
// on('add', function(key, err, data))
// on('del', function(key))
// on('clear', function())
) {
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.